Compare commits
487 Commits
v1.14.0-al...master
@@ -0,0 +1,36 @@
+# Config for the Gemini Pull Request Review Bot.
+# https://github.com/marketplace/gemini-code-assist
+
+# Enables fun features such as a poem in the initial pull request summary.
+# Type: boolean, default: false.
+have_fun: false
+
+code_review:
+  # Disables Gemini from acting on PRs.
+  # Type: boolean, default: false.
+  disable: false
+
+  # Minimum severity of comments to post (LOW, MEDIUM, HIGH, CRITICAL).
+  # Type: string, default: MEDIUM.
+  comment_severity_threshold: MEDIUM
+
+  # Max number of review comments (-1 for unlimited).
+  # Type: integer, default: -1.
+  max_review_comments: -1
+
+  pull_request_opened:
+    # Post helpful instructions when PR is opened.
+    # Type: boolean, default: false.
+    help: true
+
+    # Post PR summary when opened.
+    # Type boolean, default: true.
+    summary: true
+
+    # Post code review on PR open.
+    # Type boolean, default: true.
+    code_review: true
+
+# List of glob patterns to ignore (files and directories).
+# Type: array of string, default: [].
+ignore_patterns: []
@@ -2,30 +2,48 @@
 
 <!--
 Add one of the following kinds:
-/kind api-change
 /kind bug
-/kind cleanup
-/kind deprecation
-/kind design
-/kind documentation
-/kind failing-test
 /kind feature
+/kind documentation
+/kind cleanup
+
+Optionally add one or more of the following kinds if applicable:
+/kind api-change
+/kind deprecation
+/kind failing-test
 /kind flake
 /kind regression
 -->
 
 **What this PR does / why we need it**:
 
 **Which issue(s) this PR fixes**:
+<!--
+*Automatically closes linked issue when PR is merged.
+Usage: `Fixes #<issue number>`, or `Fixes (paste link of issue)`.*
+-->
 Fixes #
 
+<!--
+*Optionally link to the umbrella issue if this PR resolves part of it.
+Usage: `Part of #<issue number>`, or `Part of (paste link of issue)`.*
+Part of #
+-->
+
 **Special notes for your reviewer**:
+<!--
+Such as a test report of this PR.
+-->
 
 **Does this PR introduce a user-facing change?**:
 <!--
 If no, just write "NONE" in the release-note block below.
 If yes, a release note is required.
+Some brief examples of release notes:
+1. `karmada-controller-manager`: Fixed the issue that xxx
+2. `karmada-scheduler`: The deprecated flag `--xxx` now has been removed. Users of this flag should xxx.
+3. `API Change`: Introduced `spec.<field>` to the PropagationPolicy API for xxx.
 -->
 ```release-note
 
 
@@ -19,18 +19,18 @@ updates:
   - package-ecosystem: docker
     directory: /cluster/images/
-    target-branch: "release-1.12"
+    target-branch: "release-1.15"
    schedule:
      interval: weekly
 
   - package-ecosystem: docker
     directory: /cluster/images/
-    target-branch: "release-1.11"
+    target-branch: "release-1.14"
    schedule:
      interval: weekly
 
   - package-ecosystem: docker
     directory: /cluster/images/
-    target-branch: "release-1.10"
+    target-branch: "release-1.13"
    schedule:
      interval: weekly
@@ -27,10 +27,10 @@ jobs:
          - karmada-search
          - karmada-operator
          - karmada-metrics-adapter
-        karmada-version: [ release-1.12, release-1.11, release-1.10 ]
+        karmada-version: [ release-1.15, release-1.14, release-1.13 ]
    steps:
      - name: checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          ref: ${{ matrix.karmada-version }}
      - name: install Go
@@ -47,7 +47,7 @@ jobs:
          export REGISTRY="docker.io/karmada"
          make image-${{ matrix.target }}
      - name: Run Trivy vulnerability scanner
-        uses: aquasecurity/trivy-action@0.30.0
+        uses: aquasecurity/trivy-action@0.32.0
        env:
          ACTIONS_RUNTIME_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          TRIVY_DB_REPOSITORY: ghcr.io/aquasecurity/trivy-db,public.ecr.aws/aquasecurity/trivy-db
@@ -56,15 +56,17 @@ jobs:
          format: 'sarif'
          ignore-unfixed: true
          vuln-type: 'os,library'
+          cache: false
          output: '${{ matrix.target }}:${{ matrix.karmada-version }}.trivy-results.sarif'
      - name: display scan results
-        uses: aquasecurity/trivy-action@0.30.0
+        uses: aquasecurity/trivy-action@0.32.0
        env:
          TRIVY_SKIP_DB_UPDATE: true # Avoid updating the vulnerability db as it was cached in the previous step.
        with:
          image-ref: 'docker.io/karmada/${{ matrix.target }}:${{ matrix.karmada-version }}'
          format: 'table'
          ignore-unfixed: true
+          cache: false
          vuln-type: 'os,library'
      - name: Upload Trivy scan results to GitHub Security tab
        uses: github/codeql-action/upload-sarif@v3
@@ -31,7 +31,11 @@ jobs:
          - karmada-metrics-adapter
    steps:
      - name: checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
+        with:
+          # fetch-depth:
+          # 0 indicates all history for all branches and tags.
+          fetch-depth: 0
      - name: install Go
        uses: actions/setup-go@v5
        with:
@@ -42,7 +46,7 @@ jobs:
          export REGISTRY="docker.io/karmada"
          make image-${{ matrix.target }}
      - name: Run Trivy vulnerability scanner
-        uses: aquasecurity/trivy-action@0.30.0
+        uses: aquasecurity/trivy-action@0.32.0
        env:
          ACTIONS_RUNTIME_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          TRIVY_DB_REPOSITORY: ghcr.io/aquasecurity/trivy-db,public.ecr.aws/aquasecurity/trivy-db
@@ -52,8 +56,9 @@ jobs:
          ignore-unfixed: true
          vuln-type: 'os,library'
          output: 'trivy-results.sarif'
+          cache: false
      - name: display scan results
-        uses: aquasecurity/trivy-action@0.30.0
+        uses: aquasecurity/trivy-action@0.32.0
        env:
          TRIVY_SKIP_DB_UPDATE: true # Avoid updating the vulnerability db as it was cached in the previous step.
        with:
@@ -61,6 +66,7 @@ jobs:
          format: 'table'
          ignore-unfixed: true
          vuln-type: 'os,library'
+          cache: false
      - name: Upload Trivy scan results to GitHub Security tab
        uses: github/codeql-action/upload-sarif@v3
        with:
@@ -19,8 +19,8 @@ jobs:
      max-parallel: 5
      fail-fast: false
      matrix:
-        kubeapiserver-version: [ v1.23.4, v1.24.2, v1.25.0, v1.26.0, v1.27.3, v1.28.0, v1.29.0, v1.30.0, v1.31.0 ]
-        karmada-version: [ master, release-1.12, release-1.11, release-1.10 ]
+        kubeapiserver-version: [ v1.24.2, v1.25.0, v1.26.0, v1.27.3, v1.28.0, v1.29.0, v1.30.0, v1.31.0, v1.32.0, v1.33.0 ]
+        karmada-version: [ master, release-1.15, release-1.14, release-1.13 ]
    env:
      KARMADA_APISERVER_VERSION: ${{ matrix.kubeapiserver-version }}
    steps:
@@ -38,7 +38,7 @@ jobs:
          docker-images: false
          swap-storage: false
      - name: checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          # Number of commits to fetch. 0 indicates all history for all branches and tags.
          # We need to guess version via git tags.
@@ -19,7 +19,7 @@ jobs:
      max-parallel: 5
      fail-fast: false
      matrix:
-        k8s: [ v1.23.4, v1.24.2, v1.25.0, v1.26.0, v1.27.3, v1.28.0, v1.29.0, v1.30.0, v1.31.0 ]
+        k8s: [ v1.24.2, v1.25.0, v1.26.0, v1.27.3, v1.28.0, v1.29.0, v1.30.0, v1.31.0, v1.32.0, v1.33.0 ]
    steps:
      # Free up disk space on Ubuntu
      - name: Free Disk Space (Ubuntu)
@@ -35,7 +35,7 @@ jobs:
          docker-images: false
          swap-storage: false
      - name: checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          # Number of commits to fetch. 0 indicates all history for all branches and tags.
          # We need to guess version via git tags.
@@ -20,7 +20,7 @@ jobs:
    runs-on: ubuntu-22.04
    steps:
      - name: checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
      - name: install Go
        uses: actions/setup-go@v5
        with:
@@ -38,7 +38,7 @@ jobs:
    runs-on: ubuntu-22.04
    steps:
      - name: checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
      - name: install Go
        uses: actions/setup-go@v5
        with:
@@ -66,7 +66,7 @@ jobs:
    runs-on: ubuntu-22.04
    steps:
      - name: checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          # Number of commits to fetch. 0 indicates all history for all branches and tags.
          # We need to guess version via git tags.
@@ -85,7 +85,7 @@ jobs:
      GOTESTSUM_ENABLED: enabled
    steps:
      - name: checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
      - name: install Go
        uses: actions/setup-go@v5
        with:
@@ -116,7 +116,7 @@ jobs:
        # Here support the latest three minor releases of Kubernetes, this can be considered to be roughly
        # the same as the End of Life of the Kubernetes release: https://kubernetes.io/releases/
        # Please remember to update the CI Schedule Workflow when we add a new version.
-        k8s: [ v1.29.0, v1.30.0, v1.31.0 ]
+        k8s: [ v1.31.0, v1.32.0, v1.33.0 ]
    steps:
      # Free up disk space on Ubuntu
      - name: Free Disk Space (Ubuntu)
@@ -132,7 +132,7 @@ jobs:
          docker-images: false
          swap-storage: false
      - name: checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          # Number of commits to fetch. 0 indicates all history for all branches and tags.
          # We need to guess version via git tags.
@@ -144,6 +144,7 @@ jobs:
      - name: setup e2e test environment
        run: |
          export CLUSTER_VERSION=kindest/node:${{ matrix.k8s }}
+          export KUBE_CACHE_MUTATION_DETECTOR=true
          hack/local-up-karmada.sh
      - name: run e2e
        run: |
@@ -172,7 +173,7 @@ jobs:
        # Here support the latest three minor releases of Kubernetes, this can be considered to be roughly
        # the same as the End of Life of the Kubernetes release: https://kubernetes.io/releases/
        # Please remember to update the CI Schedule Workflow when we add a new version.
-        k8s: [ v1.29.0, v1.30.0, v1.31.0 ]
+        k8s: [ v1.31.0, v1.32.0, v1.33.0 ]
    steps:
      # Free up disk space on Ubuntu
      - name: Free Disk Space (Ubuntu)
@@ -188,7 +189,7 @@ jobs:
          docker-images: false
          swap-storage: false
      - name: checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          # Number of commits to fetch. 0 indicates all history for all branches and tags.
          # We need to guess version via git tags.
@@ -16,7 +16,7 @@ jobs:
    runs-on: ubuntu-22.04
    steps:
      - name: checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          # fetch-depth:
          # 0 indicates all history for all branches and tags.
@@ -31,7 +31,7 @@ jobs:
    runs-on: ubuntu-22.04
    steps:
      - name: checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          # fetch-depth:
          # 0 indicates all history for all branches and tags.
@@ -42,7 +42,7 @@ jobs:
        with:
          go-version-file: go.mod
      - name: Install Cosign
-        uses: sigstore/cosign-installer@v3.8.1
+        uses: sigstore/cosign-installer@v3.9.2
        with:
          cosign-release: 'v2.2.3'
      - name: install QEMU
@@ -15,7 +15,7 @@ jobs:
    if: ${{ github.repository == 'karmada-io/karmada' }}
    steps:
      - name: checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          # fetch-depth:
          # 0 indicates all history for all branches and tags.
@@ -31,7 +31,7 @@ jobs:
    runs-on: ubuntu-22.04
    steps:
      - name: checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          # fetch-depth:
          # 0 indicates all history for all branches and tags.
@@ -42,7 +42,7 @@ jobs:
        with:
          go-version-file: go.mod
      - name: Install Cosign
-        uses: sigstore/cosign-installer@v3.8.1
+        uses: sigstore/cosign-installer@v3.9.2
        with:
          cosign-release: 'v2.2.3'
      - name: install QEMU
@@ -7,7 +7,7 @@ on:
      - 'dependabot/**'
 
 permissions:
-  contents: read # Required by actions/checkout@v4 to fetch the repository contents.
+  contents: read # Required by actions/checkout@v5 to fetch the repository contents.
 
 jobs:
  fossa:
@@ -19,7 +19,7 @@ jobs:
    runs-on: ubuntu-22.04
    steps:
      - name: checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
      - name: Run FOSSA scan and upload build data
        uses: fossas/fossa-action@v1
        with:
@@ -1,8 +1,8 @@
 # validate any chart changes under charts directory
 name: Chart
 env:
-  HELM_VERSION: v3.11.2
-  KUSTOMIZE_VERSION: 5.4.3
+  HELM_VERSION: v3.17.3
+  KUSTOMIZE_VERSION: 5.6.0
 on:
  push:
    # Exclude branches created by Dependabot to avoid triggering current workflow
@@ -26,10 +26,10 @@ jobs:
        # Here support the latest three minor releases of Kubernetes, this can be considered to be roughly
        # the same as the End of Life of the Kubernetes release: https://kubernetes.io/releases/
        # Please remember to update the CI Schedule Workflow when we add a new version.
-        k8s: [ v1.29.0, v1.30.0, v1.31.0 ]
+        k8s: [ v1.31.0, v1.32.0, v1.33.0 ]
    steps:
      - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          fetch-depth: 0
 
@@ -47,6 +47,8 @@ jobs:
        uses: syntaqx/setup-kustomize@v1
        with:
          kustomize-version: ${{ env.KUSTOMIZE_VERSION }}
+        env:
+          GITHUB_TOKEN: ${{ github.token }}
 
      - name: Run chart-testing (template)
        run: |
@@ -24,10 +24,10 @@ jobs:
        # Here support the latest three minor releases of Kubernetes, this can be considered to be roughly
        # the same as the End of Life of the Kubernetes release: https://kubernetes.io/releases/
        # Please remember to update the CI Schedule Workflow when we add a new version.
-        k8s: [ v1.29.0, v1.30.0, v1.31.0 ]
+        k8s: [ v1.31.0, v1.32.0, v1.33.0 ]
    steps:
      - name: checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          # Number of commits to fetch. 0 indicates all history for all branches and tags.
          # We need to guess version via git tags.
@@ -70,10 +70,10 @@ jobs:
      fail-fast: false
      matrix:
        # Latest three minor releases of Kubernetes
-        k8s: [ v1.29.0, v1.30.0, v1.31.0 ]
+        k8s: [ v1.31.0, v1.32.0, v1.33.0 ]
    steps:
      - name: checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          fetch-depth: 0
      - name: install Go
@@ -24,7 +24,7 @@ jobs:
        # Here support the latest three minor releases of Kubernetes, this can be considered to be roughly
        # the same as the End of Life of the Kubernetes release: https://kubernetes.io/releases/
        # Please remember to update the CI Schedule Workflow when we add a new version.
-        k8s: [ v1.29.0, v1.30.0, v1.31.0 ]
+        k8s: [ v1.31.0, v1.32.0, v1.33.0 ]
    steps:
      # Free up disk space on Ubuntu
      - name: Free Disk Space (Ubuntu)
@@ -40,7 +40,7 @@ jobs:
          docker-images: false
          swap-storage: false
      - name: checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          # Number of commits to fetch. 0 indicates all history for all branches and tags.
          # We need to guess version via git tags.
@@ -23,7 +23,7 @@ jobs:
          - amd64
          - arm64
    steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
      - name: Set up Go
        uses: actions/setup-go@v5
        with:
@@ -52,7 +52,7 @@ jobs:
      hashes: ${{ steps.hash.outputs.hashes }}
    steps:
      - name: download cli
-        uses: actions/download-artifact@v4
+        uses: actions/download-artifact@v5
        with:
          path: _output/release
          pattern: cli-*
@@ -84,7 +84,7 @@ jobs:
      hashes: ${{ steps.hash.outputs.hashes }}
    runs-on: ubuntu-22.04
    steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
      - name: Rename the crds directory
        run: |
          mv ./charts/karmada/_crds ./charts/karmada/crds
@@ -126,7 +126,7 @@ jobs:
      hashes: ${{ steps.hash.outputs.hashes }}
    runs-on: ubuntu-22.04
    steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
      - name: Making helm charts
        env:
          VERSION: ${{ github.ref_name }}
@@ -165,9 +165,9 @@ jobs:
      hashes: ${{ steps.sbom-hash.outputs.hashes}}
    runs-on: ubuntu-22.04
    steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
      - name: Generate sbom for karmada file system
-        uses: aquasecurity/trivy-action@0.30.0
+        uses: aquasecurity/trivy-action@0.32.0
        with:
          scan-type: 'fs'
          format: 'spdx'
@@ -220,7 +220,7 @@ jobs:
          echo "Got the latest tag:$LATEST_TAG"
          echo "event.tag:"${{ github.event.release.tag_name }}
          echo "latestTag=$LATEST_TAG" >> "$GITHUB_OUTPUT"
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
        if: steps.get-latest-tag.outputs.latestTag == github.event.release.tag_name
      - name: Update new version in krew-index
        if: steps.get-latest-tag.outputs.latestTag == github.event.release.tag_name
@@ -15,7 +15,7 @@ jobs:
    runs-on: ubuntu-22.04
    steps:
      - name: checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          # fetch-depth:
          # 0 indicates all history for all branches and tags.
@@ -15,7 +15,7 @@ jobs:
    runs-on: ubuntu-22.04
    steps:
      - name: checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          fetch-depth: 0
      - name: install Go
@@ -1 +1 @@
-1.23.7
+1.24.6
189 .golangci.yml
@@ -1,10 +1,11 @@
 # This files contains all configuration options for analysis running.
 # More details please refer to: https://golangci-lint.run/usage/configuration/
 
+version: "2"
 run:
-  # timeout for analysis, e.g. 30s, 5m, default is 1m
+  # timeout for analysis, e.g. 30s, 5m, default timeout is disabled
  timeout: 10m
 
 
 # One of 'readonly' and 'vendor'.
 # - readonly: the go command is disallowed from the implicit automatic updating of go.mod described above.
 # Instead, it fails when any changes to go.mod are needed. This setting is most useful to check
@@ -14,95 +15,95 @@ run:
  modules-download-mode: readonly
 linters:
  enable:
-    # linters maintained by golang.org
-    - gofmt
-    - goimports
-    - govet
-    # linters default enabled by golangci-lint .
-    - errcheck
-    - gosimple
-    - ineffassign
-    - staticcheck
-    - typecheck
-    - unused
-    # other linters supported by golangci-lint.
-    - gci
-    - gocyclo
-    - gosec
-    - misspell
-    - whitespace
-    - revive
-    - depguard
-
-linters-settings:
-  depguard:
-    rules:
-      main:
-        deny:
-          - pkg: "io/ioutil"
-            desc: "replaced by io and os packages since Go 1.16: https://tip.golang.org/doc/go1.16#ioutil"
-  goimports:
-    local-prefixes: github.com/karmada-io/karmada
-  gocyclo:
-    # minimal cyclomatic complexity to report
-    min-complexity: 15
-  gci:
-    sections:
-      - Standard
-      - Default
-      - Prefix(github.com/karmada-io/karmada)
-  revive:
-    rules:
-      # Disable if-return as it is too strict and not always useful.
-      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#if-return
-      - name: if-return
-        disabled: true
-      - name: package-comments
-      - name: superfluous-else
-        arguments:
-          - preserveScope
-      - name: error-strings
-      - name: error-return
-      - name: receiver-naming
-      - name: increment-decrement
-      - name: range
-      - name: error-naming
-      - name: dot-imports
-      - name: errorf
-      - name: exported
-      - name: var-declaration
-      - name: blank-imports
-      - name: indent-error-flow
-      - name: unreachable-code
-      - name: var-naming
-      - name: redefines-builtin-id
-      - name: unused-parameter
-      - name: context-as-argument
-      - name: context-keys-type
-      - name: unexported-return
-      - name: time-naming
-      - name: empty-block
-
-issues:
-  # The list of ids of default excludes to include or disable. By default it's empty.
-  include:
-    # disable excluding of issues about comments from revive
-    # see https://golangci-lint.run/usage/configuration/#command-line-options for more info
-    - EXC0012
-    - EXC0013
-    - EXC0014
-  # Which dirs to exclude: issues from them won't be reported.
-  # Can use regexp here: `generated.*`, regexp is applied on full path,
-  # including the path prefix if one is set.
-  # Default dirs are skipped independently of this option's value (see exclude-dirs-use-default).
-  # "/" will be replaced by current OS file path separator to properly work on Windows.
-  # Default: []
-  exclude-dirs:
-    - hack/tools/preferredimports # This code is directly lifted from the Kubernetes codebase, skip checking
-    - (^|/)vendor($|/)
-    - (^|/)third_party($|/)
-    - pkg/util/lifted # This code is lifted from other projects(Kubernetes, Kubefed, and so on), skip checking.
-  # Enables exclude of directories:
-  # - vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
-  # Default: true
-  exclude-dirs-use-default: false
+    - depguard
+    - gocyclo
+    - gosec
+    - misspell
+    - revive
+    - whitespace
+  settings:
+    depguard:
+      rules:
+        main:
+          deny:
+            - pkg: io/ioutil
+              desc: 'replaced by io and os packages since Go 1.16: https://tip.golang.org/doc/go1.16#ioutil'
+    gocyclo:
+      # minimal cyclomatic complexity to report
+      min-complexity: 15
+    revive:
+      rules:
+        # Disable if-return as it is too strict and not always useful.
+        # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#if-return
+        - name: if-return
+          disabled: true
+        # Disable package-comments for now since most packages in this project are primarily for internal use.
+        # If we decide to provide public packages in the future, we can move them to a separate
+        # repository and revisit adding package-level comments at that time.
+        - name: package-comments
+          disabled: true
+        - name: superfluous-else
+          arguments:
+            - preserveScope
+        - name: error-strings
+        - name: error-return
+        - name: receiver-naming
+        - name: increment-decrement
+        - name: range
+        - name: error-naming
+        - name: dot-imports
+        - name: errorf
+        - name: exported
+        - name: var-declaration
+        - name: blank-imports
+        - name: indent-error-flow
+        - name: unreachable-code
+        - name: var-naming
+        - name: redefines-builtin-id
+        - name: unused-parameter
+        - name: context-as-argument
+        - name: context-keys-type
+        - name: unexported-return
+        - name: time-naming
+        - name: empty-block
+    staticcheck:
+      checks:
+        - all
+        # Disable QF1008 to retain embedded fields for better readability.
+        - "-QF1008"
+        # Disable ST1000 (staticcheck) for now since most packages in this project are primarily for internal use.
+        # If we decide to provide public packages in the future, we can move them to a separate
+        # repository and revisit adding package-level comments at that time.
+        - "-ST1000"
+  exclusions:
+    generated: lax
+    presets:
+      - common-false-positives
+      - legacy
+      - std-error-handling
+    paths:
+      - hack/tools/preferredimports
+      - (^|/)vendor($|/)
+      - (^|/)third_party($|/)
+      - pkg/util/lifted
+formatters:
+  enable:
+    - gci
+    - gofmt
+    - goimports
+  settings:
+    gci:
+      sections:
+        - Standard
+        - Default
+        - Prefix(github.com/karmada-io/karmada)
+    goimports:
+      local-prefixes:
+        - github.com/karmada-io/karmada
+  exclusions:
+    generated: lax
+    paths:
+      - hack/tools/preferredimports
+      - (^|/)vendor($|/)
+      - (^|/)third_party($|/)
+      - pkg/util/lifted
17 README.md
@@ -186,12 +186,17 @@ nginx   2/2     2            2           20s
 
 ## Kubernetes compatibility
 
-| | Kubernetes 1.16 | Kubernetes 1.17 | Kubernetes 1.18 | Kubernetes 1.19 | Kubernetes 1.20 | Kubernetes 1.21 | Kubernetes 1.22 | Kubernetes 1.23 | Kubernetes 1.24 | Kubernetes 1.25 | Kubernetes 1.26 | Kubernetes 1.27 | Kubernetes 1.28 | Kubernetes 1.29 |
-|-----------------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|
-| Karmada v1.7 | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
-| Karmada v1.8 | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
-| Karmada v1.9 | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
-| Karmada HEAD (master) | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
+Karmada is compatible with a wide range of Kubernetes versions. For detailed compatibility instructions,
+please refer to the [Kubernetes Compatibility](https://karmada.io/docs/administrator/compatibility/).
+
+The following table shows the compatibility test results against the latest 10 Kubernetes versions:
+
+| | Kubernetes 1.33 | Kubernetes 1.32 | Kubernetes 1.31 | Kubernetes 1.30 | Kubernetes 1.29 | Kubernetes 1.28 | Kubernetes 1.27 | Kubernetes 1.26 | Kubernetes 1.25 | Kubernetes 1.24 | Kubernetes 1.23 |
+|-----------------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|
+| Karmada v1.12 | | | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
+| Karmada v1.13 | | | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
+| Karmada v1.14 | | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
+| Karmada HEAD (master) | | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
 
 Key:
 * `✓` Karmada and the Kubernetes version are exactly compatible.
29 ROADMAP.md
@@ -1,29 +1,4 @@
 # Karmada Roadmap
 
-This document defines a high level roadmap for Karmada development and upcoming releases.
-Community and contributor involvement is vital for successfully implementing all desired items for each release.
-We hope that the items listed below will inspire further engagement from the community to keep Karmada progressing and shipping exciting and valuable features.
-
-## 2024 H1
-- Lazy mode of PropagationPolicy
-- Cluster Problem Detector(CPD) - Part one: Cluster condition-based remedy system
-- Scheduler Enhancement - enable scheduler estimator supports resource quota
-- Scheduler Enhancement - Provide a mechanism of re-balance workloads
-
-## 2024 H2
-- AI training and batch job support (Including PyTorch, Spark, Flink and so on)
-- Karmada Dashboard - alpha release
-- Multi-cluster workflow
-- Scheduler Enhancement - Optimize scheduling with GPU resources
-
-## Pending
-- Cluster addon management
-- Multi-cluster Application
-- Multi-cluster monitoring
-- Multi-cluster logging
-- Multi-cluster storage
-- Multi-cluster RBAC
-- Multi-cluster networking
-- Data migration across clusters
-- Image registry across clouds
-- Multi-cluster Service Mesh solutions
+This document has been moved to [karmada-io/community](https://github.com/karmada-io/community/blob/main/ROADMAP.md)
+to include all efforts for this repository and subprojects.
File diff suppressed because it is too large
@@ -24,6 +24,11 @@ spec:
        - name: karmada-agent
          image: docker.io/karmada/karmada-agent:latest
          imagePullPolicy: {{image_pull_policy}}
+          env:
+            - name: POD_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.podIP
          command:
            - /bin/karmada-agent
            - --karmada-kubeconfig=/etc/karmada/config/karmada.config
@@ -31,9 +36,10 @@ spec:
            - --cluster-name={{member_cluster_name}}
            - --cluster-api-endpoint={{member_cluster_api_endpoint}}
            - --cluster-status-update-frequency=10s
-            - --health-probe-bind-address=0.0.0.0:10357
-            - --metrics-bind-address=:8080
+            - --metrics-bind-address=$(POD_IP):8080
+            - --health-probe-bind-address=$(POD_IP):10357
            - --feature-gates=CustomizedClusterResourceModeling=true,MultiClusterService=true
+            - --logging-format=json
            - --v=4
          livenessProbe:
            httpGet:
@@ -27,6 +27,11 @@ spec:
          securityContext:
            allowPrivilegeEscalation: false
            privileged: false
+          env:
+            - name: POD_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.podIP
          command:
            - /bin/karmada-aggregated-apiserver
            - --kubeconfig=/etc/karmada/config/karmada.config
@@ -42,6 +47,8 @@ spec:
            - --audit-log-maxage=0
            - --audit-log-maxbackup=0
            - --tls-min-version=VersionTLS13
+            - --logging-format=json
+            - --bind-address=$(POD_IP)
          resources:
            requests:
              cpu: 100m
@@ -26,15 +26,23 @@ spec:
            privileged: false
          image: docker.io/karmada/karmada-controller-manager:latest
          imagePullPolicy: IfNotPresent
+          env:
+            - name: POD_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.podIP
+            - name: KUBE_CACHE_MUTATION_DETECTOR
+              value: "{{KUBE_CACHE_MUTATION_DETECTOR}}"
          command:
            - /bin/karmada-controller-manager
            - --kubeconfig=/etc/karmada/config/karmada.config
-            - --metrics-bind-address=:8080
            - --cluster-status-update-frequency=10s
-            - --failover-eviction-timeout=30s
            - --controllers=*,hpaScaleTargetMarker,deploymentReplicasSyncer
            - --feature-gates=AllAlpha=true,AllBeta=true
-            - --health-probe-bind-address=0.0.0.0:10357
+            - --metrics-bind-address=$(POD_IP):8080
+            - --health-probe-bind-address=$(POD_IP):10357
+            - --enable-no-execute-taint-eviction=true
+            - --logging-format=json
            - --v=4
          livenessProbe:
            httpGet:
@@ -26,14 +26,22 @@ spec:
            privileged: false
          image: docker.io/karmada/karmada-descheduler:latest
          imagePullPolicy: IfNotPresent
+          env:
+            - name: POD_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.podIP
+            - name: KUBE_CACHE_MUTATION_DETECTOR
+              value: "{{KUBE_CACHE_MUTATION_DETECTOR}}"
          command:
            - /bin/karmada-descheduler
            - --kubeconfig=/etc/karmada/config/karmada.config
-            - --metrics-bind-address=0.0.0.0:8080
-            - --health-probe-bind-address=0.0.0.0:10358
+            - --metrics-bind-address=$(POD_IP):8080
+            - --health-probe-bind-address=$(POD_IP):10358
            - --scheduler-estimator-ca-file=/etc/karmada/pki/scheduler-estimator-client/ca.crt
            - --scheduler-estimator-cert-file=/etc/karmada/pki/scheduler-estimator-client/tls.crt
            - --scheduler-estimator-key-file=/etc/karmada/pki/scheduler-estimator-client/tls.key
+            - --logging-format=json
            - --v=4
          livenessProbe:
            httpGet:
@@ -27,10 +27,17 @@ spec:
            privileged: false
          image: docker.io/karmada/karmada-metrics-adapter:latest
          imagePullPolicy: IfNotPresent
+          env:
+            - name: POD_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.podIP
+            - name: KUBE_CACHE_MUTATION_DETECTOR
+              value: "{{KUBE_CACHE_MUTATION_DETECTOR}}"
          command:
            - /bin/karmada-metrics-adapter
            - --kubeconfig=/etc/karmada/config/karmada.config
-            - --metrics-bind-address=:8080
+            - --metrics-bind-address=$(POD_IP):8080
            - --authentication-kubeconfig=/etc/karmada/config/karmada.config
            - --authorization-kubeconfig=/etc/karmada/config/karmada.config
            - --client-ca-file=/etc/karmada/pki/server/ca.crt
@@ -40,6 +47,8 @@ spec:
            - --audit-log-maxage=0
            - --audit-log-maxbackup=0
            - --tls-min-version=VersionTLS13
+            - --bind-address=$(POD_IP)
+            - --logging-format=json
          readinessProbe:
            httpGet:
              path: /readyz
@@ -26,6 +26,13 @@ spec:
            privileged: false
          image: docker.io/karmada/karmada-scheduler-estimator:latest
          imagePullPolicy: IfNotPresent
+          env:
+            - name: POD_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.podIP
+            - name: KUBE_CACHE_MUTATION_DETECTOR
+              value: "{{KUBE_CACHE_MUTATION_DETECTOR}}"
          command:
            - /bin/karmada-scheduler-estimator
            - --kubeconfig=/etc/{{member_cluster_name}}-kubeconfig
@@ -33,8 +40,9 @@ spec:
            - --grpc-auth-cert-file=/etc/karmada/pki/server/tls.crt
            - --grpc-auth-key-file=/etc/karmada/pki/server/tls.key
            - --grpc-client-ca-file=/etc/karmada/pki/server/ca.crt
-            - --metrics-bind-address=0.0.0.0:8080
-            - --health-probe-bind-address=0.0.0.0:10351
+            - --metrics-bind-address=$(POD_IP):8080
+            - --health-probe-bind-address=$(POD_IP):10351
+            - --logging-format=json
          livenessProbe:
            httpGet:
              path: /healthz
@@ -39,16 +39,24 @@ spec:
            - containerPort: 8080
              name: metrics
              protocol: TCP
+          env:
+            - name: POD_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.podIP
+            - name: KUBE_CACHE_MUTATION_DETECTOR
+              value: "{{KUBE_CACHE_MUTATION_DETECTOR}}"
          command:
            - /bin/karmada-scheduler
            - --kubeconfig=/etc/karmada/config/karmada.config
-            - --metrics-bind-address=0.0.0.0:8080
-            - --health-probe-bind-address=0.0.0.0:10351
+            - --metrics-bind-address=$(POD_IP):8080
+            - --health-probe-bind-address=$(POD_IP):10351
            - --enable-scheduler-estimator=true
            - --scheduler-estimator-ca-file=/etc/karmada/pki/scheduler-estimator-client/ca.crt
            - --scheduler-estimator-cert-file=/etc/karmada/pki/scheduler-estimator-client/tls.crt
            - --scheduler-estimator-key-file=/etc/karmada/pki/scheduler-estimator-client/tls.key
            - --feature-gates=AllAlpha=true,AllBeta=true
+            - --logging-format=json
            - --v=4
          volumeMounts:
            - name: karmada-config
@@ -27,6 +27,13 @@ spec:
            privileged: false
          image: docker.io/karmada/karmada-search:latest
          imagePullPolicy: IfNotPresent
+          env:
+            - name: POD_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.podIP
+            - name: KUBE_CACHE_MUTATION_DETECTOR
+              value: "{{KUBE_CACHE_MUTATION_DETECTOR}}"
          command:
            - /bin/karmada-search
            - --kubeconfig=/etc/karmada/config/karmada.config
@@ -42,6 +49,8 @@ spec:
            - --audit-log-maxage=0
            - --audit-log-maxbackup=0
            - --tls-min-version=VersionTLS13
+            - --bind-address=$(POD_IP)
+            - --logging-format=json
          livenessProbe:
            httpGet:
              path: /livez
@@ -26,15 +26,22 @@ spec:
            privileged: false
          image: docker.io/karmada/karmada-webhook:latest
          imagePullPolicy: IfNotPresent
+          env:
+            - name: POD_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.podIP
          command:
            - /bin/karmada-webhook
            - --kubeconfig=/etc/karmada/config/karmada.config
-            - --bind-address=0.0.0.0
-            - --metrics-bind-address=:8080
            - --default-not-ready-toleration-seconds=30
            - --default-unreachable-toleration-seconds=30
+            - --bind-address=$(POD_IP)
+            - --metrics-bind-address=$(POD_IP):8080
+            - --health-probe-bind-address=$(POD_IP):8000
            - --secure-port=8443
            - --cert-dir=/etc/karmada/pki/server
            - --feature-gates=AllAlpha=true,AllBeta=true
+            - --allow-no-execute-taint-policy=true
+            - --logging-format=json
            - --v=4
          ports:
            - containerPort: 8443
@@ -296,3 +296,31 @@ webhooks:
    sideEffects: None
    admissionReviewVersions: [ "v1" ]
    timeoutSeconds: 3
+  - name: resourcebinding.karmada.io
+    rules:
+      - operations: ["CREATE", "UPDATE"]
+        apiGroups: ["work.karmada.io"]
+        apiVersions: ["*"]
+        resources: ["resourcebindings"]
+        scope: "Namespaced"
+    clientConfig:
+      url: https://karmada-webhook.karmada-system.svc:443/validate-resourcebinding
+      caBundle: {{caBundle}}
+    failurePolicy: Fail
+    sideEffects: NoneOnDryRun
+    admissionReviewVersions: ["v1"]
+    timeoutSeconds: 3
+  - name: clustertaintpolicy.karmada.io
+    rules:
+      - operations: ["CREATE", "UPDATE"]
+        apiGroups: ["policy.karmada.io"]
+        apiVersions: ["*"]
+        resources: ["clustertaintpolicies"]
+        scope: "Cluster"
+    clientConfig:
+      url: https://karmada-webhook.karmada-system.svc:443/validate-clustertaintpolicy
+      caBundle: {{caBundle}}
+    failurePolicy: Fail
+    sideEffects: None
+    admissionReviewVersions: [ "v1" ]
+    timeoutSeconds: 3
@@ -1,6 +1,26 @@
 apiVersion: v1
 entries:
  karmada:
+    - apiVersion: v2
+      appVersion: v1.1.0
+      created: "2025-06-13T16:23:17.081220385+08:00"
+      dependencies:
+        - name: common
+          repository: https://charts.bitnami.com/bitnami
+          version: 2.x.x
+      description: A Helm chart for karmada
+      digest: cd93e64198f364ff2330d718d80b8f321530ab8147521ef2b6263198a35bc7e0
+      kubeVersion: '>= 1.16.0-0'
+      maintainers:
+        - email: chaosi@zju.edu.cn
+          name: chaosi-zju
+        - email: amiralavi7@gmail.com
+          name: a7i
+      name: karmada
+      type: application
+      urls:
+        - https://github.com/karmada-io/karmada/releases/download/v1.14.0/karmada-chart-v1.14.0.tgz
+      version: v1.14.0
    - apiVersion: v2
      appVersion: v1.1.0
      created: "2025-03-10T11:24:11.714162019+08:00"
@@ -424,4 +444,4 @@ entries:
      urls:
        - https://github.com/karmada-io/karmada/releases/download/v1.8.0/karmada-operator-chart-v1.8.0.tgz
      version: v1.8.0
-generated: "2025-03-10T11:24:11.702431444+08:00"
+generated: "2025-06-13T16:23:17.069242033+08:00"
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
  annotations:
-    controller-gen.kubebuilder.io/version: v0.16.5
+    controller-gen.kubebuilder.io/version: v0.18.0
  name: karmadas.operator.karmada.io
 spec:
  group: operator.karmada.io
@@ -653,6 +653,8 @@ spec:
        description: |-
          awsElasticBlockStore represents an AWS Disk resource that is attached to a
          kubelet's host machine and then exposed to the pod.
+          Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree
+          awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver.
          More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
        properties:
          fsType:
@@ -684,8 +686,10 @@ spec:
          - volumeID
          type: object
        azureDisk:
-          description: azureDisk represents an Azure Data Disk
-            mount on the host and bind mount to the pod.
+          description: |-
+            azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+            Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type
+            are redirected to the disk.csi.azure.com CSI driver.
          properties:
            cachingMode:
              description: 'cachingMode is the Host Caching mode:
@@ -724,8 +728,10 @@ spec:
          - diskURI
          type: object
        azureFile:
-          description: azureFile represents an Azure File Service
-            mount on the host and bind mount to the pod.
+          description: |-
+            azureFile represents an Azure File Service mount on the host and bind mount to the pod.
+            Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type
+            are redirected to the file.csi.azure.com CSI driver.
          properties:
            readOnly:
              description: |-
@@ -744,8 +750,9 @@ spec:
          - shareName
          type: object
        cephfs:
-          description: cephFS represents a Ceph FS mount on the
-            host that shares a pod's lifetime
+          description: |-
+            cephFS represents a Ceph FS mount on the host that shares a pod's lifetime.
+            Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.
          properties:
            monitors:
              description: |-
@@ -798,6 +805,8 @@ spec:
        cinder:
          description: |-
            cinder represents a cinder volume attached and mounted on kubelets host machine.
+            Deprecated: Cinder is deprecated. All operations for the in-tree cinder type
+            are redirected to the cinder.csi.openstack.org CSI driver.
            More info: https://examples.k8s.io/mysql-cinder-pd/README.md
          properties:
            fsType:
@@ -909,7 +918,7 @@ spec:
        csi:
          description: csi (Container Storage Interface) represents
            ephemeral storage that is handled by certain external
-            CSI drivers (Beta feature).
+            CSI drivers.
          properties:
            driver:
              description: |-
@@ -1397,6 +1406,7 @@ spec:
          description: |-
            flexVolume represents a generic volume resource that is
            provisioned/attached using an exec based plugin.
+            Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
          properties:
            driver:
              description: driver is the name of the driver to
@@ -1442,9 +1452,9 @@ spec:
          - driver
          type: object
        flocker:
-          description: flocker represents a Flocker volume attached
-            to a kubelet's host machine. This depends on the Flocker
-            control service being running
+          description: |-
+            flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running.
+            Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.
          properties:
            datasetName:
              description: |-
@@ -1460,6 +1470,8 @@ spec:
          description: |-
            gcePersistentDisk represents a GCE Disk resource that is attached to a
            kubelet's host machine and then exposed to the pod.
+            Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree
+            gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver.
            More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
          properties:
            fsType:
@@ -1495,7 +1507,7 @@ spec:
        gitRepo:
          description: |-
            gitRepo represents a git repository at a particular revision.
-            DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+            Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an
            EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
            into the Pod's container.
          properties:
@@ -1519,6 +1531,7 @@ spec:
        glusterfs:
          description: |-
            glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+            Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
            More info: https://examples.k8s.io/volumes/glusterfs/README.md
          properties:
            endpoints:
@@ -1578,7 +1591,7 @@ spec:
            The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
            The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
            The volume will be mounted read-only (ro) and non-executable files (noexec).
-            Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
+            Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
            The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
          properties:
            pullPolicy:
@@ -1728,9 +1741,9 @@ spec:
          - claimName
          type: object
        photonPersistentDisk:
-          description: photonPersistentDisk represents a PhotonController
-            persistent disk attached and mounted on kubelets host
-            machine
+          description: |-
+            photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine.
+            Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.
          properties:
            fsType:
              description: |-
@@ -1746,8 +1759,11 @@ spec:
          - pdID
          type: object
        portworxVolume:
-          description: portworxVolume represents a portworx volume
-            attached and mounted on kubelets host machine
+          description: |-
+            portworxVolume represents a portworx volume attached and mounted on kubelets host machine.
+            Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type
+            are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate
+            is on.
          properties:
            fsType:
              description: |-
@ -2118,8 +2134,9 @@ spec:
|
|||
x-kubernetes-list-type: atomic
|
||||
type: object
|
||||
quobyte:
|
||||
description: quobyte represents a Quobyte mount on the
|
||||
host that shares a pod's lifetime
|
||||
description: |-
|
||||
quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
|
||||
Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
|
||||
properties:
|
||||
group:
|
||||
description: |-
|
||||
|
@ -2158,6 +2175,7 @@ spec:
|
|||
rbd:
|
||||
description: |-
|
||||
rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
|
||||
Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
|
||||
More info: https://examples.k8s.io/volumes/rbd/README.md
|
||||
properties:
|
||||
fsType:
|
||||
|
@@ -2230,8 +2248,9 @@ spec:
             - monitors
             type: object
           scaleIO:
-            description: scaleIO represents a ScaleIO persistent
-              volume attached and mounted on Kubernetes nodes.
+            description: |-
+              scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
+              Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.
             properties:
               fsType:
                 default: xfs
@@ -2364,8 +2383,9 @@ spec:
                 type: string
             type: object
           storageos:
-            description: storageOS represents a StorageOS volume
-              attached and mounted on Kubernetes nodes.
+            description: |-
+              storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
+              Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.
             properties:
               fsType:
                 description: |-
@@ -2410,8 +2430,10 @@ spec:
                 type: string
             type: object
           vsphereVolume:
-            description: vsphereVolume represents a vSphere volume
-              attached and mounted on kubelets host machine
+            description: |-
+              vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine.
+              Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type
+              are redirected to the csi.vsphere.vmware.com CSI driver.
             properties:
               fsType:
                 description: |-
@@ -2471,6 +2493,17 @@ spec:
                 and services.
                 More info: http://kubernetes.io/docs/user-guide/labels
               type: object
+            loadBalancerClass:
+              description: |-
+                LoadBalancerClass specifies the load balancer implementation class for the Karmada API server.
+                This field is applicable only when ServiceType is set to LoadBalancer.
+                If specified, the service will be processed by the load balancer implementation that matches the specified class.
+                By default, this is not set and the LoadBalancer type of Service uses the cloud provider's default load balancer
+                implementation.
+                Once set, it cannot be changed. The value must be a label-style identifier, with an optional prefix such as
+                "internal-vip" or "example.com/internal-vip".
+                More info: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class
+              type: string
             priorityClassName:
               default: system-node-critical
               description: |-
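A minimal sketch of the new field in use; the `spec.components.karmadaAPIServer` path is an assumption for illustration, while the LoadBalancer-only applicability and the label-style value format come from the schema above:

```yaml
# Hedged sketch -- parent path assumed; loadBalancerClass is immutable once set.
spec:
  components:
    karmadaAPIServer:
      serviceType: LoadBalancer
      loadBalancerClass: example.com/internal-vip
```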
@@ -2741,7 +2774,7 @@ spec:
                 Cannot be updated.
               items:
                 description: EnvFromSource represents the source of
-                  a set of ConfigMaps
+                  a set of ConfigMaps or Secrets
                 properties:
                   configMapRef:
                     description: The ConfigMap to select from
@@ -2762,8 +2795,8 @@ spec:
                     type: object
                     x-kubernetes-map-type: atomic
                   prefix:
-                    description: An optional identifier to prepend
-                      to each key in the ConfigMap. Must be a C_IDENTIFIER.
+                    description: Optional text to prepend to the name
+                      of each environment variable. Must be a C_IDENTIFIER.
                     type: string
                   secretRef:
                     description: The Secret to select from
@@ -2814,7 +2847,8 @@ spec:
                 More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
               properties:
                 exec:
-                  description: Exec specifies the action to take.
+                  description: Exec specifies a command to execute
+                    in the container.
                   properties:
                     command:
                       description: |-
@@ -2829,7 +2863,7 @@ spec:
                     x-kubernetes-list-type: atomic
                   type: object
                 httpGet:
-                  description: HTTPGet specifies the http request
+                  description: HTTPGet specifies an HTTP GET request
                     to perform.
                   properties:
                     host:
@@ -2880,8 +2914,8 @@ spec:
                   - port
                   type: object
                 sleep:
-                  description: Sleep represents the duration that
-                    the container should sleep before being terminated.
+                  description: Sleep represents a duration that
+                    the container should sleep.
                   properties:
                     seconds:
                       description: Seconds is the number of seconds
@@ -2894,8 +2928,8 @@ spec:
                 tcpSocket:
                   description: |-
                     Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
-                    for the backward compatibility. There are no validation of this field and
-                    lifecycle hooks will fail in runtime when tcp handler is specified.
+                    for backward compatibility. There is no validation of this field and
+                    lifecycle hooks will fail at runtime when it is specified.
                   properties:
                     host:
                       description: 'Optional: Host name to connect
@@ -2927,7 +2961,8 @@ spec:
                 More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
               properties:
                 exec:
-                  description: Exec specifies the action to take.
+                  description: Exec specifies a command to execute
+                    in the container.
                   properties:
                     command:
                       description: |-
@@ -2942,7 +2977,7 @@ spec:
                     x-kubernetes-list-type: atomic
                   type: object
                 httpGet:
-                  description: HTTPGet specifies the http request
+                  description: HTTPGet specifies an HTTP GET request
                     to perform.
                   properties:
                     host:
@@ -2993,8 +3028,8 @@ spec:
                   - port
                   type: object
                 sleep:
-                  description: Sleep represents the duration that
-                    the container should sleep before being terminated.
+                  description: Sleep represents a duration that
+                    the container should sleep.
                   properties:
                     seconds:
                       description: Seconds is the number of seconds
@@ -3007,8 +3042,8 @@ spec:
                 tcpSocket:
                   description: |-
                     Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
-                    for the backward compatibility. There are no validation of this field and
-                    lifecycle hooks will fail in runtime when tcp handler is specified.
+                    for backward compatibility. There is no validation of this field and
+                    lifecycle hooks will fail at runtime when it is specified.
                   properties:
                     host:
                       description: 'Optional: Host name to connect
@@ -3027,6 +3062,12 @@ spec:
                   - port
                   type: object
                 type: object
+              stopSignal:
+                description: |-
+                  StopSignal defines which signal will be sent to a container when it is being stopped.
+                  If not specified, the default is defined by the container runtime in use.
+                  StopSignal can only be set for Pods with a non-empty .spec.os.name
+                type: string
             type: object
           livenessProbe:
             description: |-
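For reference, a hedged sketch of how the new field would appear on a container; the surrounding container stanza is illustrative, while the `.spec.os.name` requirement comes from the description above:

```yaml
# Illustrative pod snippet -- stopSignal sits under the container's lifecycle.
spec:
  os:
    name: linux           # stopSignal requires a non-empty .spec.os.name
  containers:
  - name: app
    image: nginx
    lifecycle:
      stopSignal: SIGUSR1
```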
@@ -3036,7 +3077,8 @@ spec:
               More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
             properties:
               exec:
-                description: Exec specifies the action to take.
+                description: Exec specifies a command to execute
+                  in the container.
                 properties:
                   command:
                     description: |-
@@ -3057,8 +3099,7 @@ spec:
                 format: int32
                 type: integer
               grpc:
-                description: GRPC specifies an action involving
-                  a GRPC port.
+                description: GRPC specifies a GRPC HealthCheckRequest.
                 properties:
                   port:
                     description: Port number of the gRPC service.
@@ -3077,7 +3118,7 @@ spec:
                 - port
                 type: object
               httpGet:
-                description: HTTPGet specifies the http request
+                description: HTTPGet specifies an HTTP GET request
                   to perform.
                 properties:
                   host:
@@ -3145,7 +3186,7 @@ spec:
                 format: int32
                 type: integer
               tcpSocket:
-                description: TCPSocket specifies an action involving
+                description: TCPSocket specifies a connection to
                   a TCP port.
                 properties:
                   host:
@@ -3251,7 +3292,8 @@ spec:
               More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
             properties:
               exec:
-                description: Exec specifies the action to take.
+                description: Exec specifies a command to execute
+                  in the container.
                 properties:
                   command:
                     description: |-
@@ -3272,8 +3314,7 @@ spec:
                 format: int32
                 type: integer
               grpc:
-                description: GRPC specifies an action involving
-                  a GRPC port.
+                description: GRPC specifies a GRPC HealthCheckRequest.
                 properties:
                   port:
                     description: Port number of the gRPC service.
@@ -3292,7 +3333,7 @@ spec:
                 - port
                 type: object
               httpGet:
-                description: HTTPGet specifies the http request
+                description: HTTPGet specifies an HTTP GET request
                   to perform.
                 properties:
                   host:
@@ -3360,7 +3401,7 @@ spec:
                 format: int32
                 type: integer
               tcpSocket:
-                description: TCPSocket specifies an action involving
+                description: TCPSocket specifies a connection to
                   a TCP port.
                 properties:
                   host:
@@ -3710,7 +3751,8 @@ spec:
               More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
             properties:
               exec:
-                description: Exec specifies the action to take.
+                description: Exec specifies a command to execute
+                  in the container.
                 properties:
                   command:
                     description: |-
@@ -3731,8 +3773,7 @@ spec:
                 format: int32
                 type: integer
               grpc:
-                description: GRPC specifies an action involving
-                  a GRPC port.
+                description: GRPC specifies a GRPC HealthCheckRequest.
                 properties:
                   port:
                     description: Port number of the gRPC service.
@@ -3751,7 +3792,7 @@ spec:
                 - port
                 type: object
               httpGet:
-                description: HTTPGet specifies the http request
+                description: HTTPGet specifies an HTTP GET request
                   to perform.
                 properties:
                   host:
@@ -3819,7 +3860,7 @@ spec:
                 format: int32
                 type: integer
               tcpSocket:
-                description: TCPSocket specifies an action involving
+                description: TCPSocket specifies a connection to
                   a TCP port.
                 properties:
                   host:
@@ -5167,9 +5208,27 @@ spec:
              description: HTTPSource specifies how to download the CRD tarball
                via either HTTP or HTTPS protocol.
              properties:
+               proxy:
+                 description: |-
+                   Proxy specifies the configuration of a proxy server to use when downloading the CRD tarball.
+                   When set, the operator will use the configuration to determine how to establish a connection to the proxy to fetch the tarball from the URL specified above.
+                   This is useful in environments where direct access to the server hosting the CRD tarball is restricted and a proxy must be used to reach that server.
+                   If a proxy configuration is not set, the operator will attempt to download the tarball directly from the URL specified above without using a proxy.
+                 properties:
+                   proxyURL:
+                     description: |-
+                       ProxyURL specifies the HTTP/HTTPS proxy server URL to use when downloading the CRD tarball.
+                       This is useful in environments where direct access to the server hosting the CRD tarball is restricted and a proxy must be used to reach that server.
+                       The format should be a valid URL, e.g., "http://proxy.example.com:8080".
+                     type: string
+                 required:
+                 - proxyURL
+                 type: object
                url:
                  description: URL specifies the URL of the CRD tarball resource.
                  type: string
              required:
              - url
              type: object
            type: object
          customCertificate:
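A hedged sketch of the new proxy stanza in a Karmada custom resource; the `crdTarball.httpSource` path and the release URL are assumptions, while the `proxy.proxyURL` shape and its example value come from the schema above:

```yaml
# Sketch -- crdTarball/httpSource placement assumed from the surrounding schema.
spec:
  crdTarball:
    httpSource:
      url: https://example.com/karmada/crds.tar.gz   # illustrative URL
      proxy:
        proxyURL: http://proxy.example.com:8080
```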
@@ -5270,6 +5329,12 @@ spec:
            required:
            - registry
            type: object
+         suspend:
+           description: |-
+             Suspend indicates that the operator should suspend reconciliation
+             for this Karmada control plane and all its managed resources.
+             Karmada instances for which this field is not explicitly set to `true` will continue to be reconciled as usual.
+           type: boolean
        type: object
      status:
        description: Most recently observed status of the Karmada.
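Pausing reconciliation of an instance would then be a one-field change; a minimal sketch, with the API group/version assumed:

```yaml
apiVersion: operator.karmada.io/v1alpha1   # assumed group/version
kind: Karmada
metadata:
  name: karmada-demo
spec:
  suspend: true   # unset or false => reconciled as usual
```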
@@ -44,6 +44,13 @@ spec:
          - /bin/karmada-operator
          - --leader-elect-resource-namespace={{ .Release.Namespace }}
          - --v=2
+         {{- range .Values.operator.extraArgs }}
+         - {{ . }}
+         {{- end }}
+         {{- with .Values.operator.env }}
+         env:
+         {{- toYaml . | nindent 10 }}
+         {{- end }}
          {{- if .Values.operator.resources }}
          resources: {{- toYaml .Values.operator.resources | nindent 12 }}
          {{- end }}
@@ -58,7 +58,31 @@ operator:
  ## - myRegistryKeySecretName
  ##
  pullSecrets: []
- ## @param.resources
+
+ ## @param operator.env List of environment variables to inject
+ ##
+ ## - Each entry must be a valid Kubernetes EnvVar object.
+ ## - Supports both literal values and valueFrom references (ConfigMap, Secret, fieldRef, etc.).
+ ## - If omitted or set to an empty array (`[]`), no env stanza will be included.
+ ##
+ ## A sample stanza is shown below.
+ ##
+ # env:
+ #   - name: http_proxy
+ #     value: "http://best-awesome-proxy.com:8080"
+ #   - name: https_proxy
+ #     value: "http://best-awesome-proxy.com:8080"
+ #   - name: no_proxy
+ #     value: "localhost,127.0.0.1,*.svc,*.cluster.local"
+
+ ## @param operator.extraArgs List of extra arguments for the operator binary
+ ##
+ ## A sample stanza is shown below.
+ ##
+ # extraArgs:
+ #   - --arg1=val1
+ #   - --arg2
+
  resources: {}
  # If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
@@ -108,9 +108,9 @@ The command removes all the Kubernetes components associated with the chart and
> **Note**: There are some RBAC resources that are used by the `preJob` that can not be deleted by the `uninstall` command above. You might have to clean them manually with tools like `kubectl`. You can clean them by commands:

```console
-kubectl delete sa/karmada-pre-job -nkarmada-system
-kubectl delete clusterRole/karmada-pre-job
-kubectl delete clusterRoleBinding/karmada-pre-job
+kubectl delete sa/karmada-hook-job -nkarmada-system
+kubectl delete clusterRole/karmada-hook-job
+kubectl delete clusterRoleBinding/karmada-hook-job
kubectl delete ns karmada-system
```
@@ -272,6 +272,7 @@ helm install karmada-scheduler-estimator -n karmada-system ./charts/karmada
| `scheduler.affinity` | Affinity of the scheduler | `{}` |
| `scheduler.tolerations` | Tolerations of the scheduler | `[]` |
| `scheduler.strategy` | Strategy of the scheduler | `{"type": "RollingUpdate", "rollingUpdate": {"maxUnavailable": "0", "maxSurge": "50%"} }` |
+| `scheduler.enableSchedulerEstimator` | Enable calling cluster scheduler estimator for adjusting replicas | `false` |
| `webhook.labels` | Labels of the webhook deployment | `{"app": "karmada-webhook"}` |
| `webhook.replicaCount` | Target replicas of the webhook | `1` |
| `webhook.podLabels` | Labels of the webhook pods | `{}` |
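The new parameter can be switched on through a values override; a minimal sketch, with only the key path taken from the table above:

```yaml
# values-override.yaml -- sketch of enabling the estimator call
scheduler:
  enableSchedulerEstimator: true   # default is false
```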
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
-    controller-gen.kubebuilder.io/version: v0.16.5
+    controller-gen.kubebuilder.io/version: v0.18.0
  name: workloadrebalancers.apps.karmada.io
spec:
  group: apps.karmada.io
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
-    controller-gen.kubebuilder.io/version: v0.16.5
+    controller-gen.kubebuilder.io/version: v0.18.0
  name: cronfederatedhpas.autoscaling.karmada.io
spec:
  group: autoscaling.karmada.io
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
-    controller-gen.kubebuilder.io/version: v0.16.5
+    controller-gen.kubebuilder.io/version: v0.18.0
  name: federatedhpas.autoscaling.karmada.io
spec:
  group: autoscaling.karmada.io
@@ -82,7 +82,9 @@ spec:
                policies:
                  description: |-
                    policies is a list of potential scaling polices which can be used during scaling.
-                   At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid
+                   If not set, use the default values:
+                   - For scale up: allow doubling the number of pods, or an absolute change of 4 pods in a 15s window.
+                   - For scale down: allow all pods to be removed in a 15s window.
                  items:
                    description: HPAScalingPolicy is a single policy which must
                      hold true for a specified past interval.
@@ -124,6 +126,24 @@ spec:
                    - For scale down: 300 (i.e. the stabilization window is 300 seconds long).
                  format: int32
                  type: integer
+                tolerance:
+                  anyOf:
+                  - type: integer
+                  - type: string
+                  description: |-
+                    tolerance is the tolerance on the ratio between the current and desired
+                    metric value under which no updates are made to the desired number of
+                    replicas (e.g. 0.01 for 1%). Must be greater than or equal to zero. If not
+                    set, the default cluster-wide tolerance is applied (by default 10%).
+
+                    For example, if autoscaling is configured with a memory consumption target of 100Mi,
+                    and scale-down and scale-up tolerances of 5% and 1% respectively, scaling will be
+                    triggered when the actual consumption falls below 95Mi or exceeds 101Mi.
+
+                    This is an alpha field and requires enabling the HPAConfigurableTolerance
+                    feature gate.
+                  pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                  x-kubernetes-int-or-string: true
                type: object
              scaleUp:
                description: |-
@@ -136,7 +156,9 @@ spec:
                policies:
                  description: |-
                    policies is a list of potential scaling polices which can be used during scaling.
-                   At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid
+                   If not set, use the default values:
+                   - For scale up: allow doubling the number of pods, or an absolute change of 4 pods in a 15s window.
+                   - For scale down: allow all pods to be removed in a 15s window.
                  items:
                    description: HPAScalingPolicy is a single policy which must
                      hold true for a specified past interval.
@@ -178,6 +200,24 @@ spec:
                    - For scale down: 300 (i.e. the stabilization window is 300 seconds long).
                  format: int32
                  type: integer
+                tolerance:
+                  anyOf:
+                  - type: integer
+                  - type: string
+                  description: |-
+                    tolerance is the tolerance on the ratio between the current and desired
+                    metric value under which no updates are made to the desired number of
+                    replicas (e.g. 0.01 for 1%). Must be greater than or equal to zero. If not
+                    set, the default cluster-wide tolerance is applied (by default 10%).
+
+                    For example, if autoscaling is configured with a memory consumption target of 100Mi,
+                    and scale-down and scale-up tolerances of 5% and 1% respectively, scaling will be
+                    triggered when the actual consumption falls below 95Mi or exceeds 101Mi.
+
+                    This is an alpha field and requires enabling the HPAConfigurableTolerance
+                    feature gate.
+                  pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                  x-kubernetes-int-or-string: true
                type: object
            type: object
          maxReplicas:
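A hedged sketch of the new field in use on a FederatedHPA (the metadata and the `spec.behavior` placement are illustrative; the quantity-style values follow the "0.01 for 1%" convention in the description, and the field is alpha behind HPAConfigurableTolerance):

```yaml
apiVersion: autoscaling.karmada.io/v1alpha1
kind: FederatedHPA
metadata:
  name: sample-hpa
spec:
  behavior:
    scaleDown:
      tolerance: "0.05"   # 5% -- int-or-string quantity, per the schema pattern
    scaleUp:
      tolerance: "0.01"   # 1%
```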
@@ -209,7 +249,6 @@ spec:
                    each pod of the current scale target (e.g. CPU or memory). Such metrics are
                    built in to Kubernetes, and have special scaling options on top of those
                    available to normal per-pod metrics using the "pods" source.
-                   This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.
                  properties:
                    container:
                      description: container is the name of the container in the
@@ -650,8 +689,6 @@ spec:
                  description: |-
                    type is the type of metric source. It should be one of "ContainerResource", "External",
                    "Object", "Pods" or "Resource", each mapping to a matching field in the object.
-                   Note: "ContainerResource" type is available on when the feature-gate
-                   HPAContainerMetrics is enabled
                  type: string
                required:
                - type
@@ -1147,8 +1184,6 @@ spec:
                  description: |-
                    type is the type of metric source. It will be one of "ContainerResource", "External",
                    "Object", "Pods" or "Resource", each corresponds to a matching field in the object.
-                   Note: "ContainerResource" type is available on when the feature-gate
-                   HPAContainerMetrics is enabled
                  type: string
                required:
                - type
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
-    controller-gen.kubebuilder.io/version: v0.16.5
+    controller-gen.kubebuilder.io/version: v0.18.0
  name: resourceinterpretercustomizations.config.karmada.io
spec:
  group: config.karmada.io
@@ -60,6 +60,59 @@ spec:
              customizations:
                description: Customizations describe the interpretation rules.
                properties:
+                 componentResource:
+                   description: |-
+                     ComponentResource describes the rules for Karmada to discover the resource requirements
+                     for multiple components from the given object.
+                     This is designed for CRDs with multiple components (e.g., FlinkDeployment), but
+                     can also be used for single-component resources like Deployment.
+                     If implemented, the controller will use this to obtain per-component replica and resource
+                     requirements, and will not call ReplicaResource.
+                     If not implemented, the controller will fall back to ReplicaResource for backward compatibility.
+                     This will only be used when the feature gate 'MultiplePodTemplatesScheduling' is enabled.
+                   properties:
+                     luaScript:
+                       description: |-
+                         LuaScript holds the Lua script that is used to extract the desired replica count and resource
+                         requirements for each component of the resource.
+
+                         The script should implement a function as follows:
+
+                         ```
+                         luaScript: >
+                             function GetComponents(desiredObj)
+                               local components = {}
+
+                               local jobManagerComponent = {
+                                 name = "jobmanager",
+                                 replicas = desiredObj.spec.jobManager.replicas
+                               }
+                               table.insert(components, jobManagerComponent)
+
+                               local taskManagerComponent = {
+                                 name = "taskmanager",
+                                 replicas = desiredObj.spec.taskManager.replicas
+                               }
+                               table.insert(components, taskManagerComponent)
+
+                               return components
+                             end
+                         ```
+
+                         The content of the LuaScript needs to be a whole function including both
+                         declaration and implementation.
+
+                         The parameters will be supplied by the system:
+                           - desiredObj: the object represents the configuration to be applied
+                             to the member cluster.
+
+                         The function expects one return value:
+                           - components: the resource requirements for each component.
+                         The returned value will be set into a ResourceBinding or ClusterResourceBinding.
+                       type: string
+                   required:
+                   - luaScript
+                   type: object
                  dependencyInterpretation:
                    description: |-
                      DependencyInterpretation describes the rules for Karmada to analyze the
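To see where the hook sits in a full object, here is a hedged sketch of a ResourceInterpreterCustomization wiring in the documented GetComponents contract; the `spec.target` stanza is assumed from the existing API, and the FlinkDeployment target is the example used above:

```yaml
apiVersion: config.karmada.io/v1alpha1
kind: ResourceInterpreterCustomization
metadata:
  name: flinkdeployment-components
spec:
  target:                              # assumed field: which resource this applies to
    apiVersion: flink.apache.org/v1beta1
    kind: FlinkDeployment
  customizations:
    componentResource:
      luaScript: >
        function GetComponents(desiredObj)
          local components = {}
          table.insert(components, { name = "jobmanager", replicas = desiredObj.spec.jobManager.replicas })
          table.insert(components, { name = "taskmanager", replicas = desiredObj.spec.taskManager.replicas })
          return components
        end
```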
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
-    controller-gen.kubebuilder.io/version: v0.16.5
+    controller-gen.kubebuilder.io/version: v0.18.0
  name: resourceinterpreterwebhookconfigurations.config.karmada.io
spec:
  group: config.karmada.io
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
-    controller-gen.kubebuilder.io/version: v0.16.5
+    controller-gen.kubebuilder.io/version: v0.18.0
  name: multiclusteringresses.networking.karmada.io
spec:
  group: networking.karmada.io
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
-    controller-gen.kubebuilder.io/version: v0.16.5
+    controller-gen.kubebuilder.io/version: v0.18.0
  name: multiclusterservices.networking.karmada.io
spec:
  group: networking.karmada.io
@@ -245,6 +245,8 @@ spec:
+                 Ports is a list of records of service ports
+                 If used, every port defined in the service should have an entry in it
                items:
                  description: PortStatus represents the error condition
                    of a service port
                  properties:
                    error:
                      description: |-
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
-    controller-gen.kubebuilder.io/version: v0.16.5
+    controller-gen.kubebuilder.io/version: v0.18.0
  name: clusteroverridepolicies.policy.karmada.io
spec:
  group: policy.karmada.io
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
-    controller-gen.kubebuilder.io/version: v0.16.5
+    controller-gen.kubebuilder.io/version: v0.18.0
  name: clusterpropagationpolicies.policy.karmada.io
spec:
  group: policy.karmada.io
@@ -153,16 +153,19 @@ spec:
                  format: int32
                  type: integer
                purgeMode:
-                 default: Graciously
+                 default: Gracefully
                  description: |-
                    PurgeMode represents how to deal with the legacy applications on the
                    cluster from which the application is migrated.
-                   Valid options are "Immediately", "Graciously" and "Never".
-                   Defaults to "Graciously".
+                   Valid options are "Directly", "Gracefully", "Never", "Immediately"(deprecated),
+                   and "Graciously"(deprecated).
+                   Defaults to "Gracefully".
                  enum:
+                 - Directly
+                 - Gracefully
+                 - Never
                  - Immediately
                  - Graciously
-                 - Never
                  type: string
                statePreservation:
                  description: |-
@@ -223,6 +226,83 @@ spec:
                required:
                - decisionConditions
                type: object
+              cluster:
+                description: |-
+                  Cluster indicates failover behaviors in case of cluster failure.
+                  If this value is nil, the failover behavior in case of cluster failure
+                  will be controlled by the controller's no-execute-taint-eviction-purge-mode
+                  parameter.
+                  If set, the failover behavior in case of cluster failure will be defined
+                  by this value.
+                properties:
+                  purgeMode:
+                    default: Gracefully
+                    description: |-
+                      PurgeMode represents how to deal with the legacy applications on the
+                      cluster from which the application is migrated.
+                      Valid options are "Directly", "Gracefully".
+                      Defaults to "Gracefully".
+                    enum:
+                    - Directly
+                    - Gracefully
+                    type: string
+                  statePreservation:
+                    description: |-
+                      StatePreservation defines the policy for preserving and restoring state data
+                      during failover events for stateful applications.
+
+                      When an application fails over from one cluster to another, this policy enables
+                      the extraction of critical data from the original resource configuration.
+                      Upon successful migration, the extracted data is then re-injected into the new
+                      resource, ensuring that the application can resume operation with its previous
+                      state intact.
+                      This is particularly useful for stateful applications where maintaining data
+                      consistency across failover events is crucial.
+                      If not specified, means no state data will be preserved.
+
+                      Note: This requires the StatefulFailoverInjection feature gate to be enabled,
+                      which is alpha.
+                    properties:
+                      rules:
+                        description: |-
+                          Rules contains a list of StatePreservationRule configurations.
+                          Each rule specifies a JSONPath expression targeting specific pieces of
+                          state data to be preserved during failover events. An AliasLabelName is associated
+                          with each rule, serving as a label key when the preserved data is passed
+                          to the new cluster.
+                        items:
+                          description: |-
+                            StatePreservationRule defines a single rule for state preservation.
+                            It includes a JSONPath expression and an alias name that will be used
+                            as a label key when passing state information to the new cluster.
+                          properties:
+                            aliasLabelName:
+                              description: |-
+                                AliasLabelName is the name that will be used as a label key when the preserved
+                                data is passed to the new cluster. This facilitates the injection of the
+                                preserved state back into the application resources during recovery.
+                              type: string
+                            jsonPath:
+                              description: |-
+                                JSONPath is the JSONPath template used to identify the state data
+                                to be preserved from the original resource configuration.
+                                The JSONPath syntax follows the Kubernetes specification:
+                                https://kubernetes.io/docs/reference/kubectl/jsonpath/
+
+                                Note: The JSONPath expression will start searching from the "status" field of
+                                the API resource object by default. For example, to extract the "availableReplicas"
+                                from a Deployment, the JSONPath expression should be "{.availableReplicas}", not
+                                "{.status.availableReplicas}".
+                              type: string
+                          required:
+                          - aliasLabelName
+                          - jsonPath
+                          type: object
+                        type: array
+                    required:
+                    - rules
+                    type: object
+                type: object
              type: object
            placement:
              description: Placement represents the rule for select clusters to
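A hedged sketch of the new stanza in use, assuming it sits at `spec.failover.cluster` as the surrounding context suggests; the policy name and Deployment target are illustrative, and the jsonPath value reuses the example from the field docs:

```yaml
apiVersion: policy.karmada.io/v1alpha1
kind: PropagationPolicy
metadata:
  name: nginx-propagation
spec:
  resourceSelectors:
  - apiVersion: apps/v1
    kind: Deployment
    name: nginx
  failover:
    cluster:
      purgeMode: Directly            # or Gracefully (the default)
      statePreservation:             # alpha; needs StatefulFailoverInjection
        rules:
        - aliasLabelName: failover.example.io/available-replicas   # example key
          jsonPath: "{.availableReplicas}"   # searched from .status by default
```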
@@ -0,0 +1,257 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    controller-gen.kubebuilder.io/version: v0.18.0
  name: clustertaintpolicies.policy.karmada.io
spec:
  group: policy.karmada.io
  names:
    kind: ClusterTaintPolicy
    listKind: ClusterTaintPolicyList
    plural: clustertaintpolicies
    singular: clustertaintpolicy
  scope: Cluster
  versions:
  - name: v1alpha1
    schema:
      openAPIV3Schema:
        description: |-
          ClusterTaintPolicy automates taint management on Cluster objects based
          on declarative conditions.
          The system evaluates AddOnConditions to determine when to add taints,
          and RemoveOnConditions to determine when to remove taints.
          AddOnConditions are evaluated before RemoveOnConditions.
          Taints are NEVER automatically removed when the ClusterTaintPolicy is deleted.
        properties:
          apiVersion:
            description: |-
              APIVersion defines the versioned schema of this representation of an object.
              Servers should convert recognized schemas to the latest internal value, and
              may reject unrecognized values.
              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
            type: string
          kind:
            description: |-
              Kind is a string value representing the REST resource this object represents.
              Servers may infer this from the endpoint the client submits requests to.
              Cannot be updated.
              In CamelCase.
              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
            type: string
          metadata:
            type: object
          spec:
            description: Spec represents the desired behavior of ClusterTaintPolicy.
            properties:
              addOnConditions:
                description: |-
                  AddOnConditions defines the conditions to match for triggering
                  the controller to add taints on the cluster object.
                  The match conditions are ANDed.
                  If AddOnConditions is empty, no taints will be added.
                items:
                  description: |-
                    MatchCondition represents the condition match detail of activating the failover
                    relevant taints on target clusters.
                  properties:
                    conditionType:
                      description: ConditionType specifies the ClusterStatus condition type.
                      type: string
                    operator:
                      description: |-
                        Operator represents a relationship to a set of values.
                        Valid operators are In, NotIn.
                      type: string
                    statusValues:
                      description: |-
                        StatusValues is an array of metav1.ConditionStatus values.
                        The item specifies the ClusterStatus condition status.
                      items:
                        type: string
                      type: array
                  required:
                  - conditionType
                  - operator
                  - statusValues
                  type: object
                type: array
              removeOnConditions:
                description: |-
                  RemoveOnConditions defines the conditions to match for triggering
                  the controller to remove taints from the cluster object.
                  The match conditions are ANDed.
                  If RemoveOnConditions is empty, no taints will be removed.
                items:
                  description: |-
                    MatchCondition represents the condition match detail of activating the failover
                    relevant taints on target clusters.
                  properties:
                    conditionType:
                      description: ConditionType specifies the ClusterStatus condition type.
                      type: string
                    operator:
                      description: |-
                        Operator represents a relationship to a set of values.
                        Valid operators are In, NotIn.
                      type: string
                    statusValues:
                      description: |-
                        StatusValues is an array of metav1.ConditionStatus values.
                        The item specifies the ClusterStatus condition status.
                      items:
                        type: string
                      type: array
                  required:
                  - conditionType
                  - operator
                  - statusValues
                  type: object
                type: array
              taints:
                description: |-
                  Taints specifies the taints that need to be added or removed on
                  the cluster object which match with TargetClusters.
                  If the Taints is modified, the system will process the taints based on
                  the latest value of Taints during the next condition-triggered execution,
                  regardless of whether the taint has been added or removed.
                items:
                  description: Taint describes the taint that needs to be applied to the cluster.
                  properties:
                    effect:
                      description: Effect represents the taint effect to be applied to a cluster.
                      type: string
                    key:
                      description: Key represents the taint key to be applied to a cluster.
                      type: string
                    value:
                      description: Value represents the taint value corresponding to the taint key.
                      type: string
                  required:
                  - effect
                  - key
                  type: object
                minItems: 1
                type: array
              targetClusters:
                description: |-
                  TargetClusters specifies the clusters that ClusterTaintPolicy needs
                  to pay attention to.
                  For clusters that no longer match the TargetClusters, the taints
                  will be kept unchanged.
                  If targetClusters is not set, any cluster can be selected.
                properties:
                  clusterNames:
                    description: ClusterNames is the list of clusters to be selected.
                    items:
                      type: string
                    type: array
                  exclude:
                    description: ExcludedClusters is the list of clusters to be ignored.
                    items:
                      type: string
                    type: array
                  fieldSelector:
                    description: |-
                      FieldSelector is a filter to select member clusters by fields.
                      The key(field) of the match expression should be 'provider', 'region', or 'zone',
                      and the operator of the match expression should be 'In' or 'NotIn'.
                      If non-nil and non-empty, only the clusters match this filter will be selected.
                    properties:
                      matchExpressions:
                        description: A list of field selector requirements.
                        items:
                          description: |-
                            A node selector requirement is a selector that contains values, a key, and an operator
                            that relates the key and values.
                          properties:
                            key:
                              description: The label key that the selector applies to.
                              type: string
                            operator:
                              description: |-
                                Represents a key's relationship to a set of values.
                                Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
                              type: string
                            values:
                              description: |-
                                An array of string values. If the operator is In or NotIn,
                                the values array must be non-empty. If the operator is Exists or DoesNotExist,
                                the values array must be empty. If the operator is Gt or Lt, the values
                                array must have a single element, which will be interpreted as an integer.
                                This array is replaced during a strategic merge patch.
                              items:
                                type: string
                              type: array
                              x-kubernetes-list-type: atomic
                          required:
                          - key
                          - operator
                          type: object
                        type: array
                    type: object
                  labelSelector:
                    description: |-
                      LabelSelector is a filter to select member clusters by labels.
                      If non-nil and non-empty, only the clusters match this filter will be selected.
                    properties:
                      matchExpressions:
                        description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
                        items:
                          description: |-
                            A label selector requirement is a selector that contains values, a key, and an operator that
                            relates the key and values.
                          properties:
                            key:
                              description: key is the label key that the selector applies to.
                              type: string
                            operator:
                              description: |-
                                operator represents a key's relationship to a set of values.
                                Valid operators are In, NotIn, Exists and DoesNotExist.
                              type: string
                            values:
                              description: |-
                                values is an array of string values. If the operator is In or NotIn,
                                the values array must be non-empty. If the operator is Exists or DoesNotExist,
                                the values array must be empty. This array is replaced during a strategic
                                merge patch.
                              items:
                                type: string
                              type: array
                              x-kubernetes-list-type: atomic
                          required:
                          - key
                          - operator
                          type: object
                        type: array
                        x-kubernetes-list-type: atomic
                      matchLabels:
                        additionalProperties:
                          type: string
                        description: |-
                          matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
                          map is equivalent to an element of matchExpressions, whose key field is "key", the
                          operator is "In", and the values array contains only "value". The requirements are ANDed.
                        type: object
                    type: object
                    x-kubernetes-map-type: atomic
                type: object
            required:
            - taints
            type: object
        required:
        - spec
        type: object
    served: true
    storage: true
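For orientation, an illustrative ClusterTaintPolicy built strictly from the schema above; the condition type, cluster names, and taint key are example values:

```yaml
apiVersion: policy.karmada.io/v1alpha1
kind: ClusterTaintPolicy
metadata:
  name: taint-unready-clusters
spec:
  targetClusters:
    clusterNames: [member1, member2]
  addOnConditions:
  - conditionType: Ready
    operator: In
    statusValues: ["False", "Unknown"]
  removeOnConditions:
  - conditionType: Ready
    operator: In
    statusValues: ["True"]
  taints:
  - key: cluster.example.io/not-ready   # example key
    effect: NoExecute
```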
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
-    controller-gen.kubebuilder.io/version: v0.16.5
+    controller-gen.kubebuilder.io/version: v0.18.0
  name: federatedresourcequotas.policy.karmada.io
spec:
  group: policy.karmada.io
@@ -16,7 +16,14 @@ spec:
    singular: federatedresourcequota
  scope: Namespaced
  versions:
- - name: v1alpha1
+ - additionalPrinterColumns:
+   - jsonPath: .status.overall
+     name: OVERALL
+     type: string
+   - jsonPath: .status.overallUsed
+     name: OVERALL_USED
+     type: string
+   name: v1alpha1
    schema:
      openAPIV3Schema:
        description: FederatedResourceQuota sets aggregate quota restrictions enforced
@@ -54,9 +61,15 @@ spec:
                type: object
              staticAssignments:
                description: |-
-                 StaticAssignments represents the subset of desired hard limits for each cluster.
-                 Note: for clusters not present in this list, Karmada will set an empty ResourceQuota to them, which means these
-                 clusters will have no quotas in the referencing namespace.
+                 StaticAssignments specifies ResourceQuota settings for specific clusters.
+                 If non-empty, Karmada will create ResourceQuotas in the corresponding clusters.
+                 Clusters not listed here or when StaticAssignments is empty will have no ResourceQuotas created.
+
+                 This field addresses multi-cluster configuration management challenges by allowing centralized
+                 control over ResourceQuotas across clusters.
+
+                 Note: The Karmada scheduler currently does NOT use this configuration for scheduling decisions.
+                 Future updates may integrate it into the scheduling logic.
                items:
                  description: StaticClusterAssignment represents the set of desired
                    hard limits for a specific cluster.
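A hedged sketch of the clarified semantics; the `clusterName`/`hard` item fields are assumed from the StaticClusterAssignment description, and the quota values are illustrative:

```yaml
apiVersion: policy.karmada.io/v1alpha1
kind: FederatedResourceQuota
metadata:
  name: team-quota
  namespace: team-a
spec:
  staticAssignments:
  - clusterName: member1     # only listed clusters get a ResourceQuota
    hard:
      cpu: "10"
      memory: 20Gi
```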
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
-    controller-gen.kubebuilder.io/version: v0.16.5
+    controller-gen.kubebuilder.io/version: v0.18.0
  name: overridepolicies.policy.karmada.io
spec:
  group: policy.karmada.io
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
-    controller-gen.kubebuilder.io/version: v0.16.5
+    controller-gen.kubebuilder.io/version: v0.18.0
  name: propagationpolicies.policy.karmada.io
spec:
  group: policy.karmada.io
@@ -150,16 +150,19 @@ spec:
                  format: int32
                  type: integer
                purgeMode:
-                 default: Graciously
+                 default: Gracefully
                  description: |-
                    PurgeMode represents how to deal with the legacy applications on the
                    cluster from which the application is migrated.
-                   Valid options are "Immediately", "Graciously" and "Never".
-                   Defaults to "Graciously".
+                   Valid options are "Directly", "Gracefully", "Never", "Immediately"(deprecated),
+                   and "Graciously"(deprecated).
+                   Defaults to "Gracefully".
                  enum:
+                 - Directly
+                 - Gracefully
+                 - Never
                  - Immediately
                  - Graciously
-                 - Never
                  type: string
                statePreservation:
                  description: |-
@@ -220,6 +223,83 @@ spec:
                required:
                - decisionConditions
                type: object
+              cluster:
+                description: |-
+                  Cluster indicates failover behaviors in case of cluster failure.
+                  If this value is nil, the failover behavior in case of cluster failure
+                  will be controlled by the controller's no-execute-taint-eviction-purge-mode
+                  parameter.
+                  If set, the failover behavior in case of cluster failure will be defined
+                  by this value.
+                properties:
+                  purgeMode:
+                    default: Gracefully
+                    description: |-
+                      PurgeMode represents how to deal with the legacy applications on the
+                      cluster from which the application is migrated.
+                      Valid options are "Directly", "Gracefully".
+                      Defaults to "Gracefully".
+                    enum:
+                    - Directly
+                    - Gracefully
+                    type: string
+                  statePreservation:
+                    description: |-
+                      StatePreservation defines the policy for preserving and restoring state data
+                      during failover events for stateful applications.
+
+                      When an application fails over from one cluster to another, this policy enables
+                      the extraction of critical data from the original resource configuration.
+                      Upon successful migration, the extracted data is then re-injected into the new
+                      resource, ensuring that the application can resume operation with its previous
+                      state intact.
+                      This is particularly useful for stateful applications where maintaining data
+                      consistency across failover events is crucial.
+                      If not specified, means no state data will be preserved.
+
+                      Note: This requires the StatefulFailoverInjection feature gate to be enabled,
+                      which is alpha.
+                    properties:
+                      rules:
+                        description: |-
+                          Rules contains a list of StatePreservationRule configurations.
+                          Each rule specifies a JSONPath expression targeting specific pieces of
+                          state data to be preserved during failover events. An AliasLabelName is associated
+                          with each rule, serving as a label key when the preserved data is passed
+                          to the new cluster.
+                        items:
+                          description: |-
+                            StatePreservationRule defines a single rule for state preservation.
+                            It includes a JSONPath expression and an alias name that will be used
+                            as a label key when passing state information to the new cluster.
+                          properties:
+                            aliasLabelName:
+                              description: |-
+                                AliasLabelName is the name that will be used as a label key when the preserved
+                                data is passed to the new cluster. This facilitates the injection of the
+                                preserved state back into the application resources during recovery.
+                              type: string
+                            jsonPath:
+                              description: |-
+                                JSONPath is the JSONPath template used to identify the state data
+                                to be preserved from the original resource configuration.
+                                The JSONPath syntax follows the Kubernetes specification:
+                                https://kubernetes.io/docs/reference/kubectl/jsonpath/
+
+                                Note: The JSONPath expression will start searching from the "status" field of
+                                the API resource object by default. For example, to extract the "availableReplicas"
+                                from a Deployment, the JSONPath expression should be "{.availableReplicas}", not
+                                "{.status.availableReplicas}".
+                              type: string
+                          required:
+                          - aliasLabelName
+                          - jsonPath
+                          type: object
+                        type: array
+                    required:
+                    - rules
+                    type: object
+                type: object
              type: object
            placement:
              description: Placement represents the rule for select clusters to
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
-    controller-gen.kubebuilder.io/version: v0.16.5
+    controller-gen.kubebuilder.io/version: v0.18.0
  name: remedies.remedy.karmada.io
spec:
  group: remedy.karmada.io
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
-    controller-gen.kubebuilder.io/version: v0.16.5
+    controller-gen.kubebuilder.io/version: v0.18.0
  name: clusterresourcebindings.work.karmada.io
spec:
  group: work.karmada.io
@@ -261,6 +261,199 @@ spec:
                - name
                type: object
              type: array
+            components:
+              description: |-
+                Components represents the requirements of multiple pod templates of the referencing resource.
+                It is designed to support workloads that consist of multiple pod templates,
+                such as distributed training jobs (e.g., PyTorch, TensorFlow) and big data workloads (e.g., FlinkDeployment),
+                where each workload is composed of more than one pod template. It is also capable of representing
+                single-component workloads, such as Deployment.
+
+                Note: This field is intended to replace the legacy ReplicaRequirements and Replicas fields above.
+                It is only populated when the MultiplePodTemplatesScheduling feature gate is enabled.
+              items:
+                description: Component represents the requirements for a specific component.
+                properties:
+                  name:
+                    description: |-
+                      Name of this component.
+                      It is required when the resource contains multiple components to ensure proper identification,
+                      and must also be unique within the same resource.
+                    maxLength: 32
+                    type: string
+                  replicaRequirements:
+                    description: ReplicaRequirements represents the requirements required by each replica for this component.
+                    properties:
+                      nodeClaim:
+                        description: NodeClaim represents the node claim HardNodeAffinity, NodeSelector and Tolerations required by each replica.
+                        properties:
+                          hardNodeAffinity:
+                            description: |-
+                              A node selector represents the union of the results of one or more label queries over a set of
+                              nodes; that is, it represents the OR of the selectors represented by the node selector terms.
+                              Note that only PodSpec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution
+                              is included here because it has a hard limit on pod scheduling.
+                            properties:
+                              nodeSelectorTerms:
+                                description: Required. A list of node selector terms. The terms are ORed.
+                                items:
+                                  description: |-
+                                    A null or empty node selector term matches no objects. The requirements of
+                                    them are ANDed.
+                                    The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+                                  properties:
+                                    matchExpressions:
+                                      description: A list of node selector requirements by node's labels.
+                                      items:
+                                        description: |-
+                                          A node selector requirement is a selector that contains values, a key, and an operator
+                                          that relates the key and values.
+                                        properties:
+                                          key:
+                                            description: The label key that the selector applies to.
+                                            type: string
+                                          operator:
+                                            description: |-
+                                              Represents a key's relationship to a set of values.
+                                              Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+                                            type: string
+                                          values:
+                                            description: |-
+                                              An array of string values. If the operator is In or NotIn,
+                                              the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                              the values array must be empty. If the operator is Gt or Lt, the values
+                                              array must have a single element, which will be interpreted as an integer.
+                                              This array is replaced during a strategic merge patch.
+                                            items:
+                                              type: string
+                                            type: array
+                                            x-kubernetes-list-type: atomic
+                                        required:
+                                        - key
+                                        - operator
+                                        type: object
+                                      type: array
+                                      x-kubernetes-list-type: atomic
+                                    matchFields:
+                                      description: A list of node selector requirements by node's fields.
+                                      items:
+                                        description: |-
+                                          A node selector requirement is a selector that contains values, a key, and an operator
+                                          that relates the key and values.
+                                        properties:
+                                          key:
+                                            description: The label key that the selector applies to.
+                                            type: string
+                                          operator:
+                                            description: |-
+                                              Represents a key's relationship to a set of values.
+                                              Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+                                            type: string
+                                          values:
+                                            description: |-
+                                              An array of string values. If the operator is In or NotIn,
+                                              the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                              the values array must be empty. If the operator is Gt or Lt, the values
+                                              array must have a single element, which will be interpreted as an integer.
+                                              This array is replaced during a strategic merge patch.
+                                            items:
+                                              type: string
+                                            type: array
+                                            x-kubernetes-list-type: atomic
+                                        required:
+                                        - key
+                                        - operator
+                                        type: object
+                                      type: array
+                                      x-kubernetes-list-type: atomic
+                                  type: object
+                                  x-kubernetes-map-type: atomic
+                                type: array
+                                x-kubernetes-list-type: atomic
+                            required:
+                            - nodeSelectorTerms
+                            type: object
+                            x-kubernetes-map-type: atomic
+                          nodeSelector:
+                            additionalProperties:
+                              type: string
+                            description: |-
+                              NodeSelector is a selector which must be true for the pod to fit on a node.
+                              Selector which must match a node's labels for the pod to be scheduled on that node.
+                            type: object
+                          tolerations:
+                            description: If specified, the pod's tolerations.
+                            items:
+                              description: |-
+                                The pod this Toleration is attached to tolerates any taint that matches
+                                the triple <key,value,effect> using the matching operator <operator>.
+                              properties:
+                                effect:
+                                  description: |-
+                                    Effect indicates the taint effect to match. Empty means match all taint effects.
+                                    When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+                                  type: string
+                                key:
+                                  description: |-
+                                    Key is the taint key that the toleration applies to. Empty means match all taint keys.
+                                    If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+                                  type: string
+                                operator:
+                                  description: |-
+                                    Operator represents a key's relationship to the value.
+                                    Valid operators are Exists and Equal. Defaults to Equal.
+                                    Exists is equivalent to wildcard for value, so that a pod can
+                                    tolerate all taints of a particular category.
+                                  type: string
+                                tolerationSeconds:
+                                  description: |-
+                                    TolerationSeconds represents the period of time the toleration (which must be
+                                    of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+                                    it is not set, which means tolerate the taint forever (do not evict). Zero and
+                                    negative values will be treated as 0 (evict immediately) by the system.
+                                  format: int64
+                                  type: integer
+                                value:
+                                  description: |-
+                                    Value is the taint value the toleration matches to.
+                                    If the operator is Exists, the value should be empty, otherwise just a regular string.
+                                  type: string
+                              type: object
+                            type: array
+                        type: object
+                      priorityClassName:
+                        description: PriorityClassName represents the resources priorityClassName
+                        type: string
+                      resourceRequest:
+                        additionalProperties:
+                          anyOf:
+                          - type: integer
+                          - type: string
+                          pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                          x-kubernetes-int-or-string: true
+                        description: ResourceRequest represents the resources required by each replica.
+                        type: object
+                    type: object
+                  replicas:
+                    description: Replicas represents the replica number of the resource's component.
+                    format: int32
+                    type: integer
+                required:
+                - name
+                - replicas
+                type: object
+              type: array
            conflictResolution:
              default: Abort
              description: |-
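Illustratively, a binding populated under this feature gate might carry something like the following; the values are made up, and only the field names come from the schema above:

```yaml
# Sketch of spec.components on a (Cluster)ResourceBinding when
# MultiplePodTemplatesScheduling is enabled.
spec:
  components:
  - name: jobmanager
    replicas: 1
    replicaRequirements:
      resourceRequest:
        cpu: "1"
        memory: 2Gi
  - name: taskmanager
    replicas: 4
    replicaRequirements:
      resourceRequest:
        cpu: "2"
        memory: 4Gi
```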
@@ -315,16 +508,19 @@ spec:
                  format: int32
                  type: integer
                purgeMode:
-                 default: Graciously
+                 default: Gracefully
                  description: |-
                    PurgeMode represents how to deal with the legacy applications on the
                    cluster from which the application is migrated.
-                   Valid options are "Immediately", "Graciously" and "Never".
-                   Defaults to "Graciously".
+                   Valid options are "Directly", "Gracefully", "Never", "Immediately"(deprecated),
+                   and "Graciously"(deprecated).
+                   Defaults to "Gracefully".
                  enum:
+                 - Directly
+                 - Gracefully
+                 - Never
                  - Immediately
                  - Graciously
-                 - Never
                  type: string
                statePreservation:
                  description: |-
@ -385,6 +581,83 @@ spec:
|
|||
required:
|
||||
- decisionConditions
|
||||
type: object
|
||||
cluster:
|
||||
description: |-
|
||||
Cluster indicates failover behaviors in case of cluster failure.
|
||||
If this value is nil, the failover behavior in case of cluster failure
|
||||
will be controlled by the controller's no-execute-taint-eviction-purge-mode
|
||||
parameter.
|
||||
If set, the failover behavior in case of cluster failure will be defined
|
||||
by this value.
|
||||
properties:
|
||||
purgeMode:
|
||||
default: Gracefully
|
||||
description: |-
|
||||
PurgeMode represents how to deal with the legacy applications on the
|
||||
cluster from which the application is migrated.
|
||||
Valid options are "Directly", "Gracefully".
|
||||
Defaults to "Gracefully".
|
||||
enum:
|
||||
- Directly
|
||||
- Gracefully
|
||||
type: string
|
||||
statePreservation:
|
||||
description: |-
|
||||
StatePreservation defines the policy for preserving and restoring state data
|
||||
during failover events for stateful applications.
|
||||
|
||||
When an application fails over from one cluster to another, this policy enables
|
||||
the extraction of critical data from the original resource configuration.
|
||||
Upon successful migration, the extracted data is then re-injected into the new
|
||||
resource, ensuring that the application can resume operation with its previous
|
||||
state intact.
|
||||
This is particularly useful for stateful applications where maintaining data
|
||||
consistency across failover events is crucial.
|
||||
If not specified, means no state data will be preserved.
|
||||
|
||||
Note: This requires the StatefulFailoverInjection feature gate to be enabled,
|
||||
which is alpha.
|
||||
properties:
|
||||
rules:
|
||||
description: |-
|
||||
Rules contains a list of StatePreservationRule configurations.
|
||||
Each rule specifies a JSONPath expression targeting specific pieces of
|
||||
state data to be preserved during failover events. An AliasLabelName is associated
|
||||
with each rule, serving as a label key when the preserved data is passed
|
||||
to the new cluster.
|
||||
items:
|
||||
description: |-
|
||||
StatePreservationRule defines a single rule for state preservation.
|
||||
It includes a JSONPath expression and an alias name that will be used
|
||||
as a label key when passing state information to the new cluster.
|
||||
properties:
|
||||
aliasLabelName:
|
||||
description: |-
|
||||
AliasLabelName is the name that will be used as a label key when the preserved
|
||||
data is passed to the new cluster. This facilitates the injection of the
|
||||
preserved state back into the application resources during recovery.
|
||||
type: string
|
||||
jsonPath:
|
||||
description: |-
|
||||
JSONPath is the JSONPath template used to identify the state data
|
||||
to be preserved from the original resource configuration.
|
||||
The JSONPath syntax follows the Kubernetes specification:
|
||||
https://kubernetes.io/docs/reference/kubectl/jsonpath/
|
||||
|
||||
Note: The JSONPath expression will start searching from the "status" field of
|
||||
the API resource object by default. For example, to extract the "availableReplicas"
|
||||
from a Deployment, the JSONPath expression should be "{.availableReplicas}", not
|
||||
"{.status.availableReplicas}".
|
||||
type: string
|
||||
required:
|
||||
- aliasLabelName
|
||||
- jsonPath
|
||||
type: object
|
||||
type: array
|
||||
required:
|
||||
- rules
|
||||
type: object
|
||||
type: object
|
||||
type: object
|
||||
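A hedged sketch of the cluster-failure branch described above, combining the two-value purgeMode with one state-preservation rule; the label key is a made-up example, and the JSONPath deliberately omits the ".status" prefix per the note in the schema:

```yaml
failover:
  cluster:
    purgeMode: Gracefully     # only "Directly" and "Gracefully" are valid here
    statePreservation:        # requires the alpha StatefulFailoverInjection gate
      rules:
        - aliasLabelName: failover.example.io/available-replicas  # hypothetical key
          jsonPath: "{.availableReplicas}"  # searched from .status by default
```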
gracefulEvictionTasks:
description: |-

@@ -454,10 +727,12 @@ spec:
description: |-
PurgeMode represents how to deal with the legacy applications on the
cluster from which the application is migrated.
Valid options are "Immediately", "Graciously" and "Never".
Valid options are "Immediately", "Directly", "Graciously", "Gracefully" and "Never".
enum:
- Immediately
- Directly
- Graciously
- Gracefully
- Never
type: string
reason:

@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.16.5
controller-gen.kubebuilder.io/version: v0.18.0
name: resourcebindings.work.karmada.io
spec:
group: work.karmada.io
@@ -261,6 +261,199 @@ spec:
- name
type: object
type: array
components:
description: |-
Components represents the requirements of multiple pod templates of the referencing resource.
It is designed to support workloads that consist of multiple pod templates,
such as distributed training jobs (e.g., PyTorch, TensorFlow) and big data workloads (e.g., FlinkDeployment),
where each workload is composed of more than one pod template. It is also capable of representing
single-component workloads, such as Deployment.

Note: This field is intended to replace the legacy ReplicaRequirements and Replicas fields above.
It is only populated when the MultiplePodTemplatesScheduling feature gate is enabled.
items:
description: Component represents the requirements for a specific
component.
properties:
name:
description: |-
Name of this component.
It is required when the resource contains multiple components to ensure proper identification,
and must also be unique within the same resource.
maxLength: 32
type: string
replicaRequirements:
description: ReplicaRequirements represents the requirements
required by each replica for this component.
properties:
nodeClaim:
description: NodeClaim represents the node claim HardNodeAffinity,
NodeSelector and Tolerations required by each replica.
properties:
hardNodeAffinity:
description: |-
A node selector represents the union of the results of one or more label queries over a set of
nodes; that is, it represents the OR of the selectors represented by the node selector terms.
Note that only PodSpec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution
is included here because it has a hard limit on pod scheduling.
properties:
nodeSelectorTerms:
description: Required. A list of node selector terms.
The terms are ORed.
items:
description: |-
A null or empty node selector term matches no objects. The requirements of
them are ANDed.
The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
properties:
matchExpressions:
description: A list of node selector requirements
by node's labels.
items:
description: |-
A node selector requirement is a selector that contains values, a key, and an operator
that relates the key and values.
properties:
key:
description: The label key that the
selector applies to.
type: string
operator:
description: |-
Represents a key's relationship to a set of values.
Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
type: string
values:
description: |-
An array of string values. If the operator is In or NotIn,
the values array must be non-empty. If the operator is Exists or DoesNotExist,
the values array must be empty. If the operator is Gt or Lt, the values
array must have a single element, which will be interpreted as an integer.
This array is replaced during a strategic merge patch.
items:
type: string
type: array
x-kubernetes-list-type: atomic
required:
- key
- operator
type: object
type: array
x-kubernetes-list-type: atomic
matchFields:
description: A list of node selector requirements
by node's fields.
items:
description: |-
A node selector requirement is a selector that contains values, a key, and an operator
that relates the key and values.
properties:
key:
description: The label key that the
selector applies to.
type: string
operator:
description: |-
Represents a key's relationship to a set of values.
Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
type: string
values:
description: |-
An array of string values. If the operator is In or NotIn,
the values array must be non-empty. If the operator is Exists or DoesNotExist,
the values array must be empty. If the operator is Gt or Lt, the values
array must have a single element, which will be interpreted as an integer.
This array is replaced during a strategic merge patch.
items:
type: string
type: array
x-kubernetes-list-type: atomic
required:
- key
- operator
type: object
type: array
x-kubernetes-list-type: atomic
type: object
x-kubernetes-map-type: atomic
type: array
x-kubernetes-list-type: atomic
required:
- nodeSelectorTerms
type: object
x-kubernetes-map-type: atomic
nodeSelector:
additionalProperties:
type: string
description: |-
NodeSelector is a selector which must be true for the pod to fit on a node.
Selector which must match a node's labels for the pod to be scheduled on that node.
type: object
tolerations:
description: If specified, the pod's tolerations.
items:
description: |-
The pod this Toleration is attached to tolerates any taint that matches
the triple <key,value,effect> using the matching operator <operator>.
properties:
effect:
description: |-
Effect indicates the taint effect to match. Empty means match all taint effects.
When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
type: string
key:
description: |-
Key is the taint key that the toleration applies to. Empty means match all taint keys.
If the key is empty, operator must be Exists; this combination means to match all values and all keys.
type: string
operator:
description: |-
Operator represents a key's relationship to the value.
Valid operators are Exists and Equal. Defaults to Equal.
Exists is equivalent to wildcard for value, so that a pod can
tolerate all taints of a particular category.
type: string
tolerationSeconds:
description: |-
TolerationSeconds represents the period of time the toleration (which must be
of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
it is not set, which means tolerate the taint forever (do not evict). Zero and
negative values will be treated as 0 (evict immediately) by the system.
format: int64
type: integer
value:
description: |-
Value is the taint value the toleration matches to.
If the operator is Exists, the value should be empty, otherwise just a regular string.
type: string
type: object
type: array
type: object
priorityClassName:
description: PriorityClassName represents the resources
priorityClassName
type: string
resourceRequest:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: ResourceRequest represents the resources required
by each replica.
type: object
type: object
replicas:
description: Replicas represents the replica number of the resource's
component.
format: int32
type: integer
required:
- name
- replicas
type: object
type: array
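To make the new field concrete, a hypothetical ResourceBinding fragment for a two-template training job (component names and quantities invented; populated only when the MultiplePodTemplatesScheduling gate is on):

```yaml
spec:
  components:
    - name: master              # unique per resource, at most 32 characters
      replicas: 1
      replicaRequirements:
        resourceRequest:
          cpu: "4"
          memory: 16Gi
    - name: worker
      replicas: 8
      replicaRequirements:
        resourceRequest:
          nvidia.com/gpu: "1"
```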
conflictResolution:
default: Abort
description: |-

@@ -315,16 +508,19 @@ spec:
format: int32
type: integer
purgeMode:
default: Graciously
default: Gracefully
description: |-
PurgeMode represents how to deal with the legacy applications on the
cluster from which the application is migrated.
Valid options are "Immediately", "Graciously" and "Never".
Defaults to "Graciously".
Valid options are "Directly", "Gracefully", "Never", "Immediately"(deprecated),
and "Graciously"(deprecated).
Defaults to "Gracefully".
enum:
- Directly
- Gracefully
- Never
- Immediately
- Graciously
- Never
type: string
statePreservation:
description: |-

@@ -385,6 +581,83 @@ spec:
required:
- decisionConditions
type: object
cluster:
description: |-
Cluster indicates failover behaviors in case of cluster failure.
If this value is nil, the failover behavior in case of cluster failure
will be controlled by the controller's no-execute-taint-eviction-purge-mode
parameter.
If set, the failover behavior in case of cluster failure will be defined
by this value.
properties:
purgeMode:
default: Gracefully
description: |-
PurgeMode represents how to deal with the legacy applications on the
cluster from which the application is migrated.
Valid options are "Directly", "Gracefully".
Defaults to "Gracefully".
enum:
- Directly
- Gracefully
type: string
statePreservation:
description: |-
StatePreservation defines the policy for preserving and restoring state data
during failover events for stateful applications.

When an application fails over from one cluster to another, this policy enables
the extraction of critical data from the original resource configuration.
Upon successful migration, the extracted data is then re-injected into the new
resource, ensuring that the application can resume operation with its previous
state intact.
This is particularly useful for stateful applications where maintaining data
consistency across failover events is crucial.
If not specified, means no state data will be preserved.

Note: This requires the StatefulFailoverInjection feature gate to be enabled,
which is alpha.
properties:
rules:
description: |-
Rules contains a list of StatePreservationRule configurations.
Each rule specifies a JSONPath expression targeting specific pieces of
state data to be preserved during failover events. An AliasLabelName is associated
with each rule, serving as a label key when the preserved data is passed
to the new cluster.
items:
description: |-
StatePreservationRule defines a single rule for state preservation.
It includes a JSONPath expression and an alias name that will be used
as a label key when passing state information to the new cluster.
properties:
aliasLabelName:
description: |-
AliasLabelName is the name that will be used as a label key when the preserved
data is passed to the new cluster. This facilitates the injection of the
preserved state back into the application resources during recovery.
type: string
jsonPath:
description: |-
JSONPath is the JSONPath template used to identify the state data
to be preserved from the original resource configuration.
The JSONPath syntax follows the Kubernetes specification:
https://kubernetes.io/docs/reference/kubectl/jsonpath/

Note: The JSONPath expression will start searching from the "status" field of
the API resource object by default. For example, to extract the "availableReplicas"
from a Deployment, the JSONPath expression should be "{.availableReplicas}", not
"{.status.availableReplicas}".
type: string
required:
- aliasLabelName
- jsonPath
type: object
type: array
required:
- rules
type: object
type: object
type: object
gracefulEvictionTasks:
description: |-

@@ -454,10 +727,12 @@ spec:
description: |-
PurgeMode represents how to deal with the legacy applications on the
cluster from which the application is migrated.
Valid options are "Immediately", "Graciously" and "Never".
Valid options are "Immediately", "Directly", "Graciously", "Gracefully" and "Never".
enum:
- Immediately
- Directly
- Graciously
- Gracefully
- Never
type: string
reason:
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.16.5
controller-gen.kubebuilder.io/version: v0.18.0
name: works.work.karmada.io
spec:
group: work.karmada.io

@@ -3,6 +3,7 @@ resources:
- bases/multicluster/multicluster.x-k8s.io_serviceimports.yaml
- bases/policy/policy.karmada.io_clusteroverridepolicies.yaml
- bases/policy/policy.karmada.io_clusterpropagationpolicies.yaml
- bases/policy/policy.karmada.io_clustertaintpolicies.yaml
- bases/policy/policy.karmada.io_federatedresourcequotas.yaml
- bases/policy/policy.karmada.io_overridepolicies.yaml
- bases/policy/policy.karmada.io_propagationpolicies.yaml
@@ -152,7 +152,6 @@ app: {{$name}}-controller-manager
{{- end }}
{{- end -}}

{{- define "karmada.scheduler.labels" -}}
{{ $name := include "karmada.name" . }}
{{- if .Values.scheduler.labels -}}

@@ -173,7 +172,6 @@ app: {{$name}}-scheduler
{{- end }}
{{- end -}}

{{- define "karmada.descheduler.labels" -}}
{{ $name := include "karmada.name" . }}
{{- if .Values.descheduler.labels -}}

@@ -207,7 +205,6 @@ app: {{$name}}
{{- end -}}
{{- end -}}

{{- define "karmada.webhook.labels" -}}
{{ $name := include "karmada.name" .}}
{{- if .Values.webhook.labels }}

@@ -306,6 +303,10 @@ app: {{- include "karmada.name" .}}-search
{{- include "karmada.commonLabels" . -}}
{{- end -}}

{{- define "karmada.preUpdateJob.labels" -}}
{{- include "karmada.commonLabels" . -}}
{{- end -}}

{{- define "karmada.staticResourceJob.labels" -}}
{{- include "karmada.commonLabels" . -}}
{{- end -}}

@@ -356,6 +357,16 @@ app: {{- include "karmada.name" .}}-search
secretName: {{ $name }}-cert
{{- end -}}

{{/*
Common env for POD_IP
*/}}
{{- define "karmada.env.podIP" -}}
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
{{- end -}}
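For reference, a call site such as `{{- include "karmada.env.podIP" . | nindent 12 }}` should render the following container snippet, with the Downward API supplying the pod's own IP:

```yaml
env:
  - name: POD_IP
    valueFrom:
      fieldRef:
        fieldPath: status.podIP
```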
{{/*
Return the proper karmada internal etcd image name
*/}}

@@ -545,35 +556,82 @@ Return the proper Docker Image Registry Secret Names
{{ include "common.images.pullSecrets" (dict "images" (list .Values.cfssl.image .Values.kubectl.image .Values.etcd.internal.image .Values.agent.image .Values.apiServer.image .Values.controllerManager.image .Values.descheduler.image .Values.schedulerEstimator.image .Values.scheduler.image .Values.webhook.image .Values.aggregatedApiServer.image .Values.metricsAdapter.image .Values.search.image .Values.kubeControllerManager.image) "global" .Values.global) }}
{{- end -}}

{{- /*
Generate the --feature-gates command line argument for karmada-controllerManager.
Iterates over .Values.controllerManager.featureGates and constructs a comma-separated key=value list.
If any feature gates are set, outputs: --feature-gates=Foo=true,Bar=false
If none are set, outputs nothing.
*/ -}}
{{- define "karmada.controllerManager.featureGates" -}}
{{- if (not (empty .Values.controllerManager.featureGates)) }}
{{- $featureGatesFlag := "" -}}
{{- if .Values.controllerManager.featureGates -}}
{{- $featureGates := list -}}
{{- range $key, $value := .Values.controllerManager.featureGates -}}
{{- if not (empty (toString $value)) }}
{{- $featureGatesFlag = cat $featureGatesFlag $key "=" $value "," -}}
{{- if not (empty (toString $value)) -}}
{{- $featureGates = append $featureGates (printf "%s=%s" $key (toString $value)) -}}
{{- end -}}
{{- end -}}

{{- if gt (len $featureGatesFlag) 0 }}
{{- $featureGatesFlag := trimSuffix "," $featureGatesFlag | nospace -}}
{{- printf "%s=%s" "--feature-gates" $featureGatesFlag -}}
{{- if $featureGates -}}
{{- printf "--feature-gates=%s" (join "," $featureGates) | nospace -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{- define "karmada.schedulerEstimator.featureGates" -}}
{{- $featureGatesArg := index . "featureGatesArg" -}}
{{- if (not (empty $featureGatesArg)) }}
{{- $featureGatesFlag := "" -}}
{{- range $key, $value := $featureGatesArg -}}
{{- if not (empty (toString $value)) }}
{{- $featureGatesFlag = cat $featureGatesFlag $key "=" $value "," -}}
{{- /*
Generate the --feature-gates command line argument for karmada-webhook.
Iterates over .Values.webhook.featureGates and constructs a comma-separated key=value list.
If any feature gates are set, outputs: --feature-gates=Foo=true,Bar=false
If none are set, outputs nothing.
*/ -}}
{{- define "karmada.webhook.featureGates" -}}
{{- if .Values.webhook.featureGates -}}
{{- $featureGates := list -}}
{{- range $key, $value := .Values.webhook.featureGates -}}
{{- if not (empty (toString $value)) -}}
{{- $featureGates = append $featureGates (printf "%s=%s" $key (toString $value)) -}}
{{- end -}}
{{- end -}}
{{- if $featureGates -}}
{{- printf "--feature-gates=%s" (join "," $featureGates) | nospace -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{- if gt (len $featureGatesFlag) 0 }}
{{- $featureGatesFlag := trimSuffix "," $featureGatesFlag | nospace -}}
{{- printf "%s=%s" "--feature-gates" $featureGatesFlag -}}
{{- /*
Generate the --feature-gates command line argument for karmada-scheduler.
Iterates over .Values.scheduler.featureGates and constructs a comma-separated key=value list.
If any feature gates are set, outputs: --feature-gates=Foo=true,Bar=false
If none are set, outputs nothing.
*/ -}}
{{- define "karmada.scheduler.featureGates" -}}
{{- if .Values.scheduler.featureGates -}}
{{- $featureGates := list -}}
{{- range $key, $value := .Values.scheduler.featureGates -}}
{{- if not (empty (toString $value)) -}}
{{- $featureGates = append $featureGates (printf "%s=%s" $key (toString $value)) -}}
{{- end -}}
{{- end -}}
{{- if $featureGates -}}
{{- printf "--feature-gates=%s" (join "," $featureGates) | nospace -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{- /*
Generate the --feature-gates command line argument for karmada-schedulerEstimator.
Iterates over .Values.schedulerEstimator.featureGates and constructs a comma-separated key=value list.
If any feature gates are set, outputs: --feature-gates=Foo=true,Bar=false
If none are set, outputs nothing.
*/ -}}
{{- define "karmada.schedulerEstimator.featureGates" -}}
{{- if .Values.schedulerEstimator.featureGates -}}
{{- $featureGates := list -}}
{{- range $key, $value := .Values.schedulerEstimator.featureGates -}}
{{- if not (empty (toString $value)) -}}
{{- $featureGates = append $featureGates (printf "%s=%s" $key (toString $value)) -}}
{{- end -}}
{{- end -}}
{{- if $featureGates -}}
{{- printf "--feature-gates=%s" (join "," $featureGates) | nospace -}}
{{- end -}}
{{- end -}}
{{- end -}}
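A sketch of the values that drive these helpers; with a map like this, each helper emits a single `--feature-gates=Bar=false,Foo=true` argument (gate names taken from the helpers' own docstrings, not real Karmada gates; Helm ranges over maps in sorted key order):

```yaml
scheduler:
  featureGates:
    Foo: true
    Bar: false
```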
@@ -274,4 +274,18 @@ webhooks:
sideEffects: None
admissionReviewVersions: [ "v1" ]
timeoutSeconds: 3
- name: resourcebinding.karmada.io
rules:
- operations: ["CREATE", "UPDATE"]
apiGroups: ["work.karmada.io"]
apiVersions: ["*"]
resources: ["resourcebindings"]
scope: "Namespaced"
clientConfig:
url: https://{{ $name }}-webhook.{{ $namespace }}.svc:443/validate-resourcebinding
{{- include "karmada.webhook.caBundle" . | nindent 6 }}
failurePolicy: Fail
sideEffects: NoneOnDryRun
admissionReviewVersions: ["v1"]
timeoutSeconds: 3
{{- end -}}
@@ -101,6 +101,8 @@ spec:
- name: {{ $name }}
image: {{ template "karmada.agent.image" . }}
imagePullPolicy: {{ .Values.agent.image.pullPolicy }}
env:
{{- include "karmada.env.podIP" . | nindent 12 }}
command:
- /bin/karmada-agent
- --karmada-kubeconfig=/etc/kubeconfig/kubeconfig

@@ -110,7 +112,8 @@ spec:
{{- end }}
- --cluster-status-update-frequency=10s
- --leader-elect-resource-namespace={{ include "karmada.namespace" . }}
- --health-probe-bind-address=0.0.0.0:10357
- --metrics-bind-address=$(POD_IP):8080
- --health-probe-bind-address=$(POD_IP):10357
- --v=4
livenessProbe:
httpGet:
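The same pattern recurs across all the component templates below: POD_IP is injected via the shared helper, and the kubelet expands `$(POD_IP)` in command arguments, so each listener binds to the pod's own address instead of 0.0.0.0. A rendered sketch (image and flag set abridged):

```yaml
containers:
  - name: karmada-agent
    env:
      - name: POD_IP
        valueFrom:
          fieldRef:
            fieldPath: status.podIP
    command:
      - /bin/karmada-agent
      - --metrics-bind-address=$(POD_IP):8080
      - --health-probe-bind-address=$(POD_IP):10357
```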
@@ -43,6 +43,8 @@ spec:
- name: apiserver-cert
mountPath: /etc/kubernetes/pki
readOnly: true
env:
{{- include "karmada.env.podIP" . | nindent 12 }}
command:
- /bin/karmada-aggregated-apiserver
- --kubeconfig=/etc/kubeconfig

@@ -67,6 +69,7 @@ spec:
- --audit-log-maxage=0
- --audit-log-maxbackup=0
- --tls-min-version=VersionTLS13
- --bind-address=$(POD_IP)
resources:
{{- toYaml .Values.aggregatedApiServer.resources | nindent 12 }}
readinessProbe:

@@ -50,13 +50,15 @@ spec:
- name: {{ $name }}-controller-manager
image: {{ template "karmada.controllerManager.image" . }}
imagePullPolicy: {{ .Values.controllerManager.image.pullPolicy }}
env:
{{- include "karmada.env.podIP" . | nindent 12 }}
command:
- /bin/karmada-controller-manager
- --kubeconfig=/etc/kubeconfig
- --cluster-status-update-frequency=10s
- --leader-elect-resource-namespace={{ $systemNamespace }}
- --health-probe-bind-address=0.0.0.0:10357
- --metrics-bind-address=:8080
- --metrics-bind-address=$(POD_IP):8080
- --health-probe-bind-address=$(POD_IP):10357
- --v=2
{{- if .Values.controllerManager.controllers }}
- --controllers={{ .Values.controllerManager.controllers }}

@@ -47,11 +47,13 @@ spec:
- name: {{ $name }}-descheduler
image: {{ template "karmada.descheduler.image" . }}
imagePullPolicy: {{ .Values.descheduler.image.pullPolicy }}
env:
{{- include "karmada.env.podIP" . | nindent 12 }}
command:
- /bin/karmada-descheduler
- --kubeconfig=/etc/kubeconfig
- --metrics-bind-address=0.0.0.0:8080
- --health-probe-bind-address=0.0.0.0:10358
- --metrics-bind-address=$(POD_IP):8080
- --health-probe-bind-address=$(POD_IP):10358
- --leader-elect-resource-namespace={{ $systemNamespace }}
- --scheduler-estimator-ca-file=/etc/karmada/pki/server-ca.crt
- --scheduler-estimator-cert-file=/etc/karmada/pki/karmada.crt

@@ -41,10 +41,12 @@ spec:
- name: apiserver-cert
mountPath: /etc/kubernetes/pki
readOnly: true
env:
{{- include "karmada.env.podIP" . | nindent 12 }}
command:
- /bin/karmada-metrics-adapter
- --kubeconfig=/etc/kubeconfig
- --metrics-bind-address=:8080
- --metrics-bind-address=$(POD_IP):8080
- --authentication-kubeconfig=/etc/kubeconfig
- --authorization-kubeconfig=/etc/kubeconfig
- --tls-cert-file=/etc/kubernetes/pki/karmada.crt

@@ -53,6 +55,7 @@ spec:
- --audit-log-maxage=0
- --audit-log-maxbackup=0
- --tls-min-version=VersionTLS13
- --bind-address=$(POD_IP)
resources:
{{- toYaml .Values.metricsAdapter.resources | nindent 12 }}
readinessProbe:

@@ -44,6 +44,8 @@ spec:
- name: karmada-scheduler-estimator
image: {{ template "karmada.schedulerEstimator.image" $ }}
imagePullPolicy: {{ $.Values.schedulerEstimator.image.pullPolicy }}
env:
{{- include "karmada.env.podIP" . | nindent 12 }}
command:
- /bin/karmada-scheduler-estimator
- --kubeconfig=/etc/{{ $clusterName }}-kubeconfig

@@ -51,11 +53,16 @@ spec:
- --grpc-auth-cert-file=/etc/karmada/pki/karmada.crt
- --grpc-auth-key-file=/etc/karmada/pki/karmada.key
- --grpc-client-ca-file=/etc/karmada/pki/server-ca.crt
- --metrics-bind-address=0.0.0.0:8080
- --health-probe-bind-address=0.0.0.0:10351
{{- with (include "karmada.schedulerEstimator.featureGates" (dict "featureGatesArg" $.Values.schedulerEstimator.featureGates)) }}
- --metrics-bind-address=$(POD_IP):8080
- --health-probe-bind-address=$(POD_IP):10351
{{- /*
We use '$' to refer to the root context.
Inside this 'range' loop, '.' refers to the current item from '.Values.schedulerEstimator.memberClusters'.
Using '$' ensures that we can access the top-level '.Values.schedulerEstimator.featureGates'.
*/}}
{{- with (include "karmada.schedulerEstimator.featureGates" $) }}
- {{ . }}
{{- end}}
{{- end }}
livenessProbe:
httpGet:
path: /healthz
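Hypothetical values for the estimator, illustrating why the helper is invoked with the root context `$`: the enclosing `range` iterates over per-cluster entries while the gates live at the top level (the shape of a memberClusters entry is an assumption here):

```yaml
schedulerEstimator:
  featureGates:
    Foo: true                 # example gate from the helper's docstring
  memberClusters:
    - clusterName: member1    # assumed entry shape; one Deployment per entry
```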
@@ -47,15 +47,23 @@ spec:
- name: {{ $name }}-scheduler
image: {{ template "karmada.scheduler.image" .}}
imagePullPolicy: {{ .Values.scheduler.image.pullPolicy }}
env:
{{- include "karmada.env.podIP" . | nindent 12 }}
command:
- /bin/karmada-scheduler
- --kubeconfig=/etc/kubeconfig
- --metrics-bind-address=0.0.0.0:8080
- --health-probe-bind-address=0.0.0.0:10351
- --metrics-bind-address=$(POD_IP):8080
- --health-probe-bind-address=$(POD_IP):10351
- --leader-elect-resource-namespace={{ $systemNamespace }}
- --scheduler-estimator-ca-file=/etc/karmada/pki/server-ca.crt
- --scheduler-estimator-cert-file=/etc/karmada/pki/karmada.crt
- --scheduler-estimator-key-file=/etc/karmada/pki/karmada.key
{{- if .Values.scheduler.enableSchedulerEstimator }}
- --enable-scheduler-estimator=true
{{- end }}
{{- with (include "karmada.scheduler.featureGates" .) }}
- {{ . }}
{{- end }}
livenessProbe:
httpGet:
path: /healthz

@@ -56,6 +56,8 @@ spec:
- name: kubeconfig-secret
subPath: kubeconfig
mountPath: /etc/kubeconfig
env:
{{- include "karmada.env.podIP" . | nindent 12 }}
command:
- /bin/karmada-search
- --kubeconfig=/etc/kubeconfig

@@ -80,6 +82,7 @@ spec:
- --audit-log-maxage=0
- --audit-log-maxbackup=0
- --tls-min-version=VersionTLS13
- --bind-address=$(POD_IP)
livenessProbe:
httpGet:
path: /livez

@@ -90,7 +93,7 @@ spec:
periodSeconds: 15
timeoutSeconds: 5
resources:
{{- toYaml .Values.apiServer.resources | nindent 12 }}
{{- toYaml .Values.search.resources | nindent 12 }}
priorityClassName: {{ .Values.search.priorityClassName }}
volumes:
{{- include "karmada.search.kubeconfig.volume" . | nindent 8 }}

@@ -47,12 +47,19 @@ spec:
- name: {{ $name }}-webhook
image: {{ template "karmada.webhook.image" . }}
imagePullPolicy: {{ .Values.webhook.image.pullPolicy }}
env:
{{- include "karmada.env.podIP" . | nindent 12 }}
command:
- /bin/karmada-webhook
- --kubeconfig=/etc/kubeconfig
- --bind-address=0.0.0.0
- --bind-address=$(POD_IP)
- --metrics-bind-address=$(POD_IP):8080
- --health-probe-bind-address=$(POD_IP):8000
- --secure-port=8443
- --cert-dir=/var/serving-cert
{{- with (include "karmada.webhook.featureGates" .) }}
- {{ . }}
{{- end }}
ports:
- containerPort: 8443
- containerPort: 8080
@@ -8,7 +8,7 @@ metadata:
name: {{ $name }}-crds-kustomization
namespace: {{ $namespace }}
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "2"
{{- if "karmada.preInstallJob.labels" }}
labels:

@@ -28,7 +28,7 @@ metadata:
name: {{ $name }}-crds-autoscaling-bases
namespace: {{ $namespace }}
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "2"
{{- if "karmada.preInstallJob.labels" }}
labels:

@@ -48,7 +48,7 @@ metadata:
name: {{ $name }}-crds-config-bases
namespace: {{ $namespace }}
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "2"
{{- if "karmada.preInstallJob.labels" }}
labels:

@@ -68,7 +68,7 @@ metadata:
name: {{ $name }}-crds-multicluster-bases
namespace: {{ $namespace }}
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "2"
{{- if "karmada.preInstallJob.labels" }}
labels:

@@ -88,7 +88,7 @@ metadata:
name: {{ $name }}-crds-networking-bases
namespace: {{ $namespace }}
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "2"
{{- if "karmada.preInstallJob.labels" }}
labels:

@@ -109,7 +109,7 @@ metadata:
name: {{ $name }}-crds-policy-bases
namespace: {{ $namespace }}
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "2"
{{- if "karmada.preInstallJob.labels" }}
labels:

@@ -129,7 +129,7 @@ metadata:
name: {{ $name }}-crds-remedy-bases
namespace: {{ $namespace }}
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "2"
{{- if "karmada.preInstallJob.labels" }}
labels:

@@ -149,7 +149,7 @@ metadata:
name: {{ $name }}-crds-work-bases
namespace: {{ $namespace }}
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "2"
{{- if "karmada.preInstallJob.labels" }}
labels:

@@ -169,7 +169,7 @@ metadata:
name: {{ $name }}-crds-apps-bases
namespace: {{ $namespace }}
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "2"
{{- if "karmada.preInstallJob.labels" }}
labels:

@@ -189,7 +189,7 @@ metadata:
name: {{ $name }}-hook-job
namespace: {{ $namespace }}
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "1"
{{- if "karmada.preInstallJob.labels" }}
labels:

@@ -201,7 +201,7 @@ kind: ClusterRole
metadata:
name: {{ $name }}-hook-job
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "1"
{{- if "karmada.preInstallJob.labels" }}
labels:

@@ -219,7 +219,7 @@ kind: ClusterRoleBinding
metadata:
name: {{ $name }}-hook-job
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "1"
{{- if "karmada.preInstallJob.labels" }}
labels:

@@ -242,7 +242,7 @@ metadata:
name: {{ $name }}-static-resources
namespace: {{ $namespace }}
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "2"
{{- if "karmada.preInstallJob.labels" }}
labels:

@@ -268,7 +268,7 @@ metadata:
name: {{ $name }}-crds-patches
namespace: {{ $namespace }}
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "2"
{{- if "karmada.preInstallJob.labels" }}
labels:

@@ -440,8 +440,8 @@ spec:
mkdir -p /opt/configs
mkdir -p /opt/certs
cp -r -L /opt/mount/* /opt/configs/
openssl req -x509 -sha256 -new -nodes -days 365 -newkey rsa:{{ .Values.certs.auto.rsaSize }} -keyout "/opt/certs/server-ca.key" -out "/opt/certs/server-ca.crt" -subj "/C=xx/ST=x/L=x/O=x/OU=x/CN=ca/emailAddress=x/"
openssl req -x509 -sha256 -new -nodes -days 365 -newkey rsa:{{ .Values.certs.auto.rsaSize }} -keyout "/opt/certs/front-proxy-ca.key" -out "/opt/certs/front-proxy-ca.crt" -subj "/C=xx/ST=x/L=x/O=x/OU=x/CN=ca/emailAddress=x/"
openssl req -x509 -sha256 -new -nodes -days {{ .Values.certs.auto.rootCAExpiryDays }} -newkey rsa:{{ .Values.certs.auto.rsaSize }} -keyout "/opt/certs/server-ca.key" -out "/opt/certs/server-ca.crt" -subj "/C=xx/ST=x/L=x/O=x/OU=x/CN=ca/emailAddress=x/"
openssl req -x509 -sha256 -new -nodes -days {{ .Values.certs.auto.rootCAExpiryDays }} -newkey rsa:{{ .Values.certs.auto.rsaSize }} -keyout "/opt/certs/front-proxy-ca.key" -out "/opt/certs/front-proxy-ca.crt" -subj "/C=xx/ST=x/L=x/O=x/OU=x/CN=ca/emailAddress=x/"
echo '{"signing":{"default":{"expiry":{{ printf `"%s"` .Values.certs.auto.expiry }},"usages":["signing","key encipherment","client auth","server auth"]}}}' > "/opt/certs/server-ca-config.json"
echo '{"CN":"system:admin","hosts":{{ tpl (toJson .Values.certs.auto.hosts) . }},"names":[{"O":"system:masters"}],"key":{"algo":"rsa","size":{{ .Values.certs.auto.rsaSize }}}}' | cfssl gencert -ca=/opt/certs/server-ca.crt -ca-key=/opt/certs/server-ca.key -config=/opt/certs/server-ca-config.json - | cfssljson -bare /opt/certs/karmada
echo '{"signing":{"default":{"expiry":{{ printf `"%s"` .Values.certs.auto.expiry }},"usages":["signing","key encipherment","client auth","server auth"]}}}' > "/opt/certs/front-proxy-ca-config.json"
@@ -0,0 +1,98 @@
{{- $name := include "karmada.name" . -}}
{{- $namespace := include "karmada.namespace" . -}}
{{- if eq .Values.installMode "host" }}
{{- if eq .Values.certs.mode "auto" }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ $name }}-static-resources
namespace: {{ $namespace }}
annotations:
"helm.sh/hook": pre-upgrade
"helm.sh/hook-weight": "2"
data:
{{- print "webhook-configuration.yaml: " | nindent 6 }} |-
{{- include "karmada.webhook.configuration" . | nindent 8 }}
{{- print "system-namespace.yaml: " | nindent 6 }} |-
{{- include "karmada.systemNamespace" . | nindent 8 }}
{{- print "karmada-aggregated-apiserver-apiservice.yaml: " | nindent 6 }} |-
{{- include "karmada.apiservice" . | nindent 8 }}
{{- print "cluster-proxy-admin-rbac.yaml: " | nindent 6 }} |-
{{- include "karmada.proxyRbac" . | nindent 8 }}
{{- print "bootstrap-token-configuration.yaml: " | nindent 6 }} |-
{{- include "karmada.bootstrapToken.configuration" . | nindent 8 }}
{{- print "clusterrole.yaml: " | nindent 6 }} |-
{{- include "karmada.clusterrole" . | nindent 8 }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ $name }}-crds-patches
namespace: {{ $namespace }}
annotations:
"helm.sh/hook": pre-upgrade
"helm.sh/hook-weight": "2"
data:
{{- print "webhook_in_clusterresourcebindings.yaml: " | nindent 6 }} |-
{{- include "karmada.crd.patch.webhook.clusterresourcebinding" . | nindent 8 }}
{{- print "webhook_in_resourcebindings.yaml: " | nindent 6 }} |-
{{- include "karmada.crd.patch.webhook.resourcebinding" . | nindent 8 }}
---
apiVersion: batch/v1
kind: Job
metadata:
name: "{{ $name }}-pre-upgrade"
namespace: {{ $namespace }}
annotations:
"helm.sh/hook": pre-upgrade
"helm.sh/hook-weight": "3"
"helm.sh/hook-delete-policy": {{ .Values.preUpdateJob.hookDeletePolicy }}
{{- if "karmada.preUpdateJob.labels" }}
labels:
{{- include "karmada.preUpdateJob.labels" . | nindent 4 }}
{{- end }}
spec:
parallelism: 1
completions: 1
template:
metadata:
name: {{ $name }}-pre-upgrade
labels:
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
app.kubernetes.io/instance: {{ $name | quote }}
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
spec:
{{- include "karmada.imagePullSecrets" . | nindent 6 }}
{{- with .Values.preUpdateJob.tolerations}}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.preUpdateJob.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ $name }}-hook-job
restartPolicy: Never
containers:
- name: pre-upgrade
image: {{ template "karmada.kubectl.image" . }}
imagePullPolicy: {{ .Values.kubectl.image.pullPolicy }}
command:
- /bin/sh
- -c
- |
set -ex
# Fetch certs from existing secret
karmada_ca=$(kubectl get secret {{ $name }}-cert -n {{ $namespace }} -o jsonpath='{.data.server-ca\.crt}')
kubectl get configmap {{ $name }}-static-resources -n {{ $namespace }} -o yaml | sed -e "s/{{ print "{{ ca_crt }}" }}/${karmada_ca}/g" | kubectl apply -f -
kubectl get configmap {{ $name }}-crds-patches -n {{ $namespace }} -o yaml | sed -e "s/{{ print "{{ ca_crt }}" }}/${karmada_ca}/g" | kubectl apply -f -
resources:
requests:
cpu: 50m
memory: 64Mi
limits:
cpu: 100m
memory: 128Mi
{{- end }}
{{- end }}
@@ -98,6 +98,12 @@ preInstallJob:
## Define policies that determine when to delete corresponding hook resources: before-hook-creation,hook-succeeded,hook-failed
hookDeletePolicy: "hook-succeeded"

preUpdateJob:
tolerations: []
nodeSelector: {}
## Define policies that determine when to delete corresponding hook resources: before-hook-creation,hook-succeeded,hook-failed
hookDeletePolicy: "hook-succeeded"

## static-resource job config
staticResourceJob:
tolerations: []

@@ -128,7 +134,10 @@ certs:
mode: auto
auto:
## @param certs.auto.expiry expiry of the certificate
## Note: The expiry value should not exceed the rootCA expiry time (rootCAExpiryDays * 24h)
expiry: 43800h
## @param certs.auto.rootCAExpiryDays expiry of the root CA certificate in days, defaults to 3650 days (10 years)
rootCAExpiryDays: 3650
## @param certs.auto.hosts hosts of the certificate
hosts: [
"kubernetes.default.svc",

@@ -234,6 +243,10 @@ scheduler:
podDisruptionBudget: *podDisruptionBudget
## @param scheduler.priorityClassName the priority class name for the karmada-scheduler
priorityClassName: "system-node-critical"
## @param scheduler.enableSchedulerEstimator enable scheduler estimator
enableSchedulerEstimator: false
## @param scheduler.featureGates A set of key=value pairs that describe feature gates for karmada-scheduler
featureGates: {}

## webhook config
webhook:

@@ -288,10 +301,12 @@ webhook:
rollingUpdate:
maxUnavailable: 0
maxSurge: 50%
## @param apiServer.podDisruptionBudget
## @param webhook.podDisruptionBudget
podDisruptionBudget: *podDisruptionBudget
## @param webhook.priorityClassName the priority class name for the karmada-webhook
priorityClassName: "system-node-critical"
## @param webhook.featureGates A set of key=value pairs that describe feature gates for karmada-webhook
featureGates: {}

## controller manager config
controllerManager:
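A hedged values override exercising the new knobs together; gate names are placeholders, and the leaf expiry is kept inside the root CA window as the note above requires:

```yaml
certs:
  auto:
    rootCAExpiryDays: 1825    # root CA valid ~5 years (1825 * 24h = 43800h)
    expiry: 8760h             # leaf certs for 1 year, well under the CA window
scheduler:
  featureGates:
    SomeAlphaFeature: true    # placeholder gate name
webhook:
  featureGates:
    SomeAlphaFeature: true
```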
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

FROM alpine:3.21.3
FROM alpine:3.22.1

ARG BINARY

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

FROM alpine:3.21.3
FROM alpine:3.22.1

ARG BINARY
ARG TARGETPLATFORM
@ -30,13 +30,16 @@ import (
|
|||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/util/flowcontrol"
|
||||
cliflag "k8s.io/component-base/cli/flag"
|
||||
"k8s.io/component-base/logs"
|
||||
logsv1 "k8s.io/component-base/logs/api/v1"
|
||||
"k8s.io/component-base/term"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/utils/ptr"
|
||||
controllerruntime "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/cache"
|
||||
"sigs.k8s.io/controller-runtime/pkg/config"
|
||||
"sigs.k8s.io/controller-runtime/pkg/healthz"
|
||||
crtlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
|
||||
ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
|
||||
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
|
||||
|
||||
"github.com/karmada-io/karmada/cmd/agent/app/options"
|
||||
|
@ -62,6 +65,7 @@ import (
|
|||
"github.com/karmada-io/karmada/pkg/util/fedinformer/typedmanager"
|
||||
"github.com/karmada-io/karmada/pkg/util/gclient"
|
||||
"github.com/karmada-io/karmada/pkg/util/helper"
|
||||
"github.com/karmada-io/karmada/pkg/util/indexregistry"
|
||||
"github.com/karmada-io/karmada/pkg/util/names"
|
||||
"github.com/karmada-io/karmada/pkg/util/objectwatcher"
|
||||
"github.com/karmada-io/karmada/pkg/util/restmapper"
|
||||
|
@ -71,13 +75,32 @@ import (
|
|||
|
||||
// NewAgentCommand creates a *cobra.Command object with default parameters
|
||||
func NewAgentCommand(ctx context.Context) *cobra.Command {
|
||||
logConfig := logsv1.NewLoggingConfiguration()
|
||||
fss := cliflag.NamedFlagSets{}
|
||||
|
||||
// Set klog flags
|
||||
logsFlagSet := fss.FlagSet("logs")
|
||||
logs.AddFlags(logsFlagSet, logs.SkipLoggingConfigurationFlags())
|
||||
logsv1.AddFlags(logConfig, logsFlagSet)
|
||||
klogflag.Add(logsFlagSet)
|
||||
|
||||
genericFlagSet := fss.FlagSet("generic")
|
||||
genericFlagSet.AddGoFlagSet(flag.CommandLine)
|
||||
opts := options.NewOptions()
|
||||
opts.AddFlags(genericFlagSet, controllers.ControllerNames())
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: names.KarmadaAgentComponentName,
|
||||
Long: `The karmada-agent is the agent of member clusters. It can register a specific cluster to the Karmada control
|
||||
plane and sync manifests from the Karmada control plane to the member cluster. In addition, it also syncs the status of member
|
||||
cluster and manifests to the Karmada control plane.`,
|
||||
PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
|
||||
if err := logsv1.ValidateAndApply(logConfig, features.FeatureGate); err != nil {
|
||||
return err
|
||||
}
|
||||
logs.InitLogs()
|
||||
return nil
|
||||
},
|
||||
RunE: func(_ *cobra.Command, _ []string) error {
|
||||
// validate options
|
||||
if errs := opts.Validate(); len(errs) != 0 {
|
||||
|
@ -98,16 +121,6 @@ cluster and manifests to the Karmada control plane.`,
|
|||
},
|
||||
}
|
||||
|
||||
fss := cliflag.NamedFlagSets{}
|
||||
|
||||
genericFlagSet := fss.FlagSet("generic")
|
||||
genericFlagSet.AddGoFlagSet(flag.CommandLine)
|
||||
opts.AddFlags(genericFlagSet, controllers.ControllerNames())
|
||||
|
||||
// Set klog flags
|
||||
logsFlagSet := fss.FlagSet("logs")
|
||||
klogflag.Add(logsFlagSet)
|
||||
|
||||
cmd.AddCommand(sharedcommand.NewCmdVersion(names.KarmadaAgentComponentName))
|
||||
cmd.Flags().AddFlagSet(genericFlagSet)
|
||||
cmd.Flags().AddFlagSet(logsFlagSet)
|
||||
|
@ -164,22 +177,15 @@ func run(ctx context.Context, opts *options.Options) error {
|
|||
ClusterConfig: clusterConfig,
|
||||
}
|
||||
|
||||
id, err := util.ObtainClusterID(clusterKubeClient)
|
||||
registerOption.ClusterID, err = util.ObtainClusterID(clusterKubeClient)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ok, name, err := util.IsClusterIdentifyUnique(karmadaClient, id)
|
||||
if err != nil {
|
||||
if err = registerOption.Validate(karmadaClient, true); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !ok && opts.ClusterName != name {
|
||||
return fmt.Errorf("the same cluster has been registered with name %s", name)
|
||||
}
|
||||
|
||||
registerOption.ClusterID = id
|
||||
|
||||
clusterSecret, impersonatorSecret, err := util.ObtainCredentialsFromMemberCluster(clusterKubeClient, registerOption)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -220,6 +226,7 @@ func run(ctx context.Context, opts *options.Options) error {
|
|||
clusterv1alpha1.SchemeGroupVersion.WithKind("Cluster").GroupKind().String(): opts.ConcurrentClusterSyncs,
|
||||
},
|
||||
CacheSyncTimeout: opts.ClusterCacheSyncTimeout.Duration,
|
||||
UsePriorityQueue: ptr.To(features.FeatureGate.Enabled(features.ControllerPriorityQueue)),
|
||||
},
|
||||
NewCache: func(config *rest.Config, opts cache.Options) (cache.Cache, error) {
|
||||
opts.DefaultTransform = fedinformer.StripUnusedFields
|
||||
|
@ -235,11 +242,12 @@ func run(ctx context.Context, opts *options.Options) error {
|
|||
return err
|
||||
}
|
||||
|
||||
crtlmetrics.Registry.MustRegister(metrics.ClusterCollectors()...)
|
||||
crtlmetrics.Registry.MustRegister(metrics.ResourceCollectorsForAgent()...)
|
||||
crtlmetrics.Registry.MustRegister(metrics.PoolCollectors()...)
|
||||
ctrlmetrics.Registry.MustRegister(metrics.ClusterCollectors()...)
|
||||
ctrlmetrics.Registry.MustRegister(metrics.ResourceCollectorsForAgent()...)
|
||||
ctrlmetrics.Registry.MustRegister(metrics.PoolCollectors()...)
|
||||
ctrlmetrics.Registry.MustRegister(metrics.NewBuildInfoCollector())
|
||||
|
||||
if err = setupControllers(controllerManager, opts, ctx.Done()); err != nil {
|
||||
if err = setupControllers(ctx, controllerManager, opts); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -251,25 +259,27 @@ func run(ctx context.Context, opts *options.Options) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stopChan <-chan struct{}) error {
|
||||
func setupControllers(ctx context.Context, mgr controllerruntime.Manager, opts *options.Options) error {
|
||||
restConfig := mgr.GetConfig()
|
||||
dynamicClientSet := dynamic.NewForConfigOrDie(restConfig)
|
||||
controlPlaneInformerManager := genericmanager.NewSingleClusterInformerManager(dynamicClientSet, 0, stopChan)
|
||||
controlPlaneInformerManager := genericmanager.NewSingleClusterInformerManager(ctx, dynamicClientSet, 0)
|
||||
controlPlaneKubeClientSet := kubeclientset.NewForConfigOrDie(restConfig)
|
||||
|
||||
// We need a service lister to build a resource interpreter with `ClusterIPServiceResolver`
|
||||
// witch allows connection to the customized interpreter webhook without a cluster DNS service.
|
||||
sharedFactory := informers.NewSharedInformerFactory(controlPlaneKubeClientSet, 0)
|
||||
serviceLister := sharedFactory.Core().V1().Services().Lister()
|
||||
sharedFactory.Start(stopChan)
|
||||
sharedFactory.WaitForCacheSync(stopChan)
|
||||
sharedFactory.Start(ctx.Done())
|
||||
sharedFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
resourceInterpreter := resourceinterpreter.NewResourceInterpreter(controlPlaneInformerManager, serviceLister)
|
||||
if err := mgr.Add(resourceInterpreter); err != nil {
|
||||
return fmt.Errorf("failed to setup custom resource interpreter: %w", err)
|
||||
if err := resourceInterpreter.Start(ctx); err != nil {
|
||||
return fmt.Errorf("failed to start resource interpreter: %w", err)
|
||||
}
|
||||
rateLimiterGetter := util.GetClusterRateLimiterGetter().SetDefaultLimits(opts.ClusterAPIQPS, opts.ClusterAPIBurst)
|
||||
clusterClientOption := &util.ClientOption{RateLimiterGetter: rateLimiterGetter.GetRateLimiter}
|
||||
|
||||
objectWatcher := objectwatcher.NewObjectWatcher(mgr.GetClient(), mgr.GetRESTMapper(), util.NewClusterDynamicClientSetForAgent, resourceInterpreter)
|
||||
objectWatcher := objectwatcher.NewObjectWatcher(mgr.GetClient(), mgr.GetRESTMapper(), util.NewClusterDynamicClientSetForAgent, clusterClientOption, resourceInterpreter)
|
||||
controllerContext := controllerscontext.Context{
|
||||
Mgr: mgr,
|
||||
ObjectWatcher: objectWatcher,
|
||||
|
@ -282,8 +292,6 @@ func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stop
|
|||
ClusterSuccessThreshold: opts.ClusterSuccessThreshold,
|
||||
ClusterFailureThreshold: opts.ClusterFailureThreshold,
|
||||
ClusterCacheSyncTimeout: opts.ClusterCacheSyncTimeout,
|
||||
ClusterAPIQPS: opts.ClusterAPIQPS,
|
||||
ClusterAPIBurst: opts.ClusterAPIBurst,
|
||||
ConcurrentWorkSyncs: opts.ConcurrentWorkSyncs,
|
||||
RateLimiterOptions: opts.RateLimiterOpts,
|
||||
EnableClusterResourceModeling: opts.EnableClusterResourceModeling,
|
||||
|
@ -291,8 +299,9 @@ func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stop
|
|||
CertRotationRemainingTimeThreshold: opts.CertRotationRemainingTimeThreshold,
|
||||
KarmadaKubeconfigNamespace: opts.KarmadaKubeconfigNamespace,
|
||||
},
|
||||
StopChan: stopChan,
|
||||
Context: ctx,
|
||||
ResourceInterpreter: resourceInterpreter,
|
||||
ClusterClientOption: clusterClientOption,
|
||||
}
|
||||
|
||||
if err := controllers.StartControllers(controllerContext, controllersDisabledByDefault); err != nil {
|
||||
|
@ -301,7 +310,7 @@ func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stop
|
|||
|
||||
// Ensure the InformerManager stops when the stop channel closes
|
||||
go func() {
|
||||
<-stopChan
|
||||
<-ctx.Done()
|
||||
genericmanager.StopInstance()
|
||||
}()

@@ -316,10 +325,9 @@ func startClusterStatusController(ctx controllerscontext.Context) (bool, error)
PredicateFunc: helper.NewClusterPredicateOnAgent(ctx.Opts.ClusterName),
TypedInformerManager: typedmanager.GetInstance(),
GenericInformerManager: genericmanager.GetInstance(),
StopChan: ctx.StopChan,
ClusterClientSetFunc: util.NewClusterClientSetForAgent,
ClusterDynamicClientSetFunc: util.NewClusterDynamicClientSetForAgent,
ClusterClientOption: &util.ClientOption{QPS: ctx.Opts.ClusterAPIQPS, Burst: ctx.Opts.ClusterAPIBurst},
ClusterClientOption: ctx.ClusterClientOption,
ClusterStatusUpdateFrequency: ctx.Opts.ClusterStatusUpdateFrequency,
ClusterLeaseDuration: ctx.Opts.ClusterLeaseDuration,
ClusterLeaseRenewIntervalFraction: ctx.Opts.ClusterLeaseRenewIntervalFraction,

@@ -341,7 +349,6 @@ func startExecutionController(ctx controllerscontext.Context) (bool, error) {
EventRecorder: ctx.Mgr.GetEventRecorderFor(execution.ControllerName),
RESTMapper: ctx.Mgr.GetRESTMapper(),
ObjectWatcher: ctx.ObjectWatcher,
PredicateFunc: helper.NewExecutionPredicateOnAgent(),
InformerManager: genericmanager.GetInstance(),
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
}

@@ -357,9 +364,8 @@ func startWorkStatusController(ctx controllerscontext.Context) (bool, error) {
EventRecorder: ctx.Mgr.GetEventRecorderFor(status.WorkStatusControllerName),
RESTMapper: ctx.Mgr.GetRESTMapper(),
InformerManager: genericmanager.GetInstance(),
StopChan: ctx.StopChan,
Context: ctx.Context,
ObjectWatcher: ctx.ObjectWatcher,
PredicateFunc: helper.NewExecutionPredicateOnAgent(),
ClusterDynamicClientSetFunc: util.NewClusterDynamicClientSetForAgent,
ClusterCacheSyncTimeout: ctx.Opts.ClusterCacheSyncTimeout,
ConcurrentWorkStatusSyncs: ctx.Opts.ConcurrentWorkSyncs,

@@ -379,14 +385,14 @@ func startServiceExportController(ctx controllerscontext.Context) (bool, error)
EventRecorder: ctx.Mgr.GetEventRecorderFor(mcs.ServiceExportControllerName),
RESTMapper: ctx.Mgr.GetRESTMapper(),
InformerManager: genericmanager.GetInstance(),
StopChan: ctx.StopChan,
Context: ctx.Context,
WorkerNumber: 3,
PredicateFunc: helper.NewPredicateForServiceExportControllerOnAgent(ctx.Opts.ClusterName),
ClusterDynamicClientSetFunc: util.NewClusterDynamicClientSetForAgent,
ClusterCacheSyncTimeout: ctx.Opts.ClusterCacheSyncTimeout,
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
}
if err := mcs.IndexField(ctx.Mgr); err != nil {
if err := indexregistry.RegisterWorkIndexByFieldSuspendDispatching(ctx.Context, ctx.Mgr); err != nil {
return false, err
}
serviceExportController.RunWorkQueue()

@@ -405,7 +411,7 @@ func startEndpointSliceCollectController(ctx controllerscontext.Context) (enable
Client: ctx.Mgr.GetClient(),
RESTMapper: ctx.Mgr.GetRESTMapper(),
InformerManager: genericmanager.GetInstance(),
StopChan: ctx.StopChan,
Context: ctx.Context,
WorkerNumber: 3,
PredicateFunc: helper.NewPredicateForEndpointSliceCollectControllerOnAgent(opts.ClusterName),
ClusterDynamicClientSetFunc: util.NewClusterDynamicClientSet,

@@ -20,6 +20,7 @@ import (
"os"

"k8s.io/component-base/cli"
"k8s.io/component-base/logs"
_ "k8s.io/component-base/logs/json/register" // for JSON log format registration
"k8s.io/klog/v2"
controllerruntime "sigs.k8s.io/controller-runtime"

@@ -37,5 +38,7 @@ func main() {
controllerruntime.SetLogger(klog.Background())
cmd := app.NewAgentCommand(ctx)
code := cli.Run(cmd)
// Ensure any buffered log entries are flushed
logs.FlushLogs()
os.Exit(code)
}

@@ -21,9 +21,12 @@ import (

"github.com/spf13/cobra"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/component-base/logs"
logsv1 "k8s.io/component-base/logs/api/v1"
"k8s.io/component-base/term"

"github.com/karmada-io/karmada/cmd/aggregated-apiserver/app/options"
"github.com/karmada-io/karmada/pkg/features"
"github.com/karmada-io/karmada/pkg/sharedcli"
"github.com/karmada-io/karmada/pkg/sharedcli/klogflag"
"github.com/karmada-io/karmada/pkg/util/names"

@@ -33,12 +36,30 @@
// NewAggregatedApiserverCommand creates a *cobra.Command object with default parameters
func NewAggregatedApiserverCommand(ctx context.Context) *cobra.Command {
opts := options.NewOptions()
logConfig := logsv1.NewLoggingConfiguration()
fss := cliflag.NamedFlagSets{}

// Set klog flags
logsFlagSet := fss.FlagSet("logs")
logs.AddFlags(logsFlagSet, logs.SkipLoggingConfigurationFlags())
logsv1.AddFlags(logConfig, logsFlagSet)
klogflag.Add(logsFlagSet)

genericFlagSet := fss.FlagSet("generic")
opts.AddFlags(genericFlagSet)

cmd := &cobra.Command{
Use: names.KarmadaAggregatedAPIServerComponentName,
Long: `The karmada-aggregated-apiserver starts an aggregated server.
It is responsible for registering the Cluster API and provides the ability to aggregate APIs,
allowing users to access member clusters from the control plane directly.`,
PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
if err := logsv1.ValidateAndApply(logConfig, features.FeatureGate); err != nil {
return err
}
logs.InitLogs()
return nil
},
RunE: func(_ *cobra.Command, _ []string) error {
if err := opts.Complete(); err != nil {
return err

@@ -53,15 +74,6 @@ allowing users to access member clusters from the control plane directly.`,
},
}

fss := cliflag.NamedFlagSets{}

genericFlagSet := fss.FlagSet("generic")
opts.AddFlags(genericFlagSet)

// Set klog flags
logsFlagSet := fss.FlagSet("logs")
klogflag.Add(logsFlagSet)

cmd.AddCommand(sharedcommand.NewCmdVersion(names.KarmadaAggregatedAPIServerComponentName))
cmd.Flags().AddFlagSet(genericFlagSet)
cmd.Flags().AddFlagSet(logsFlagSet)

@@ -33,8 +33,8 @@ import (
genericfilters "k8s.io/apiserver/pkg/server/filters"
genericoptions "k8s.io/apiserver/pkg/server/options"
"k8s.io/apiserver/pkg/storage/storagebackend"
"k8s.io/apiserver/pkg/util/compatibility"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilversion "k8s.io/apiserver/pkg/util/version"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/klog/v2"

@@ -123,7 +123,7 @@ func (o *Options) Run(ctx context.Context) error {
restConfig := config.GenericConfig.ClientConfig
restConfig.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(o.KubeAPIQPS, o.KubeAPIBurst)
secretLister := config.GenericConfig.SharedInformerFactory.Core().V1().Secrets().Lister()
config.GenericConfig.EffectiveVersion = utilversion.NewEffectiveVersion("1.0")
config.GenericConfig.EffectiveVersion = compatibility.DefaultBuildEffectiveVersion()

server, err := config.Complete().New(restConfig, secretLister)
if err != nil {

@@ -20,6 +20,7 @@ import (
"os"

"k8s.io/component-base/cli"
"k8s.io/component-base/logs"
_ "k8s.io/component-base/logs/json/register" // for JSON log format registration
controllerruntime "sigs.k8s.io/controller-runtime"

@@ -29,6 +30,7 @@ import (
func main() {
ctx := controllerruntime.SetupSignalHandler()
cmd := app.NewAggregatedApiserverCommand(ctx)
code := cli.Run(cmd)
os.Exit(code)
exitCode := cli.Run(cmd)
logs.FlushLogs()
os.Exit(exitCode)
}

@@ -31,17 +31,20 @@ import (
"k8s.io/client-go/rest"
"k8s.io/client-go/util/flowcontrol"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/component-base/logs"
logsv1 "k8s.io/component-base/logs/api/v1"
"k8s.io/component-base/term"
"k8s.io/klog/v2"
resourceclient "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1"
"k8s.io/metrics/pkg/client/custom_metrics"
"k8s.io/metrics/pkg/client/external_metrics"
"k8s.io/utils/ptr"
controllerruntime "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/config"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/healthz"
crtlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
"sigs.k8s.io/controller-runtime/pkg/predicate"

@@ -68,6 +71,7 @@ import (
"github.com/karmada-io/karmada/pkg/controllers/namespace"
"github.com/karmada-io/karmada/pkg/controllers/remediation"
"github.com/karmada-io/karmada/pkg/controllers/status"
"github.com/karmada-io/karmada/pkg/controllers/taint"
"github.com/karmada-io/karmada/pkg/controllers/unifiedauth"
"github.com/karmada-io/karmada/pkg/controllers/workloadrebalancer"
"github.com/karmada-io/karmada/pkg/dependenciesdistributor"

@@ -85,6 +89,7 @@ import (
"github.com/karmada-io/karmada/pkg/util/fedinformer/typedmanager"
"github.com/karmada-io/karmada/pkg/util/gclient"
"github.com/karmada-io/karmada/pkg/util/helper"
"github.com/karmada-io/karmada/pkg/util/indexregistry"
"github.com/karmada-io/karmada/pkg/util/names"
"github.com/karmada-io/karmada/pkg/util/objectwatcher"
"github.com/karmada-io/karmada/pkg/util/overridemanager"

@@ -95,36 +100,51 @@ import (

// NewControllerManagerCommand creates a *cobra.Command object with default parameters
func NewControllerManagerCommand(ctx context.Context) *cobra.Command {
opts := options.NewOptions()

cmd := &cobra.Command{
Use: names.KarmadaControllerManagerComponentName,
Long: `The karmada-controller-manager runs various controllers.
The controllers watch Karmada objects and then talk to the underlying clusters' API servers
to create regular Kubernetes resources.`,
RunE: func(_ *cobra.Command, _ []string) error {
// validate options
if errs := opts.Validate(); len(errs) != 0 {
return errs.ToAggregate()
}

return Run(ctx, opts)
},
}

logConfig := logsv1.NewLoggingConfiguration()
fss := cliflag.NamedFlagSets{}

logsFlagSet := fss.FlagSet("logs")
logs.AddFlags(logsFlagSet, logs.SkipLoggingConfigurationFlags())
logsv1.AddFlags(logConfig, logsFlagSet)
klogflag.Add(logsFlagSet)

genericFlagSet := fss.FlagSet("generic")
// Add the flag(--kubeconfig) that is added by controller-runtime
// Add the flag(--kubeconfig) that is added by controller-runtime.
// (https://github.com/kubernetes-sigs/controller-runtime/blob/v0.11.1/pkg/client/config/config.go#L39),
// and update the flag usage.
genericFlagSet.AddGoFlagSet(flag.CommandLine)
genericFlagSet.Lookup("kubeconfig").Usage = "Path to karmada control plane kubeconfig file."
opts := options.NewOptions()
opts.AddFlags(genericFlagSet, controllers.ControllerNames(), sets.List(controllersDisabledByDefault))

// Set klog flags
logsFlagSet := fss.FlagSet("logs")
klogflag.Add(logsFlagSet)
cmd := &cobra.Command{
Use: names.KarmadaControllerManagerComponentName,
Long: `The karmada-controller-manager runs various controllers.
The controllers watch Karmada objects and then talk to the underlying
clusters' API servers to create regular Kubernetes resources.`,

PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
if err := logsv1.ValidateAndApply(logConfig, features.FeatureGate); err != nil {
return err
}
logs.InitLogs()

// Starting from version 0.15.0, controller-runtime expects its consumers to set a logger through log.SetLogger.
// If SetLogger is not called within the first 30 seconds of a binary's lifetime, it will get
// set to a NullLogSink and report an error. This is to silence the "log.SetLogger(...) was never called; logs will not be displayed" error
// by setting a logger through log.SetLogger.
// For more info, refer to https://github.com/karmada-io/karmada/pull/4885.
controllerruntime.SetLogger(klog.Background())
return nil
},

RunE: func(_ *cobra.Command, _ []string) error {
if errs := opts.Validate(); len(errs) != 0 {
return errs.ToAggregate()
}
return Run(ctx, opts)
},
}

cmd.AddCommand(sharedcommand.NewCmdVersion(names.KarmadaControllerManagerComponentName))
cmd.Flags().AddFlagSet(genericFlagSet)

@@ -132,6 +152,7 @@ to create regular Kubernetes resources.`,

cols, _, _ := term.TerminalSize(cmd.OutOrStdout())
sharedcli.SetUsageAndHelpFunc(cmd, fss, cols)

return cmd
}

@@ -173,6 +194,7 @@ func Run(ctx context.Context, opts *options.Options) error {
schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Namespace"}.GroupKind().String(): opts.ConcurrentNamespaceSyncs,
},
CacheSyncTimeout: opts.ClusterCacheSyncTimeout.Duration,
UsePriorityQueue: ptr.To(features.FeatureGate.Enabled(features.ControllerPriorityQueue)),
},
NewCache: func(config *rest.Config, opts cache.Options) (cache.Cache, error) {
opts.DefaultTransform = fedinformer.StripUnusedFields

@@ -189,15 +211,12 @@ func Run(ctx context.Context, opts *options.Options) error {
return err
}

crtlmetrics.Registry.MustRegister(metrics.ClusterCollectors()...)
crtlmetrics.Registry.MustRegister(metrics.ResourceCollectors()...)
crtlmetrics.Registry.MustRegister(metrics.PoolCollectors()...)
ctrlmetrics.Registry.MustRegister(metrics.ClusterCollectors()...)
ctrlmetrics.Registry.MustRegister(metrics.ResourceCollectors()...)
ctrlmetrics.Registry.MustRegister(metrics.PoolCollectors()...)
ctrlmetrics.Registry.MustRegister(metrics.NewBuildInfoCollector())

if err := helper.IndexWork(ctx, controllerManager); err != nil {
klog.Fatalf("Failed to index Work: %v", err)
}

setupControllers(controllerManager, opts, ctx.Done())
setupControllers(ctx, controllerManager, opts)

// blocks until the context is done.
if err := controllerManager.Start(ctx); err != nil {

@@ -228,6 +247,7 @@ func init() {
controllers["unifiedAuth"] = startUnifiedAuthController
controllers["federatedResourceQuotaSync"] = startFederatedResourceQuotaSyncController
controllers["federatedResourceQuotaStatus"] = startFederatedResourceQuotaStatusController
controllers["federatedResourceQuotaEnforcement"] = startFederatedResourceQuotaEnforcementController
controllers["gracefulEviction"] = startGracefulEvictionController
controllers["applicationFailover"] = startApplicationFailoverController
controllers["federatedHorizontalPodAutoscaler"] = startFederatedHorizontalPodAutoscalerController

@@ -240,42 +260,51 @@ func init() {
controllers["remedy"] = startRemedyController
controllers["workloadRebalancer"] = startWorkloadRebalancerController
controllers["agentcsrapproving"] = startAgentCSRApprovingController
controllers["clustertaintpolicy"] = startClusterTaintPolicyController
}

func startClusterController(ctx controllerscontext.Context) (enabled bool, err error) {
mgr := ctx.Mgr
opts := ctx.Opts

// Indexes are added to help the cluster-controller and TaintManager quickly locate ResourceBinding
// and ClusterResourceBinding resources associated with a given cluster when eviction is needed.
if err := indexregistry.RegisterResourceBindingIndexByFieldCluster(ctx.Context, mgr); err != nil {
return false, err
}
if err := indexregistry.RegisterClusterResourceBindingIndexByFieldCluster(ctx.Context, mgr); err != nil {
return false, err
}

clusterController := &cluster.Controller{
Client: mgr.GetClient(),
EventRecorder: mgr.GetEventRecorderFor(cluster.ControllerName),
ClusterMonitorPeriod: opts.ClusterMonitorPeriod.Duration,
ClusterMonitorGracePeriod: opts.ClusterMonitorGracePeriod.Duration,
ClusterStartupGracePeriod: opts.ClusterStartupGracePeriod.Duration,
FailoverEvictionTimeout: opts.FailoverEvictionTimeout.Duration,
EnableTaintManager: ctx.Opts.EnableTaintManager,
ClusterTaintEvictionRetryFrequency: 10 * time.Second,
ExecutionSpaceRetryFrequency: 10 * time.Second,
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
Client: mgr.GetClient(),
EventRecorder: mgr.GetEventRecorderFor(cluster.ControllerName),
ClusterMonitorPeriod: opts.ClusterMonitorPeriod.Duration,
ClusterMonitorGracePeriod: opts.ClusterMonitorGracePeriod.Duration,
ClusterStartupGracePeriod: opts.ClusterStartupGracePeriod.Duration,
CleanupCheckInterval: 10 * time.Second,
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
}
if err := clusterController.SetupWithManager(mgr); err != nil {
return false, err
}

if ctx.Opts.EnableTaintManager {
if err := cluster.IndexField(mgr); err != nil {
return false, err
}
// Taint-based eviction should only take effect if the Failover feature is enabled
if ctx.Opts.EnableTaintManager && features.FeatureGate.Enabled(features.Failover) {
taintManager := &cluster.NoExecuteTaintManager{
Client: mgr.GetClient(),
EventRecorder: mgr.GetEventRecorderFor(cluster.TaintManagerName),
ClusterTaintEvictionRetryFrequency: 10 * time.Second,
ConcurrentReconciles: 3,
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
EnableNoExecuteTaintEviction: ctx.Opts.FailoverConfiguration.EnableNoExecuteTaintEviction,
NoExecuteTaintEvictionPurgeMode: ctx.Opts.FailoverConfiguration.NoExecuteTaintEvictionPurgeMode,
}
if err := taintManager.SetupWithManager(mgr); err != nil {
return false, err
}
} else {
klog.Infof("Skipping registration of TaintManager, please check that TaintManager option and Failover feature-gate are enabled.")
}

return true, nil
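
The indexregistry calls above reduce to controller-runtime field indexes, which let the cluster controller and TaintManager list only the bindings that target a given cluster instead of scanning everything. A rough sketch of such a registration with controller-runtime's FieldIndexer (the index key and extraction logic here are illustrative, not necessarily Karmada's actual implementation):

package indexsketch

import (
	"context"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
)

// registerBindingClusterIndex indexes ResourceBindings by the clusters they
// are scheduled to, so eviction paths can list bindings per cluster directly.
func registerBindingClusterIndex(ctx context.Context, mgr ctrl.Manager) error {
	return mgr.GetFieldIndexer().IndexField(ctx, &workv1alpha2.ResourceBinding{}, "spec.clusters.name",
		func(obj client.Object) []string {
			rb := obj.(*workv1alpha2.ResourceBinding)
			clusters := make([]string, 0, len(rb.Spec.Clusters))
			for _, c := range rb.Spec.Clusters {
				clusters = append(clusters, c.Name)
			}
			return clusters
		})
}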

@@ -284,7 +313,6 @@ func startClusterController(ctx controllerscontext.Context) (enabled bool, err e
func startClusterStatusController(ctx controllerscontext.Context) (enabled bool, err error) {
mgr := ctx.Mgr
opts := ctx.Opts
stopChan := ctx.StopChan
clusterPredicateFunc := predicate.Funcs{
CreateFunc: func(createEvent event.CreateEvent) bool {
obj := createEvent.Object.(*clusterv1alpha1.Cluster)

@@ -324,10 +352,9 @@ func startClusterStatusController(ctx controllerscontext.Context) (enabled bool,
PredicateFunc: clusterPredicateFunc,
TypedInformerManager: typedmanager.GetInstance(),
GenericInformerManager: genericmanager.GetInstance(),
StopChan: stopChan,
ClusterClientSetFunc: util.NewClusterClientSet,
ClusterDynamicClientSetFunc: util.NewClusterDynamicClientSet,
ClusterClientOption: &util.ClientOption{QPS: opts.ClusterAPIQPS, Burst: opts.ClusterAPIBurst},
ClusterClientOption: ctx.ClusterClientOption,
ClusterStatusUpdateFrequency: opts.ClusterStatusUpdateFrequency,
ClusterLeaseDuration: opts.ClusterLeaseDuration,
ClusterLeaseRenewIntervalFraction: opts.ClusterLeaseRenewIntervalFraction,

@@ -344,6 +371,12 @@ func startClusterStatusController(ctx controllerscontext.Context) (enabled bool,
}

func startBindingController(ctx controllerscontext.Context) (enabled bool, err error) {
// To efficiently clean up Work resources created by the bindingController when a cluster or a RB/CRB is deleted,
// we index the Work resources to reduce the overhead during each check.
if err = indexregistry.RegisterWorkIndexByLabelResourceBindingID(ctx.Context, ctx.Mgr); err != nil {
klog.Errorf("Failed to register index for Work based on ResourceBinding ID: %v", err)
return false, err
}
bindingController := &binding.ResourceBindingController{
Client: ctx.Mgr.GetClient(),
DynamicClient: ctx.DynamicClientSet,

@@ -358,6 +391,10 @@ func startBindingController(ctx controllerscontext.Context) (enabled bool, err e
return false, err
}

if err = indexregistry.RegisterWorkIndexByLabelClusterResourceBindingID(ctx.Context, ctx.Mgr); err != nil {
klog.Errorf("Failed to register index for Work based on ClusterResourceBinding ID: %v", err)
return false, err
}
clusterResourceBindingController := &binding.ClusterResourceBindingController{
Client: ctx.Mgr.GetClient(),
DynamicClient: ctx.DynamicClientSet,

@@ -375,6 +412,11 @@ func startBindingController(ctx controllerscontext.Context) (enabled bool, err e
}

func startBindingStatusController(ctx controllerscontext.Context) (enabled bool, err error) {
// Indexing Work resources allows efficient retrieval for aggregating status.
if err = indexregistry.RegisterWorkIndexByLabelResourceBindingID(ctx.Context, ctx.Mgr); err != nil {
klog.Errorf("Failed to register index for Work based on ResourceBinding ID: %v", err)
return false, err
}
rbStatusController := &status.RBStatusController{
Client: ctx.Mgr.GetClient(),
DynamicClient: ctx.DynamicClientSet,

@@ -388,6 +430,10 @@ func startBindingStatusController(ctx controllerscontext.Context) (enabled bool,
return false, err
}

if err = indexregistry.RegisterWorkIndexByLabelClusterResourceBindingID(ctx.Context, ctx.Mgr); err != nil {
klog.Errorf("Failed to register index for Work based on ClusterResourceBinding ID: %v", err)
return false, err
}
crbStatusController := &status.CRBStatusController{
Client: ctx.Mgr.GetClient(),
DynamicClient: ctx.DynamicClientSet,

@@ -410,7 +456,7 @@ func startExecutionController(ctx controllerscontext.Context) (enabled bool, err
EventRecorder: ctx.Mgr.GetEventRecorderFor(execution.ControllerName),
RESTMapper: ctx.Mgr.GetRESTMapper(),
ObjectWatcher: ctx.ObjectWatcher,
PredicateFunc: helper.NewExecutionPredicate(ctx.Mgr),
WorkPredicateFunc: helper.WorkWithinPushClusterPredicate(ctx.Mgr),
InformerManager: genericmanager.GetInstance(),
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
}

@@ -427,10 +473,11 @@ func startWorkStatusController(ctx controllerscontext.Context) (enabled bool, er
EventRecorder: ctx.Mgr.GetEventRecorderFor(status.WorkStatusControllerName),
RESTMapper: ctx.Mgr.GetRESTMapper(),
InformerManager: genericmanager.GetInstance(),
StopChan: ctx.StopChan,
Context: ctx.Context,
ObjectWatcher: ctx.ObjectWatcher,
PredicateFunc: helper.NewExecutionPredicate(ctx.Mgr),
WorkPredicateFunc: helper.WorkWithinPushClusterPredicate(ctx.Mgr),
ClusterDynamicClientSetFunc: util.NewClusterDynamicClientSet,
ClusterClientOption: ctx.ClusterClientOption,
ClusterCacheSyncTimeout: opts.ClusterCacheSyncTimeout,
ConcurrentWorkStatusSyncs: opts.ConcurrentWorkSyncs,
RateLimiterOptions: ctx.Opts.RateLimiterOptions,

@@ -465,14 +512,16 @@ func startServiceExportController(ctx controllerscontext.Context) (enabled bool,
EventRecorder: ctx.Mgr.GetEventRecorderFor(mcs.ServiceExportControllerName),
RESTMapper: ctx.Mgr.GetRESTMapper(),
InformerManager: genericmanager.GetInstance(),
StopChan: ctx.StopChan,
Context: ctx.Context,
WorkerNumber: 3,
PredicateFunc: helper.NewPredicateForServiceExportController(ctx.Mgr),
ClusterDynamicClientSetFunc: util.NewClusterDynamicClientSet,
ClusterClientOption: ctx.ClusterClientOption,
ClusterCacheSyncTimeout: opts.ClusterCacheSyncTimeout,
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
}
if err = mcs.IndexField(ctx.Mgr); err != nil {
// Add an index so ServiceExportController can quickly find and delete related Work resources.
if err = indexregistry.RegisterWorkIndexByFieldSuspendDispatching(ctx.Context, ctx.Mgr); err != nil {
return false, err
}
serviceExportController.RunWorkQueue()

@@ -491,10 +540,11 @@ func startEndpointSliceCollectController(ctx controllerscontext.Context) (enable
Client: ctx.Mgr.GetClient(),
RESTMapper: ctx.Mgr.GetRESTMapper(),
InformerManager: genericmanager.GetInstance(),
StopChan: ctx.StopChan,
Context: ctx.Context,
WorkerNumber: 3,
PredicateFunc: helper.NewPredicateForEndpointSliceCollectController(ctx.Mgr),
ClusterDynamicClientSetFunc: util.NewClusterDynamicClientSet,
ClusterClientOption: ctx.ClusterClientOption,
ClusterCacheSyncTimeout: opts.ClusterCacheSyncTimeout,
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
}

@@ -582,6 +632,23 @@ func startFederatedResourceQuotaStatusController(ctx controllerscontext.Context)
return true, nil
}

func startFederatedResourceQuotaEnforcementController(ctx controllerscontext.Context) (enabled bool, err error) {
if !features.FeatureGate.Enabled(features.FederatedQuotaEnforcement) {
return false, nil
}
controller := federatedresourcequota.QuotaEnforcementController{
Client: ctx.Mgr.GetClient(),
EventRecorder: ctx.Mgr.GetEventRecorderFor(federatedresourcequota.QuotaEnforcementControllerName),
Recalculation: federatedresourcequota.QuotaRecalculation{
ResyncPeriod: ctx.Opts.FederatedResourceQuotaOptions.ResourceQuotaSyncPeriod,
},
}
if err = controller.SetupWithManager(ctx.Mgr); err != nil {
return false, err
}
return true, nil
}

func startGracefulEvictionController(ctx controllerscontext.Context) (enabled bool, err error) {
rbGracefulEvictionController := &gracefuleviction.RBGracefulEvictionController{
Client: ctx.Mgr.GetClient(),

@@ -634,7 +701,7 @@ func startFederatedHorizontalPodAutoscalerController(ctx controllerscontext.Cont
go custom_metrics.PeriodicallyInvalidate(
apiVersionsGetter,
ctx.Opts.HPAControllerConfiguration.HorizontalPodAutoscalerSyncPeriod.Duration,
ctx.StopChan)
ctx.Context.Done())
metricsClient := metricsclient.NewRESTMetricsClient(
resourceclient.NewForConfigOrDie(ctx.Mgr.GetConfig()),
custom_metrics.NewForConfig(ctx.Mgr.GetConfig(), ctx.Mgr.GetRESTMapper(), apiVersionsGetter),

@@ -752,8 +819,24 @@ func startAgentCSRApprovingController(ctx controllerscontext.Context) (enabled b
return true, nil
}

func startClusterTaintPolicyController(ctx controllerscontext.Context) (enabled bool, err error) {
if !features.FeatureGate.Enabled(features.Failover) {
return false, nil
}

clusterTaintPolicyController := taint.ClusterTaintPolicyController{
Client: ctx.Mgr.GetClient(),
EventRecorder: ctx.Mgr.GetEventRecorderFor(taint.ControllerName),
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
}
if err := clusterTaintPolicyController.SetupWithManager(ctx.Mgr); err != nil {
return false, err
}
return true, nil
}

// setupControllers initializes controllers and sets them up one by one.
func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stopChan <-chan struct{}) {
func setupControllers(ctx context.Context, mgr controllerruntime.Manager, opts *options.Options) {
restConfig := mgr.GetConfig()
dynamicClientSet := dynamic.NewForConfigOrDie(restConfig)
discoverClientSet := discovery.NewDiscoveryClientForConfigOrDie(restConfig)

@@ -766,25 +849,27 @@ func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stop
return
}

controlPlaneInformerManager := genericmanager.NewSingleClusterInformerManager(dynamicClientSet, opts.ResyncPeriod.Duration, stopChan)
controlPlaneInformerManager := genericmanager.NewSingleClusterInformerManager(ctx, dynamicClientSet, opts.ResyncPeriod.Duration)
// We need a service lister to build a resource interpreter with `ClusterIPServiceResolver`
// which allows connection to the customized interpreter webhook without a cluster DNS service.
sharedFactory := informers.NewSharedInformerFactory(kubeClientSet, opts.ResyncPeriod.Duration)
serviceLister := sharedFactory.Core().V1().Services().Lister()
sharedFactory.Start(stopChan)
sharedFactory.WaitForCacheSync(stopChan)
sharedFactory.Start(ctx.Done())
sharedFactory.WaitForCacheSync(ctx.Done())

resourceInterpreter := resourceinterpreter.NewResourceInterpreter(controlPlaneInformerManager, serviceLister)
if err := mgr.Add(resourceInterpreter); err != nil {
klog.Fatalf("Failed to setup custom resource interpreter: %v", err)
if err := resourceInterpreter.Start(ctx); err != nil {
klog.Fatalf("Failed to start resource interpreter: %v", err)
}

objectWatcher := objectwatcher.NewObjectWatcher(mgr.GetClient(), mgr.GetRESTMapper(), util.NewClusterDynamicClientSet, resourceInterpreter)
rateLimiterGetter := util.GetClusterRateLimiterGetter().SetDefaultLimits(opts.ClusterAPIQPS, opts.ClusterAPIBurst)
clusterClientOption := &util.ClientOption{RateLimiterGetter: rateLimiterGetter.GetRateLimiter}
objectWatcher := objectwatcher.NewObjectWatcher(mgr.GetClient(), mgr.GetRESTMapper(), util.NewClusterDynamicClientSet, clusterClientOption, resourceInterpreter)

resourceDetector := &detector.ResourceDetector{
DiscoveryClientSet: discoverClientSet,
Client: mgr.GetClient(),
InformerManager: controlPlaneInformerManager,
ControllerRuntimeCache: mgr.GetCache(),
RESTMapper: mgr.GetRESTMapper(),
DynamicClient: dynamicClientSet,
SkippedResourceConfig: skippedResourceConfig,
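
A few lines up, setupControllers now builds a single rate-limiter getter with default QPS/Burst limits and threads it into every member-cluster client via ClusterClientOption, replacing the per-call QPS/Burst fields. A hypothetical sketch of what such a per-cluster getter boils down to, assuming client-go's flowcontrol token bucket (names here are illustrative, not Karmada's API):

package ratelimitsketch

import (
	"sync"

	"k8s.io/client-go/util/flowcontrol"
)

// limiterGetter hands out one shared token-bucket rate limiter per member
// cluster, so all clients for a cluster draw from the same budget.
type limiterGetter struct {
	mu       sync.Mutex
	limiters map[string]flowcontrol.RateLimiter
	qps      float32
	burst    int
}

func (g *limiterGetter) GetRateLimiter(cluster string) flowcontrol.RateLimiter {
	g.mu.Lock()
	defer g.mu.Unlock()
	if g.limiters == nil {
		g.limiters = map[string]flowcontrol.RateLimiter{}
	}
	if _, ok := g.limiters[cluster]; !ok {
		g.limiters[cluster] = flowcontrol.NewTokenBucketRateLimiter(g.qps, g.burst)
	}
	return g.limiters[cluster]
}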

@@ -815,7 +900,7 @@ func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stop
klog.Fatalf("Failed to setup dependencies distributor: %v", err)
}
}
setupClusterAPIClusterDetector(mgr, opts, stopChan)
setupClusterAPIClusterDetector(ctx, mgr, opts)
controllerContext := controllerscontext.Context{
Mgr: mgr,
ObjectWatcher: objectWatcher,

@@ -825,14 +910,11 @@ func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stop
ClusterMonitorGracePeriod: opts.ClusterMonitorGracePeriod,
ClusterStartupGracePeriod: opts.ClusterStartupGracePeriod,
ClusterStatusUpdateFrequency: opts.ClusterStatusUpdateFrequency,
FailoverEvictionTimeout: opts.FailoverEvictionTimeout,
ClusterLeaseDuration: opts.ClusterLeaseDuration,
ClusterLeaseRenewIntervalFraction: opts.ClusterLeaseRenewIntervalFraction,
ClusterSuccessThreshold: opts.ClusterSuccessThreshold,
ClusterFailureThreshold: opts.ClusterFailureThreshold,
ClusterCacheSyncTimeout: opts.ClusterCacheSyncTimeout,
ClusterAPIQPS: opts.ClusterAPIQPS,
ClusterAPIBurst: opts.ClusterAPIBurst,
SkippedPropagatingNamespaces: opts.SkippedNamespacesRegexps(),
ConcurrentWorkSyncs: opts.ConcurrentWorkSyncs,
EnableTaintManager: opts.EnableTaintManager,

@@ -840,13 +922,16 @@ func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stop
GracefulEvictionTimeout: opts.GracefulEvictionTimeout,
EnableClusterResourceModeling: opts.EnableClusterResourceModeling,
HPAControllerConfiguration: opts.HPAControllerConfiguration,
FederatedResourceQuotaOptions: opts.FederatedResourceQuotaOptions,
FailoverConfiguration: opts.FailoverOptions,
},
StopChan: stopChan,
Context: ctx,
DynamicClientSet: dynamicClientSet,
KubeClientSet: kubeClientSet,
OverrideManager: overrideManager,
ControlPlaneInformerManager: controlPlaneInformerManager,
ResourceInterpreter: resourceInterpreter,
ClusterClientOption: clusterClientOption,
}

if err := controllers.StartControllers(controllerContext, controllersDisabledByDefault); err != nil {

@@ -855,13 +940,13 @@ func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stop

// Ensure the InformerManager stops when the stop channel closes
go func() {
<-stopChan
<-ctx.Done()
genericmanager.StopInstance()
}()
}

// setupClusterAPIClusterDetector initializes the Cluster detector with the cluster-api management cluster.
func setupClusterAPIClusterDetector(mgr controllerruntime.Manager, opts *options.Options, stopChan <-chan struct{}) {
func setupClusterAPIClusterDetector(ctx context.Context, mgr controllerruntime.Manager, opts *options.Options) {
if len(opts.ClusterAPIKubeconfig) == 0 {
return
}

@@ -882,7 +967,7 @@ func setupClusterAPIClusterDetector(mgr controllerruntime.Manager, opts *options
ControllerPlaneConfig: mgr.GetConfig(),
ClusterAPIConfig: clusterAPIRestConfig,
ClusterAPIClient: clusterAPIClient,
InformerManager: genericmanager.NewSingleClusterInformerManager(dynamic.NewForConfigOrDie(clusterAPIRestConfig), 0, stopChan),
InformerManager: genericmanager.NewSingleClusterInformerManager(ctx, dynamic.NewForConfigOrDie(clusterAPIRestConfig), 0),
ConcurrentReconciles: 3,
}
if err := mgr.Add(clusterAPIClusterDetector); err != nil {

@@ -0,0 +1,60 @@
/*
Copyright 2025 The Karmada Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package options

import (
"github.com/spf13/pflag"
"k8s.io/apimachinery/pkg/util/validation/field"
)

// FailoverOptions holds the Failover configurations.
type FailoverOptions struct {
// EnableNoExecuteTaintEviction enables controller response to NoExecute taints on clusters,
// which triggers eviction of workloads without explicit tolerations.
EnableNoExecuteTaintEviction bool
// NoExecuteTaintEvictionPurgeMode controls resource cleanup behavior for NoExecute-triggered
// evictions (only active when --enable-no-execute-taint-eviction=true).
// Valid modes:
// - "Gracefully": first schedules workloads to new clusters and then cleans up original
// workloads after successful startup elsewhere to ensure service continuity.
// - "Directly": directly evicts workloads first (risking temporary service interruption)
// and then triggers rescheduling to other clusters.
// Default: "Gracefully".
NoExecuteTaintEvictionPurgeMode string
}

// AddFlags adds flags related to FailoverOptions for controller manager to the specified FlagSet.
func (o *FailoverOptions) AddFlags(flags *pflag.FlagSet) {
if o == nil {
return
}

flags.BoolVar(&o.EnableNoExecuteTaintEviction, "enable-no-execute-taint-eviction", false, "Enables controller response to NoExecute taints on clusters, which triggers eviction of workloads without explicit tolerations. Given the impact of eviction caused by NoExecute Taint, this parameter is designed to remain disabled by default and requires careful evaluation by administrators before being enabled.\n")
flags.StringVar(&o.NoExecuteTaintEvictionPurgeMode, "no-execute-taint-eviction-purge-mode", "Gracefully", "Controls resource cleanup behavior for NoExecute-triggered evictions (only active when --enable-no-execute-taint-eviction=true). Supported values are \"Directly\", and \"Gracefully\". \"Directly\" mode directly evicts workloads first (risking temporary service interruption) and then triggers rescheduling to other clusters, while \"Gracefully\" mode first schedules workloads to new clusters and then cleans up original workloads after successful startup elsewhere to ensure service continuity.")
}

// Validate checks FailoverOptions and returns a slice of found errors.
func (o *FailoverOptions) Validate() field.ErrorList {
errs := field.ErrorList{}
if o.EnableNoExecuteTaintEviction &&
o.NoExecuteTaintEvictionPurgeMode != "Gracefully" &&
o.NoExecuteTaintEvictionPurgeMode != "Directly" {
errs = append(errs, field.Invalid(field.NewPath("FailoverOptions").Child("NoExecuteTaintEvictionPurgeMode"),
o.NoExecuteTaintEvictionPurgeMode, "Invalid mode"))
}
return errs
}
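
For reference, a minimal sketch of how this validation behaves from a caller's point of view (assuming the package lives at cmd/controller-manager/app/options, which the surrounding hunks suggest but do not state):

package main

import (
	"fmt"

	"github.com/karmada-io/karmada/cmd/controller-manager/app/options"
)

func main() {
	o := options.FailoverOptions{
		EnableNoExecuteTaintEviction:    true,
		NoExecuteTaintEvictionPurgeMode: "Immediately", // hypothetical bad value; only "Gracefully"/"Directly" pass
	}
	if errs := o.Validate(); len(errs) > 0 {
		fmt.Println(errs.ToAggregate())
	}
}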

@@ -0,0 +1,48 @@
/*
Copyright 2025 The Karmada Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package options

import (
"time"

"github.com/spf13/pflag"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/validation/field"
)

// FederatedResourceQuotaOptions holds the FederatedResourceQuota-related options.
type FederatedResourceQuotaOptions struct {
// ResourceQuotaSyncPeriod is the period for syncing federated resource quota usage status
// in the system.
ResourceQuotaSyncPeriod metav1.Duration
}

// AddFlags adds flags related to FederatedResourceQuotaEnforcement for controller manager to the specified FlagSet.
func (o *FederatedResourceQuotaOptions) AddFlags(fs *pflag.FlagSet) {
if o == nil {
return
}
fs.DurationVar(&o.ResourceQuotaSyncPeriod.Duration, "federated-resource-quota-sync-period", time.Minute*5, "The interval for periodic full resynchronization of FederatedResourceQuota resources. This ensures quota recalculations occur at regular intervals to correct potential inaccuracies, particularly when webhook validation produces side effects.")
}

// Validate checks FederatedResourceQuotaOptions and returns a slice of found errors.
func (o *FederatedResourceQuotaOptions) Validate() field.ErrorList {
if o.ResourceQuotaSyncPeriod.Duration <= 0 {
return field.ErrorList{field.Invalid(field.NewPath("federatedResourceQuotaSyncPeriod"), o.ResourceQuotaSyncPeriod, "must be greater than 0")}
}
return nil
}
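
As with FailoverOptions, the new sync-period option can be exercised directly; a sketch under the same package-path assumption:

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/karmada-io/karmada/cmd/controller-manager/app/options"
)

func main() {
	o := options.FederatedResourceQuotaOptions{
		ResourceQuotaSyncPeriod: metav1.Duration{Duration: -time.Minute}, // invalid: must be > 0
	}
	if errs := o.Validate(); len(errs) > 0 {
		fmt.Println(errs.ToAggregate())
	}
}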

@@ -54,8 +54,6 @@ type Options struct {
// ClusterStatusUpdateFrequency is the frequency that controller computes and reports cluster status.
// It must work with ClusterMonitorGracePeriod(--cluster-monitor-grace-period) in karmada-controller-manager.
ClusterStatusUpdateFrequency metav1.Duration
// FailoverEvictionTimeout is the grace period for deleting scheduling result on failed clusters.
FailoverEvictionTimeout metav1.Duration
// ClusterLeaseDuration is a duration that candidates for a lease need to wait to force acquire it.
// This is measured against the time of the last observed lease RenewTime.
ClusterLeaseDuration metav1.Duration

@@ -146,6 +144,10 @@ type Options struct {
// in scenario of dynamic replica assignment based on cluster free resources.
// Disable if it does not fit your cases for better performance.
EnableClusterResourceModeling bool
// FederatedResourceQuotaOptions holds configurations for FederatedResourceQuota reconciliation.
FederatedResourceQuotaOptions FederatedResourceQuotaOptions
// FailoverOptions holds the Failover configurations.
FailoverOptions FailoverOptions
}

// NewOptions builds an empty Options.

@@ -195,8 +197,6 @@ func (o *Options) AddFlags(flags *pflag.FlagSet, allControllers, disabledByDefau
"Specifies the grace period of allowing a running cluster to be unresponsive before marking it unhealthy.")
flags.DurationVar(&o.ClusterStartupGracePeriod.Duration, "cluster-startup-grace-period", 60*time.Second,
"Specifies the grace period of allowing a cluster to be unresponsive during startup before marking it unhealthy.")
flags.DurationVar(&o.FailoverEvictionTimeout.Duration, "failover-eviction-timeout", 5*time.Minute,
"Specifies the grace period for deleting scheduling result on failed clusters.")
flags.StringVar(&o.SkippedPropagatingAPIs, "skipped-propagating-apis", "", "Semicolon separated resources that should be skipped from propagating in addition to the default skip list(cluster.karmada.io;policy.karmada.io;work.karmada.io). Supported formats are:\n"+
"<group> for skip resources with a specific API group(e.g. networking.k8s.io),\n"+
"<group>/<version> for skip resources with a specific API version(e.g. networking.k8s.io/v1beta1),\n"+

@@ -232,6 +232,8 @@ func (o *Options) AddFlags(flags *pflag.FlagSet, allControllers, disabledByDefau
o.RateLimiterOpts.AddFlags(flags)
o.ProfileOpts.AddFlags(flags)
o.HPAControllerConfiguration.AddFlags(flags)
o.FederatedResourceQuotaOptions.AddFlags(flags)
o.FailoverOptions.AddFlags(flags)
features.FeatureGate.AddFlag(flags)
}

@@ -54,5 +54,9 @@ func (o *Options) Validate() field.ErrorList {
errs = append(errs, field.Invalid(newPath.Child("SkippedPropagatingNamespaces").Index(index), ns, "Invalid namespace regular expression"))
}
}

errs = append(errs, o.FederatedResourceQuotaOptions.Validate()...)
errs = append(errs, o.FailoverOptions.Validate()...)

return errs
}

@@ -36,6 +36,11 @@ func New(modifyOptions ModifyOptions) Options {
ClusterMonitorPeriod: metav1.Duration{Duration: 10 * time.Second},
ClusterMonitorGracePeriod: metav1.Duration{Duration: 10 * time.Second},
ClusterStartupGracePeriod: metav1.Duration{Duration: 10 * time.Second},
FederatedResourceQuotaOptions: FederatedResourceQuotaOptions{
ResourceQuotaSyncPeriod: metav1.Duration{
Duration: 10 * time.Second,
},
},
}

if modifyOptions != nil {

@@ -96,6 +101,15 @@ func TestValidateControllerManagerConfiguration(t *testing.T) {
}),
expectedErrs: field.ErrorList{field.Invalid(newPath.Child("ClusterStartupGracePeriod"), metav1.Duration{Duration: 0 * time.Second}, "must be greater than 0")},
},
"invalid FailoverOptions": {
opt: New(func(options *Options) {
options.FailoverOptions.EnableNoExecuteTaintEviction = true
options.FailoverOptions.NoExecuteTaintEvictionPurgeMode = ""
}),
expectedErrs: field.ErrorList{
field.Invalid(field.NewPath("FailoverOptions").Child("NoExecuteTaintEvictionPurgeMode"), "", "Invalid mode"),
},
},
}

for _, testCase := range testCases {

@@ -20,8 +20,8 @@ import (
"os"

"k8s.io/component-base/cli"
_ "k8s.io/component-base/logs/json/register" // for JSON log format registration
"k8s.io/klog/v2"
"k8s.io/component-base/logs"
_ "k8s.io/component-base/logs/json/register" // To enable JSON log format support
controllerruntime "sigs.k8s.io/controller-runtime"

"github.com/karmada-io/karmada/cmd/controller-manager/app"

@@ -29,13 +29,9 @@ import (

func main() {
ctx := controllerruntime.SetupSignalHandler()
// Starting from version 0.15.0, controller-runtime expects its consumers to set a logger through log.SetLogger.
// If SetLogger is not called within the first 30 seconds of a binary's lifetime, it will get
// set to a NullLogSink and report an error. This is to silence the "log.SetLogger(...) was never called; logs will not be displayed" error
// by setting a logger through log.SetLogger.
// For more info, refer to https://github.com/karmada-io/karmada/pull/4885.
controllerruntime.SetLogger(klog.Background())
cmd := app.NewControllerManagerCommand(ctx)
code := cli.Run(cmd)
os.Exit(code)
exitCode := cli.Run(cmd)
// Ensure any buffered log entries are flushed
logs.FlushLogs()
os.Exit(exitCode)
}

@@ -33,13 +33,17 @@ import (
"k8s.io/client-go/tools/leaderelection/resourcelock"
"k8s.io/client-go/util/flowcontrol"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/component-base/logs"
logsv1 "k8s.io/component-base/logs/api/v1"
"k8s.io/component-base/term"
"k8s.io/klog/v2"
ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"

"github.com/karmada-io/karmada/cmd/descheduler/app/options"
"github.com/karmada-io/karmada/pkg/descheduler"
"github.com/karmada-io/karmada/pkg/features"
karmadaclientset "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
versionmetrics "github.com/karmada-io/karmada/pkg/metrics"
"github.com/karmada-io/karmada/pkg/sharedcli"
"github.com/karmada-io/karmada/pkg/sharedcli/klogflag"
"github.com/karmada-io/karmada/pkg/sharedcli/profileflag"

@@ -78,20 +82,30 @@ const (
)

// NewDeschedulerCommand creates a *cobra.Command object with default parameters
func NewDeschedulerCommand(stopChan <-chan struct{}) *cobra.Command {
func NewDeschedulerCommand(ctx context.Context) *cobra.Command {
logConfig := logsv1.NewLoggingConfiguration()
fss := cliflag.NamedFlagSets{}

logsFlagSet := fss.FlagSet("logs")
logs.AddFlags(logsFlagSet, logs.SkipLoggingConfigurationFlags())
logsv1.AddFlags(logConfig, logsFlagSet)
klogflag.Add(logsFlagSet)

genericFlagSet := fss.FlagSet("generic")
opts := options.NewOptions()
opts.AddFlags(genericFlagSet)

cmd := &cobra.Command{
Use: names.KarmadaDeschedulerComponentName,
Long: `The karmada-descheduler evicts replicas from member clusters
if they are failed to be scheduled for a period of time. It relies on
if they are failed to be scheduled for a period of time. It relies on
karmada-scheduler-estimator to get replica status.`,
RunE: func(_ *cobra.Command, _ []string) error {
// validate options
if errs := opts.Validate(); len(errs) != 0 {
return errs.ToAggregate()
}
if err := run(opts, stopChan); err != nil {
if err := run(ctx, opts); err != nil {
return err
}
return nil

@@ -104,17 +118,15 @@ karmada-scheduler-estimator to get replica status.`,
}
return nil
},
PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
if err := logsv1.ValidateAndApply(logConfig, features.FeatureGate); err != nil {
return err
}
logs.InitLogs()
return nil
},
}

fss := cliflag.NamedFlagSets{}

genericFlagSet := fss.FlagSet("generic")
opts.AddFlags(genericFlagSet)

// Set klog flags
logsFlagSet := fss.FlagSet("logs")
klogflag.Add(logsFlagSet)

cmd.AddCommand(sharedcommand.NewCmdVersion(names.KarmadaDeschedulerComponentName))
cmd.Flags().AddFlagSet(genericFlagSet)
cmd.Flags().AddFlagSet(logsFlagSet)

@@ -124,9 +136,12 @@ karmada-scheduler-estimator to get replica status.`,
return cmd
}

func run(opts *options.Options, stopChan <-chan struct{}) error {
func run(ctx context.Context, opts *options.Options) error {
klog.Infof("karmada-descheduler version: %s", version.Get())
klog.Infof("Please make sure the karmada-scheduler-estimator of all member clusters has been deployed")

ctrlmetrics.Registry.MustRegister(versionmetrics.NewBuildInfoCollector())

serveHealthzAndMetrics(opts.HealthProbeBindAddress, opts.MetricsBindAddress)

profileflag.ListenAndServe(opts.ProfileOpts)

@@ -140,12 +155,6 @@ func run(opts *options.Options, stopChan <-chan struct{}) error {
karmadaClient := karmadaclientset.NewForConfigOrDie(restConfig)
kubeClient := kubernetes.NewForConfigOrDie(restConfig)

ctx, cancel := context.WithCancel(context.Background())
go func() {
<-stopChan
cancel()
}()

desched := descheduler.NewDescheduler(karmadaClient, kubeClient, opts)
if !opts.LeaderElection.LeaderElect {
desched.Run(ctx)

@@ -17,6 +17,8 @@ limitations under the License.
package app

import (
"context"
"fmt"
"net/http"
"testing"
"time"

@@ -26,11 +28,12 @@ import (

"github.com/karmada-io/karmada/cmd/descheduler/app/options"
"github.com/karmada-io/karmada/pkg/util/names"
testingutil "github.com/karmada-io/karmada/pkg/util/testing"
)

func TestNewDeschedulerCommand(t *testing.T) {
stopCh := make(chan struct{})
cmd := NewDeschedulerCommand(stopCh)
ctx := context.Background()
cmd := NewDeschedulerCommand(ctx)

assert.NotNil(t, cmd)
assert.Equal(t, names.KarmadaDeschedulerComponentName, cmd.Use)

@@ -51,8 +54,8 @@ func TestDeschedulerCommandFlagParsing(t *testing.T) {
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
stopCh := make(chan struct{})
cmd := NewDeschedulerCommand(stopCh)
ctx := context.Background()
cmd := NewDeschedulerCommand(ctx)
cmd.SetArgs(tc.args)
err := cmd.ParseFlags(tc.args)
if tc.expectError {

@@ -65,8 +68,10 @@ func TestDeschedulerCommandFlagParsing(t *testing.T) {
}

func TestServeHealthzAndMetrics(t *testing.T) {
healthAddress := "127.0.0.1:8082"
metricsAddress := "127.0.0.1:8083"
ports, err := testingutil.GetFreePorts("127.0.0.1", 2)
require.NoError(t, err)
healthAddress := fmt.Sprintf("127.0.0.1:%d", ports[0])
metricsAddress := fmt.Sprintf("127.0.0.1:%d", ports[1])

go serveHealthzAndMetrics(healthAddress, metricsAddress)
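
The test now reserves free ports via testingutil.GetFreePorts instead of hard-coding 8082/8083, which avoids collisions when tests run in parallel. Helpers like this are typically built on net.Listen with port 0 so the kernel picks unused ports; a sketch under that assumption (not necessarily Karmada's actual implementation):

package testing

import (
	"fmt"
	"net"
)

// GetFreePorts reserves n free ports on host by listening on port 0 and
// letting the kernel choose; all listeners stay open until the end so the
// same port cannot be handed out twice, then they are released to the caller.
func GetFreePorts(host string, n int) ([]int, error) {
	ports := make([]int, 0, n)
	listeners := make([]net.Listener, 0, n)
	defer func() {
		for _, l := range listeners {
			_ = l.Close()
		}
	}()
	for i := 0; i < n; i++ {
		l, err := net.Listen("tcp", fmt.Sprintf("%s:0", host))
		if err != nil {
			return nil, err
		}
		listeners = append(listeners, l)
		ports = append(ports, l.Addr().(*net.TCPAddr).Port)
	}
	return ports, nil
}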

@@ -20,6 +20,7 @@ import (
"os"

"k8s.io/component-base/cli"
"k8s.io/component-base/logs"
_ "k8s.io/component-base/logs/json/register" // for JSON log format registration
controllerruntime "sigs.k8s.io/controller-runtime"

@@ -27,8 +28,9 @@ import (
)

func main() {
stopChan := controllerruntime.SetupSignalHandler().Done()
command := app.NewDeschedulerCommand(stopChan)
code := cli.Run(command)
os.Exit(code)
ctx := controllerruntime.SetupSignalHandler()
cmd := app.NewDeschedulerCommand(ctx)
exitCode := cli.Run(cmd)
logs.FlushLogs()
os.Exit(exitCode)
}

@@ -31,10 +31,12 @@ import (
genericapiserver "k8s.io/apiserver/pkg/server"
genericfilters "k8s.io/apiserver/pkg/server/filters"
genericoptions "k8s.io/apiserver/pkg/server/options"
utilversion "k8s.io/apiserver/pkg/util/version"
"k8s.io/apiserver/pkg/util/compatibility"
"k8s.io/client-go/rest"
"k8s.io/client-go/util/flowcontrol"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/component-base/logs"
logsv1 "k8s.io/component-base/logs/api/v1"
"k8s.io/component-base/term"
"k8s.io/klog/v2"
netutils "k8s.io/utils/net"

@@ -42,6 +44,7 @@ import (

"github.com/karmada-io/karmada/cmd/karmada-search/app/options"
searchscheme "github.com/karmada-io/karmada/pkg/apis/search/scheme"
"github.com/karmada-io/karmada/pkg/features"
karmadaclientset "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
informerfactory "github.com/karmada-io/karmada/pkg/generated/informers/externalversions"
generatedopenapi "github.com/karmada-io/karmada/pkg/generated/openapi"

@@ -62,7 +65,17 @@ type Option func(*runtime.Registry)

// NewKarmadaSearchCommand creates a *cobra.Command object with default parameters
func NewKarmadaSearchCommand(ctx context.Context, registryOptions ...Option) *cobra.Command {
logConfig := logsv1.NewLoggingConfiguration()
fss := cliflag.NamedFlagSets{}

logsFlagSet := fss.FlagSet("logs")
logs.AddFlags(logsFlagSet, logs.SkipLoggingConfigurationFlags())
logsv1.AddFlags(logConfig, logsFlagSet)
klogflag.Add(logsFlagSet)

genericFlagSet := fss.FlagSet("generic")
opts := options.NewOptions()
opts.AddFlags(genericFlagSet)

cmd := &cobra.Command{
Use: names.KarmadaSearchComponentName,

@@ -80,17 +93,15 @@ capabilities such as global search and resource proxy in a multi-cloud environme
}
return nil
},
PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
if err := logsv1.ValidateAndApply(logConfig, features.FeatureGate); err != nil {
return err
}
logs.InitLogs()
return nil
},
}

fss := cliflag.NamedFlagSets{}

genericFlagSet := fss.FlagSet("generic")
opts.AddFlags(genericFlagSet)

// Set klog flags
logsFlagSet := fss.FlagSet("logs")
klogflag.Add(logsFlagSet)

cmd.AddCommand(sharedcommand.NewCmdVersion(names.KarmadaSearchComponentName))
cmd.Flags().AddFlagSet(genericFlagSet)
cmd.Flags().AddFlagSet(logsFlagSet)

@@ -130,8 +141,11 @@ func run(ctx context.Context, o *options.Options, registryOptions ...Option) err
return nil
})

karmadaSharedInformerFactoryCacheSynced := make(chan struct{})
server.GenericAPIServer.AddPostStartHookOrDie("start-karmada-informers", func(context genericapiserver.PostStartHookContext) error {
config.ExtraConfig.KarmadaSharedInformerFactory.Start(context.Done())
config.ExtraConfig.KarmadaSharedInformerFactory.WaitForCacheSync(context.Done())
close(karmadaSharedInformerFactoryCacheSynced)
return nil
})
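
The close above, paired with the channel receive in the next hunk, forms a plain channel barrier: the search-controller hook blocks until the informer hook has finished syncing caches. The same pattern in isolation (illustrative):

package main

import (
	"fmt"
	"time"
)

func main() {
	cacheSynced := make(chan struct{})

	// Hook one: sync caches, then signal readiness by closing the channel.
	go func() {
		time.Sleep(50 * time.Millisecond) // stand-in for WaitForCacheSync
		close(cacheSynced)
	}()

	// Hook two: block until caches are synced before starting the controller.
	<-cacheSynced
	fmt.Println("caches synced, starting controller")
}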

@@ -140,14 +154,15 @@ func run(ctx context.Context, o *options.Options, registryOptions ...Option) err
if config.ExtraConfig.Controller != nil {
server.GenericAPIServer.AddPostStartHookOrDie("start-karmada-search-controller", func(context genericapiserver.PostStartHookContext) error {
// start ResourceRegistry controller
config.ExtraConfig.Controller.Start(context.Done())
<-karmadaSharedInformerFactoryCacheSynced
config.ExtraConfig.Controller.Start(context)
return nil
})
}

if config.ExtraConfig.ProxyController != nil {
server.GenericAPIServer.AddPostStartHookOrDie("start-karmada-proxy-controller", func(context genericapiserver.PostStartHookContext) error {
config.ExtraConfig.ProxyController.Start(context.Done())
config.ExtraConfig.ProxyController.Start(context)
return nil
})

@@ -181,7 +196,7 @@ func config(o *options.Options, outOfTreeRegistryOptions ...Option) (*search.Con
}

serverConfig.ClientConfig.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(o.KubeAPIQPS, o.KubeAPIBurst)
serverConfig.Config.EffectiveVersion = utilversion.NewEffectiveVersion("1.0")
serverConfig.Config.EffectiveVersion = compatibility.DefaultBuildEffectiveVersion()

httpClient, err := rest.HTTPClientFor(serverConfig.ClientConfig)
if err != nil {

@@ -20,6 +20,7 @@ import (
"os"

"k8s.io/component-base/cli"
"k8s.io/component-base/logs"
_ "k8s.io/component-base/logs/json/register" // for JSON log format registration
controllerruntime "sigs.k8s.io/controller-runtime"

@@ -29,6 +30,7 @@ import (
func main() {
ctx := controllerruntime.SetupSignalHandler()
cmd := app.NewKarmadaSearchCommand(ctx)
code := cli.Run(cmd)
os.Exit(code)
exitCode := cli.Run(cmd)
logs.FlushLogs()
os.Exit(exitCode)
}
@@ -22,9 +22,14 @@ import (
 
     "github.com/spf13/cobra"
     cliflag "k8s.io/component-base/cli/flag"
+    "k8s.io/component-base/logs"
+    logsv1 "k8s.io/component-base/logs/api/v1"
     "k8s.io/component-base/term"
+    "k8s.io/klog/v2"
+    controllerruntime "sigs.k8s.io/controller-runtime"
 
     "github.com/karmada-io/karmada/cmd/metrics-adapter/app/options"
+    "github.com/karmada-io/karmada/pkg/features"
     "github.com/karmada-io/karmada/pkg/sharedcli"
     "github.com/karmada-io/karmada/pkg/sharedcli/klogflag"
     "github.com/karmada-io/karmada/pkg/util/names"
 
@@ -33,7 +38,17 @@ import (
 
 // NewMetricsAdapterCommand creates a *cobra.Command object with default parameters
 func NewMetricsAdapterCommand(ctx context.Context) *cobra.Command {
+    logConfig := logsv1.NewLoggingConfiguration()
+    fss := cliflag.NamedFlagSets{}
+
+    logsFlagSet := fss.FlagSet("logs")
+    logs.AddFlags(logsFlagSet, logs.SkipLoggingConfigurationFlags())
+    logsv1.AddFlags(logConfig, logsFlagSet)
+    klogflag.Add(logsFlagSet)
+
+    genericFlagSet := fss.FlagSet("generic")
     opts := options.NewOptions()
+    opts.AddFlags(genericFlagSet)
 
     cmd := &cobra.Command{
         Use: names.KarmadaMetricsAdapterComponentName,
 
@@ -50,6 +65,19 @@ func NewMetricsAdapterCommand(ctx context.Context) *cobra.Command {
             }
             return nil
         },
+        PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
+            if err := logsv1.ValidateAndApply(logConfig, features.FeatureGate); err != nil {
+                return err
+            }
+            logs.InitLogs()
+            // Starting from version 0.15.0, controller-runtime expects its consumers to set a logger through log.SetLogger.
+            // If SetLogger is not called within the first 30 seconds of a binary's lifetime, it will be
+            // set to a NullLogSink and report an error. Setting a logger here silences the
+            // "log.SetLogger(...) was never called; logs will not be displayed" error.
+            // More info: https://github.com/karmada-io/karmada/pull/4885.
+            controllerruntime.SetLogger(klog.Background())
+            return nil
+        },
         Args: func(cmd *cobra.Command, args []string) error {
             for _, arg := range args {
                 if len(arg) > 0 {
 
@@ -60,15 +88,6 @@ func NewMetricsAdapterCommand(ctx context.Context) *cobra.Command {
         },
     }
 
-    fss := cliflag.NamedFlagSets{}
-
-    genericFlagSet := fss.FlagSet("generic")
-    opts.AddFlags(genericFlagSet)
-
-    // Set klog flags
-    logsFlagSet := fss.FlagSet("logs")
-    klogflag.Add(logsFlagSet)
-
     cmd.AddCommand(sharedcommand.NewCmdVersion(names.KarmadaMetricsAdapterComponentName))
     cmd.Flags().AddFlagSet(genericFlagSet)
     cmd.Flags().AddFlagSet(logsFlagSet)
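This restructuring repeats across the search, metrics-adapter, scheduler-estimator, and scheduler commands: flag sets are now built before the cobra.Command literal so the logging flags already exist when argv is parsed, and the parsed configuration is validated and applied in PersistentPreRunE, which cobra runs after flag parsing but before RunE. A condensed sketch of the wiring, using a locally built feature gate for self-containment (the real code passes the component's features.FeatureGate):

```go
package main

import (
	"os"

	"github.com/spf13/cobra"
	cliflag "k8s.io/component-base/cli/flag"
	"k8s.io/component-base/featuregate"
	"k8s.io/component-base/logs"
	logsv1 "k8s.io/component-base/logs/api/v1"
)

func newCommand() *cobra.Command {
	logConfig := logsv1.NewLoggingConfiguration()
	featureGate := featuregate.NewFeatureGate()
	_ = logsv1.AddFeatureGates(featureGate) // register the logging feature gates

	// Build the flag sets before the command, so the flags exist by the
	// time cobra parses the command line.
	fss := cliflag.NamedFlagSets{}
	logsFlagSet := fss.FlagSet("logs")
	logs.AddFlags(logsFlagSet, logs.SkipLoggingConfigurationFlags())
	logsv1.AddFlags(logConfig, logsFlagSet)

	cmd := &cobra.Command{
		Use: "demo",
		// Runs after flag parsing and before RunE, so the parsed logging
		// configuration can be validated and applied here.
		PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
			if err := logsv1.ValidateAndApply(logConfig, featureGate); err != nil {
				return err
			}
			logs.InitLogs()
			return nil
		},
		RunE: func(_ *cobra.Command, _ []string) error { return nil },
	}
	cmd.Flags().AddFlagSet(logsFlagSet)
	return cmd
}

func main() {
	if err := newCommand().Execute(); err != nil {
		os.Exit(1)
	}
}
```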
@@ -38,6 +38,7 @@ import (
     karmadaclientset "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
     informerfactory "github.com/karmada-io/karmada/pkg/generated/informers/externalversions"
     generatedopenapi "github.com/karmada-io/karmada/pkg/generated/openapi"
+    versionmetrics "github.com/karmada-io/karmada/pkg/metrics"
     "github.com/karmada-io/karmada/pkg/metricsadapter"
     "github.com/karmada-io/karmada/pkg/sharedcli/profileflag"
     "github.com/karmada-io/karmada/pkg/util"
 
@@ -123,7 +124,7 @@ func (o *Options) AddFlags(fs *pflag.FlagSet) {
 }
 
 // Config returns config for the metrics-adapter server given Options
-func (o *Options) Config(stopCh <-chan struct{}) (*metricsadapter.MetricsServer, error) {
+func (o *Options) Config(ctx context.Context) (*metricsadapter.MetricsServer, error) {
     restConfig, err := clientcmd.BuildConfigFromFlags("", o.KubeConfig)
     if err != nil {
         klog.Errorf("Unable to build restConfig: %v", err)
 
@@ -135,7 +136,8 @@ func (o *Options) Config(stopCh <-chan struct{}) (*metricsadapter.MetricsServer,
     factory := informerfactory.NewSharedInformerFactory(karmadaClient, 0)
     kubeClient := kubernetes.NewForConfigOrDie(restConfig)
     kubeFactory := informers.NewSharedInformerFactory(kubeClient, 0)
-    metricsController := metricsadapter.NewMetricsController(stopCh, restConfig, factory, kubeFactory, &util.ClientOption{QPS: o.ClusterAPIQPS, Burst: o.ClusterAPIBurst})
+    limiterGetter := util.GetClusterRateLimiterGetter().SetDefaultLimits(o.ClusterAPIQPS, o.ClusterAPIBurst)
+    metricsController := metricsadapter.NewMetricsController(ctx, restConfig, factory, kubeFactory, &util.ClientOption{RateLimiterGetter: limiterGetter.GetRateLimiter})
     metricsAdapter := metricsadapter.NewMetricsAdapter(metricsController, o.CustomMetricsAdapterServerOptions)
     metricsAdapter.OpenAPIConfig = genericapiserver.DefaultOpenAPIConfig(generatedopenapi.GetOpenAPIDefinitions, openapinamer.NewDefinitionNamer(api.Scheme))
     metricsAdapter.OpenAPIV3Config = genericapiserver.DefaultOpenAPIV3Config(generatedopenapi.GetOpenAPIDefinitions, openapinamer.NewDefinitionNamer(api.Scheme))
 
@@ -178,14 +180,14 @@ func (o *Options) Config(stopCh <-chan struct{}) (*metricsadapter.MetricsServer,
 // Run runs the metrics-adapter with options. This should never exit.
 func (o *Options) Run(ctx context.Context) error {
     klog.Infof("karmada-metrics-adapter version: %s", version.Get())
+    legacyregistry.RawMustRegister(versionmetrics.NewBuildInfoCollector())
     if o.MetricsBindAddress != "0" {
         go serveMetrics(o.MetricsBindAddress)
     }
 
     profileflag.ListenAndServe(o.ProfileOpts)
 
-    stopCh := ctx.Done()
-    metricsServer, err := o.Config(stopCh)
+    metricsServer, err := o.Config(ctx)
     if err != nil {
         return err
     }
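Besides switching Config from a stop channel to a context, the hunk above replaces fixed per-client QPS/Burst settings with a getter that hands out one rate limiter per member cluster, so every client built for the same cluster shares one token bucket. util.GetClusterRateLimiterGetter is Karmada's own helper; the sketch below is an illustrative stand-in showing the shape of such a getter, not its actual implementation:

```go
package main

import (
	"fmt"
	"sync"

	"k8s.io/client-go/util/flowcontrol"
)

// limiterGetter hands out one shared rate limiter per cluster, so that
// reconnects reuse the existing token budget instead of resetting it.
type limiterGetter struct {
	mu       sync.Mutex
	qps      float32
	burst    int
	limiters map[string]flowcontrol.RateLimiter
}

func (g *limiterGetter) SetDefaultLimits(qps float32, burst int) *limiterGetter {
	g.mu.Lock()
	defer g.mu.Unlock()
	g.qps, g.burst = qps, burst
	return g
}

func (g *limiterGetter) GetRateLimiter(cluster string) flowcontrol.RateLimiter {
	g.mu.Lock()
	defer g.mu.Unlock()
	if g.limiters == nil {
		g.limiters = map[string]flowcontrol.RateLimiter{}
	}
	if l, ok := g.limiters[cluster]; ok {
		return l
	}
	l := flowcontrol.NewTokenBucketRateLimiter(g.qps, g.burst)
	g.limiters[cluster] = l
	return l
}

func main() {
	g := (&limiterGetter{}).SetDefaultLimits(40, 60)
	fmt.Println(g.GetRateLimiter("member1") == g.GetRateLimiter("member1")) // true: same limiter reused
}
```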
@@ -20,8 +20,8 @@ import (
     "os"
 
     "k8s.io/component-base/cli"
+    "k8s.io/component-base/logs"
     _ "k8s.io/component-base/logs/json/register" // for JSON log format registration
-    "k8s.io/klog/v2"
     controllerruntime "sigs.k8s.io/controller-runtime"
 
     "github.com/karmada-io/karmada/cmd/metrics-adapter/app"
 
@@ -29,13 +29,9 @@ import (
 func main() {
     ctx := controllerruntime.SetupSignalHandler()
-    // Starting from version 0.15.0, controller-runtime expects its consumers to set a logger through log.SetLogger.
-    // If SetLogger is not called within the first 30 seconds of a binary's lifetime, it will be
-    // set to a NullLogSink and report an error. Setting a logger here silences the
-    // "log.SetLogger(...) was never called; logs will not be displayed" error.
-    // More info: https://github.com/karmada-io/karmada/pull/4885.
-    controllerruntime.SetLogger(klog.Background())
     cmd := app.NewMetricsAdapterCommand(ctx)
-    code := cli.Run(cmd)
-    os.Exit(code)
+    exitCode := cli.Run(cmd)
+    // Ensure any buffered log entries are flushed
+    logs.FlushLogs()
+    os.Exit(exitCode)
 }
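Note where the controller-runtime logger setup went: out of this main() and into the command's PersistentPreRunE (see the metrics-adapter hunks above), so it only runs after the logging flags have been parsed and applied. The call itself is unchanged, and in isolation is just:

```go
package main

import (
	"k8s.io/klog/v2"
	controllerruntime "sigs.k8s.io/controller-runtime"
)

func main() {
	// Route controller-runtime's logger to klog. Without this,
	// controller-runtime >= 0.15 falls back to a NullLogSink after 30
	// seconds and reports that log.SetLogger was never called.
	controllerruntime.SetLogger(klog.Background())
}
```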
@@ -31,12 +31,16 @@ import (
     "k8s.io/client-go/tools/clientcmd"
     "k8s.io/client-go/util/flowcontrol"
     cliflag "k8s.io/component-base/cli/flag"
+    "k8s.io/component-base/logs"
+    logsv1 "k8s.io/component-base/logs/api/v1"
     "k8s.io/component-base/term"
     "k8s.io/klog/v2"
     ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
 
     "github.com/karmada-io/karmada/cmd/scheduler-estimator/app/options"
     "github.com/karmada-io/karmada/pkg/estimator/server"
+    "github.com/karmada-io/karmada/pkg/features"
+    versionmetrics "github.com/karmada-io/karmada/pkg/metrics"
     "github.com/karmada-io/karmada/pkg/sharedcli"
     "github.com/karmada-io/karmada/pkg/sharedcli/klogflag"
     "github.com/karmada-io/karmada/pkg/sharedcli/profileflag"
 
@@ -76,12 +80,29 @@ const (
 
 // NewSchedulerEstimatorCommand creates a *cobra.Command object with default parameters
 func NewSchedulerEstimatorCommand(ctx context.Context) *cobra.Command {
+    logConfig := logsv1.NewLoggingConfiguration()
+    fss := cliflag.NamedFlagSets{}
+
+    logsFlagSet := fss.FlagSet("logs")
+    logs.AddFlags(logsFlagSet, logs.SkipLoggingConfigurationFlags())
+    logsv1.AddFlags(logConfig, logsFlagSet)
+    klogflag.Add(logsFlagSet)
+
+    genericFlagSet := fss.FlagSet("generic")
     opts := options.NewOptions()
+    opts.AddFlags(genericFlagSet)
 
     cmd := &cobra.Command{
         Use: names.KarmadaSchedulerEstimatorComponentName,
-        Long: `The karmada-scheduler-estimator runs an accurate scheduler estimator of a cluster. It
+        Long: `The karmada-scheduler-estimator runs an accurate scheduler estimator of a cluster. It
provides the scheduler with more accurate cluster resource information.`,
+        PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
+            if err := logsv1.ValidateAndApply(logConfig, features.FeatureGate); err != nil {
+                return err
+            }
+            logs.InitLogs()
+            return nil
+        },
         RunE: func(_ *cobra.Command, _ []string) error {
             // validate options
             if errs := opts.Validate(); len(errs) != 0 {
 
@@ -94,15 +115,6 @@ provides the scheduler with more accurate cluster resource information.`,
         },
     }
 
-    fss := cliflag.NamedFlagSets{}
-
-    genericFlagSet := fss.FlagSet("generic")
-    opts.AddFlags(genericFlagSet)
-
-    // Set klog flags
-    logsFlagSet := fss.FlagSet("logs")
-    klogflag.Add(logsFlagSet)
-
     cmd.AddCommand(sharedcommand.NewCmdVersion(names.KarmadaSchedulerEstimatorComponentName))
     cmd.Flags().AddFlagSet(genericFlagSet)
     cmd.Flags().AddFlagSet(logsFlagSet)
 
@@ -114,6 +126,9 @@ provides the scheduler with more accurate cluster resource information.`,
 
 func run(ctx context.Context, opts *options.Options) error {
     klog.Infof("karmada-scheduler-estimator version: %s", version.Get())
+
+    ctrlmetrics.Registry.MustRegister(versionmetrics.NewBuildInfoCollector())
+
     serveHealthzAndMetrics(opts.HealthProbeBindAddress, opts.MetricsBindAddress)
 
     profileflag.ListenAndServe(opts.ProfileOpts)
 
@@ -128,7 +143,7 @@ func run(ctx context.Context, opts *options.Options) error {
     dynamicClient := dynamic.NewForConfigOrDie(restConfig)
     discoveryClient := discovery.NewDiscoveryClientForConfigOrDie(restConfig)
 
-    e, err := server.NewEstimatorServer(kubeClient, dynamicClient, discoveryClient, opts, ctx.Done())
+    e, err := server.NewEstimatorServer(ctx, kubeClient, dynamicClient, discoveryClient, opts)
     if err != nil {
         klog.Errorf("Fail to create estimator server: %v", err)
         return err
@@ -20,6 +20,7 @@ import (
     "os"
 
     "k8s.io/component-base/cli"
+    "k8s.io/component-base/logs"
     _ "k8s.io/component-base/logs/json/register" // for JSON log format registration
     controllerruntime "sigs.k8s.io/controller-runtime"
 
@@ -30,5 +31,7 @@ func main() {
     ctx := controllerruntime.SetupSignalHandler()
     cmd := app.NewSchedulerEstimatorCommand(ctx)
     code := cli.Run(cmd)
+    // Ensure any buffered log entries are flushed
+    logs.FlushLogs()
     os.Exit(code)
 }
@@ -34,12 +34,16 @@ import (
     "k8s.io/client-go/tools/leaderelection/resourcelock"
     "k8s.io/client-go/util/flowcontrol"
     cliflag "k8s.io/component-base/cli/flag"
+    "k8s.io/component-base/logs"
+    logsv1 "k8s.io/component-base/logs/api/v1"
     "k8s.io/component-base/term"
     "k8s.io/klog/v2"
     ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
 
     "github.com/karmada-io/karmada/cmd/scheduler/app/options"
+    "github.com/karmada-io/karmada/pkg/features"
     karmadaclientset "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
+    versionmetrics "github.com/karmada-io/karmada/pkg/metrics"
     "github.com/karmada-io/karmada/pkg/scheduler"
     "github.com/karmada-io/karmada/pkg/scheduler/framework/runtime"
     "github.com/karmada-io/karmada/pkg/sharedcli"
 
@@ -90,8 +94,19 @@ func WithPlugin(name string, factory runtime.PluginFactory) Option {
 }
 
 // NewSchedulerCommand creates a *cobra.Command object with default parameters
-func NewSchedulerCommand(stopChan <-chan struct{}, registryOptions ...Option) *cobra.Command {
+func NewSchedulerCommand(ctx context.Context, registryOptions ...Option) *cobra.Command {
+    logConfig := logsv1.NewLoggingConfiguration()
+    fss := cliflag.NamedFlagSets{}
+
+    logsFlagSet := fss.FlagSet("logs")
+    logs.AddFlags(logsFlagSet, logs.SkipLoggingConfigurationFlags())
+    logsv1.AddFlags(logConfig, logsFlagSet)
+    klogflag.Add(logsFlagSet)
+
+    genericFlagSet := fss.FlagSet("generic")
+
     opts := options.NewOptions()
+    opts.AddFlags(genericFlagSet)
 
     cmd := &cobra.Command{
         Use: names.KarmadaSchedulerComponentName,
 
@@ -104,11 +119,18 @@ the most suitable cluster.`,
             if errs := opts.Validate(); len(errs) != 0 {
                 return errs.ToAggregate()
             }
-            if err := run(opts, stopChan, registryOptions...); err != nil {
+            if err := run(ctx, opts, registryOptions...); err != nil {
                 return err
             }
             return nil
         },
+        PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
+            if err := logsv1.ValidateAndApply(logConfig, features.FeatureGate); err != nil {
+                return err
+            }
+            logs.InitLogs()
+            return nil
+        },
         Args: func(cmd *cobra.Command, args []string) error {
             for _, arg := range args {
                 if len(arg) > 0 {
 
@@ -119,14 +141,6 @@ the most suitable cluster.`,
         },
     }
 
-    fss := cliflag.NamedFlagSets{}
-
-    genericFlagSet := fss.FlagSet("generic")
-    opts.AddFlags(genericFlagSet)
-
-    // Set klog flags
-    logsFlagSet := fss.FlagSet("logs")
-    klogflag.Add(logsFlagSet)
     cmd.AddCommand(sharedcommand.NewCmdVersion(names.KarmadaSchedulerComponentName))
 
     cmd.Flags().AddFlagSet(genericFlagSet)
 
@@ -137,8 +151,10 @@ the most suitable cluster.`,
     return cmd
 }
 
-func run(opts *options.Options, stopChan <-chan struct{}, registryOptions ...Option) error {
+func run(ctx context.Context, opts *options.Options, registryOptions ...Option) error {
     klog.Infof("karmada-scheduler version: %s", version.Get())
+
+    ctrlmetrics.Registry.MustRegister(versionmetrics.NewBuildInfoCollector())
     serveHealthzAndMetrics(opts.HealthProbeBindAddress, opts.MetricsBindAddress)
 
     profileflag.ListenAndServe(opts.ProfileOpts)
 
@@ -153,11 +169,6 @@ func run(opts *options.Options, stopChan <-chan struct{}, registryOptions ...Opt
     karmadaClient := karmadaclientset.NewForConfigOrDie(restConfig)
     kubeClientSet := kubernetes.NewForConfigOrDie(restConfig)
 
-    ctx, cancel := context.WithCancel(context.Background())
-    go func() {
-        <-stopChan
-        cancel()
-    }()
     outOfTreeRegistry := make(runtime.Registry)
     for _, option := range registryOptions {
         if err := option(outOfTreeRegistry); err != nil {
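The five deleted lines in the last hunk are a stop-channel-to-context bridge; once main() hands a real context.Context down the call chain, the adapter goroutine becomes dead weight. For reference, the removed idiom in isolation (generic sketch, not Karmada code):

```go
package main

import (
	"context"
	"fmt"
)

// bridge converts a legacy stop channel into a cancellable context —
// the idiom the scheduler previously open-coded and has now removed.
func bridge(stopCh <-chan struct{}) context.Context {
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		<-stopCh
		cancel()
	}()
	return ctx
}

func main() {
	stopCh := make(chan struct{})
	ctx := bridge(stopCh)
	close(stopCh)
	<-ctx.Done()
	fmt.Println("context cancelled once the stop channel closed")
}
```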
@@ -17,6 +17,8 @@ limitations under the License.
 package app
 
 import (
+    "context"
+    "fmt"
     "net/http"
     "testing"
     "time"
 
@@ -26,11 +28,12 @@ import (
 
     "github.com/karmada-io/karmada/cmd/scheduler/app/options"
     "github.com/karmada-io/karmada/pkg/util/names"
+    testingutil "github.com/karmada-io/karmada/pkg/util/testing"
 )
 
 func TestNewSchedulerCommand(t *testing.T) {
-    stopCh := make(chan struct{})
-    cmd := NewSchedulerCommand(stopCh)
+    ctx := context.Background()
+    cmd := NewSchedulerCommand(ctx)
     assert.NotNil(t, cmd)
     assert.Equal(t, names.KarmadaSchedulerComponentName, cmd.Use)
     assert.NotEmpty(t, cmd.Long)
 
@@ -51,8 +54,8 @@ func TestSchedulerCommandFlagParsing(t *testing.T) {
 
     for _, tc := range testCases {
         t.Run(tc.name, func(t *testing.T) {
-            stopCh := make(chan struct{})
-            cmd := NewSchedulerCommand(stopCh)
+            ctx := context.Background()
+            cmd := NewSchedulerCommand(ctx)
             cmd.SetArgs(tc.args)
             err := cmd.ParseFlags(tc.args)
             if tc.expectError {
 
@@ -65,8 +68,10 @@ func TestSchedulerCommandFlagParsing(t *testing.T) {
 }
 
 func TestServeHealthzAndMetrics(t *testing.T) {
-    healthAddress := "127.0.0.1:8082"
-    metricsAddress := "127.0.0.1:8083"
+    ports, err := testingutil.GetFreePorts("127.0.0.1", 2)
+    require.NoError(t, err)
+    healthAddress := fmt.Sprintf("127.0.0.1:%d", ports[0])
+    metricsAddress := fmt.Sprintf("127.0.0.1:%d", ports[1])
 
     go serveHealthzAndMetrics(healthAddress, metricsAddress)
 
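Hard-coded test ports (8082/8083) collide when tests run in parallel or on busy CI hosts, so the test now asks a helper for free ports first. testingutil.GetFreePorts is Karmada's own helper; a plausible implementation binds port 0 so the kernel picks an unused port, then reads the choice back (sketch only, the real helper may differ):

```go
package main

import (
	"fmt"
	"net"
)

// getFreePorts asks the kernel for n currently unused TCP ports on host.
// Note the inherent race: a returned port can be taken again between
// Close() and the caller's own bind, which is acceptable for tests.
func getFreePorts(host string, n int) ([]int, error) {
	ports := make([]int, 0, n)
	for i := 0; i < n; i++ {
		l, err := net.Listen("tcp", net.JoinHostPort(host, "0"))
		if err != nil {
			return nil, err
		}
		ports = append(ports, l.Addr().(*net.TCPAddr).Port)
		l.Close()
	}
	return ports, nil
}

func main() {
	ports, err := getFreePorts("127.0.0.1", 2)
	if err != nil {
		panic(err)
	}
	fmt.Println(ports)
}
```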
@@ -20,6 +20,7 @@ import (
     "os"
 
     "k8s.io/component-base/cli"
+    "k8s.io/component-base/logs"
     _ "k8s.io/component-base/logs/json/register" // for JSON log format registration
     controllerruntime "sigs.k8s.io/controller-runtime"
 
@@ -27,8 +28,10 @@ import (
 )
 
 func main() {
-    stopChan := controllerruntime.SetupSignalHandler().Done()
-    command := app.NewSchedulerCommand(stopChan)
+    ctx := controllerruntime.SetupSignalHandler()
+    command := app.NewSchedulerCommand(ctx)
     code := cli.Run(command)
+    // Ensure any buffered log entries are flushed
+    logs.FlushLogs()
     os.Exit(code)
 }
@@ -25,7 +25,7 @@ import (
 
 // CorednsOptions contains options for coredns detector.
 type CorednsOptions struct {
-    PeriodSeconds    time.Duration
+    Period           time.Duration
     SuccessThreshold time.Duration
     FailureThreshold time.Duration
     StaleThreshold   time.Duration
 
@@ -38,6 +38,7 @@ func NewCorednsOptions() *CorednsOptions {
 
 // AddFlags adds flags of coredns detector to the specified FlagSet.
 func (o *CorednsOptions) AddFlags(fs *pflag.FlagSet) {
-    fs.DurationVar(&o.PeriodSeconds, "coredns-detect-period", 5*time.Second,
+    fs.DurationVar(&o.Period, "coredns-detect-period", 5*time.Second,
         "Specifies how often detector detects coredns health status.")
     fs.DurationVar(&o.SuccessThreshold, "coredns-success-threshold", 30*time.Second,
         "The duration of successes for the coredns to be considered healthy after recovery.")
 
@@ -159,7 +159,7 @@ func createDetectorContext(opts *options.Options) (detectorContext, error) {
         clusterName:   opts.Generic.ClusterName,
         detectors:     opts.Generic.Detectors,
         corednsConfig: &coredns.Config{
-            PeriodSeconds:    opts.Coredns.PeriodSeconds,
+            Period:           opts.Coredns.Period,
             SuccessThreshold: opts.Coredns.SuccessThreshold,
             FailureThreshold: opts.Coredns.FailureThreshold,
             StaleThreshold:   opts.Coredns.StaleThreshold,
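The rename from PeriodSeconds to Period is a naming correction: the field is a time.Duration, not a count of seconds, and because the flag name "coredns-detect-period" is unchanged, existing command lines keep working. A self-contained sketch of the flag binding:

```go
package main

import (
	"fmt"
	"time"

	"github.com/spf13/pflag"
)

func main() {
	// A time.Duration flag accepts values like "10s" or "1m", which is
	// why "Seconds" in the field name was misleading.
	var period time.Duration
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	fs.DurationVar(&period, "coredns-detect-period", 5*time.Second,
		"Specifies how often detector detects coredns health status.")
	_ = fs.Parse([]string{"--coredns-detect-period=10s"})
	fmt.Println(period) // 10s
}
```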
Some files were not shown because too many files have changed in this diff.