Compare commits

780 Commits
@@ -0,0 +1,36 @@
+# Config for the Gemini Pull Request Review Bot.
+# https://github.com/marketplace/gemini-code-assist
+
+# Enables fun features such as a poem in the initial pull request summary.
+# Type: boolean, default: false.
+have_fun: false
+
+code_review:
+  # Disables Gemini from acting on PRs.
+  # Type: boolean, default: false.
+  disable: false
+
+  # Minimum severity of comments to post (LOW, MEDIUM, HIGH, CRITICAL).
+  # Type: string, default: MEDIUM.
+  comment_severity_threshold: MEDIUM
+
+  # Max number of review comments (-1 for unlimited).
+  # Type: integer, default: -1.
+  max_review_comments: -1
+
+  pull_request_opened:
+    # Post helpful instructions when PR is opened.
+    # Type: boolean, default: false.
+    help: true
+
+    # Post PR summary when opened.
+    # Type boolean, default: true.
+    summary: true
+
+    # Post code review on PR open.
+    # Type boolean, default: true.
+    code_review: true
+
+# List of glob patterns to ignore (files and directories).
+# Type: array of string, default: [].
+ignore_patterns: []
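For reference, `ignore_patterns` accepts glob strings for files and directories the bot should skip. A hypothetical, purely illustrative population of that field (these globs are not part of the change above) could look like:

```yaml
# Illustrative only: example globs for ignore_patterns.
ignore_patterns:
  - "vendor/**"   # skip vendored dependencies
  - "**/*.pb.go"  # skip generated protobuf code
```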
@@ -2,30 +2,48 @@
 <!--
 Add one of the following kinds:
 
-/kind api-change
 /kind bug
-/kind cleanup
-/kind deprecation
-/kind design
-/kind documentation
-/kind failing-test
 /kind feature
+/kind documentation
+/kind cleanup
+
+Optionally add one or more of the following kinds if applicable:
+/kind api-change
+/kind deprecation
+/kind failing-test
+/kind flake
+/kind regression
 
 -->
 
 **What this PR does / why we need it**:
 
 **Which issue(s) this PR fixes**:
 <!--
 *Automatically closes linked issue when PR is merged.
 Usage: `Fixes #<issue number>`, or `Fixes (paste link of issue)`.*
 -->
 Fixes #
 
 <!--
 *Optionally link to the umbrella issue if this PR resolves part of it.
 Usage: `Part of #<issue number>`, or `Part of (paste link of issue)`.*
 Part of #
 -->
 
 **Special notes for your reviewer**:
 <!--
 Such as a test report of this PR.
 -->
 
 **Does this PR introduce a user-facing change?**:
 <!--
 If no, just write "NONE" in the release-note block below.
 If yes, a release note is required.
 Some brief examples of release notes:
 1. `karmada-controller-manager`: Fixed the issue that xxx
 2. `karmada-scheduler`: The deprecated flag `--xxx` now has been removed. Users of this flag should xxx.
 3. `API Change`: Introduced `spec.<field>` to the PropagationPolicy API for xxx.
 -->
 ```release-note

 ```
@@ -19,18 +19,18 @@ updates:
   - package-ecosystem: docker
     directory: /cluster/images/
-    target-branch: "release-1.11"
+    target-branch: "release-1.15"
     schedule:
       interval: weekly
 
   - package-ecosystem: docker
     directory: /cluster/images/
-    target-branch: "release-1.10"
+    target-branch: "release-1.14"
     schedule:
       interval: weekly
 
   - package-ecosystem: docker
     directory: /cluster/images/
-    target-branch: "release-1.9"
+    target-branch: "release-1.13"
     schedule:
       interval: weekly
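The entries above rotate the three maintained release branches each cycle. When the next release branch is cut, a matching block would follow the same shape (the branch name below is hypothetical, not from the diff):

```yaml
  - package-ecosystem: docker
    directory: /cluster/images/
    target-branch: "release-1.16"  # hypothetical future branch
    schedule:
      interval: weekly
```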
@@ -27,10 +27,10 @@ jobs:
           - karmada-search
           - karmada-operator
           - karmada-metrics-adapter
-        karmada-version: [ release-1.11, release-1.10, release-1.9 ]
+        karmada-version: [ release-1.15, release-1.14, release-1.13 ]
     steps:
       - name: checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           ref: ${{ matrix.karmada-version }}
       - name: install Go
@@ -47,7 +47,7 @@ jobs:
           export REGISTRY="docker.io/karmada"
           make image-${{ matrix.target }}
       - name: Run Trivy vulnerability scanner
-        uses: aquasecurity/trivy-action@0.29.0
+        uses: aquasecurity/trivy-action@0.32.0
         env:
           ACTIONS_RUNTIME_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           TRIVY_DB_REPOSITORY: ghcr.io/aquasecurity/trivy-db,public.ecr.aws/aquasecurity/trivy-db
@@ -56,15 +56,17 @@ jobs:
           format: 'sarif'
           ignore-unfixed: true
           vuln-type: 'os,library'
+          cache: false
           output: '${{ matrix.target }}:${{ matrix.karmada-version }}.trivy-results.sarif'
       - name: display scan results
-        uses: aquasecurity/trivy-action@0.29.0
+        uses: aquasecurity/trivy-action@0.32.0
         env:
           TRIVY_SKIP_DB_UPDATE: true # Avoid updating the vulnerability db as it was cached in the previous step.
         with:
           image-ref: 'docker.io/karmada/${{ matrix.target }}:${{ matrix.karmada-version }}'
           format: 'table'
           ignore-unfixed: true
+          cache: false
           vuln-type: 'os,library'
       - name: Upload Trivy scan results to GitHub Security tab
         uses: github/codeql-action/upload-sarif@v3
@@ -31,7 +31,11 @@ jobs:
           - karmada-metrics-adapter
     steps:
       - name: checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
+        with:
+          # fetch-depth:
+          # 0 indicates all history for all branches and tags.
+          fetch-depth: 0
       - name: install Go
         uses: actions/setup-go@v5
         with:
@@ -42,7 +46,7 @@ jobs:
           export REGISTRY="docker.io/karmada"
           make image-${{ matrix.target }}
       - name: Run Trivy vulnerability scanner
-        uses: aquasecurity/trivy-action@0.29.0
+        uses: aquasecurity/trivy-action@0.32.0
         env:
           ACTIONS_RUNTIME_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           TRIVY_DB_REPOSITORY: ghcr.io/aquasecurity/trivy-db,public.ecr.aws/aquasecurity/trivy-db
@@ -52,8 +56,9 @@ jobs:
           ignore-unfixed: true
           vuln-type: 'os,library'
           output: 'trivy-results.sarif'
+          cache: false
       - name: display scan results
-        uses: aquasecurity/trivy-action@0.29.0
+        uses: aquasecurity/trivy-action@0.32.0
         env:
           TRIVY_SKIP_DB_UPDATE: true # Avoid updating the vulnerability db as it was cached in the previous step.
         with:
@@ -61,6 +66,7 @@ jobs:
           format: 'table'
           ignore-unfixed: true
           vuln-type: 'os,library'
+          cache: false
       - name: Upload Trivy scan results to GitHub Security tab
         uses: github/codeql-action/upload-sarif@v3
         with:
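These workflows run Trivy twice on the same image: the first pass emits SARIF (and downloads the vulnerability DB), and the second pass sets `TRIVY_SKIP_DB_UPDATE` to reuse that DB while printing a human-readable table. A minimal standalone sketch of the same two-pass pattern (the image name is a placeholder):

```yaml
steps:
  - name: scan to SARIF (downloads and caches the vulnerability DB)
    uses: aquasecurity/trivy-action@0.32.0
    with:
      image-ref: 'example.io/app:latest'  # placeholder image
      format: 'sarif'
      output: 'trivy-results.sarif'
  - name: print human-readable results (reuses the cached DB)
    uses: aquasecurity/trivy-action@0.32.0
    env:
      TRIVY_SKIP_DB_UPDATE: true
    with:
      image-ref: 'example.io/app:latest'
      format: 'table'
```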
@@ -19,8 +19,8 @@ jobs:
       max-parallel: 5
       fail-fast: false
       matrix:
-        kubeapiserver-version: [ v1.23.4, v1.24.2, v1.25.0, v1.26.0, v1.27.3, v1.28.0, v1.29.0, v1.30.0, v1.31.0 ]
-        karmada-version: [ master, release-1.11, release-1.10, release-1.9 ]
+        kubeapiserver-version: [ v1.24.2, v1.25.0, v1.26.0, v1.27.3, v1.28.0, v1.29.0, v1.30.0, v1.31.0, v1.32.0, v1.33.0 ]
+        karmada-version: [ master, release-1.15, release-1.14, release-1.13 ]
     env:
       KARMADA_APISERVER_VERSION: ${{ matrix.kubeapiserver-version }}
     steps:
@@ -38,7 +38,7 @@ jobs:
           docker-images: false
           swap-storage: false
       - name: checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           # Number of commits to fetch. 0 indicates all history for all branches and tags.
           # We need to guess version via git tags.
@@ -49,7 +49,7 @@ jobs:
         with:
           go-version-file: go.mod
       - name: setup e2e test environment
-        uses: nick-fields/retry@v3.0.0
+        uses: nick-fields/retry@v3.0.2
         with:
           max_attempts: 3
           timeout_minutes: 20
@@ -19,7 +19,7 @@ jobs:
       max-parallel: 5
       fail-fast: false
       matrix:
-        k8s: [ v1.23.4, v1.24.2, v1.25.0, v1.26.0, v1.27.3, v1.28.0, v1.29.0, v1.30.0, v1.31.0 ]
+        k8s: [ v1.24.2, v1.25.0, v1.26.0, v1.27.3, v1.28.0, v1.29.0, v1.30.0, v1.31.0, v1.32.0, v1.33.0 ]
     steps:
       # Free up disk space on Ubuntu
       - name: Free Disk Space (Ubuntu)
@@ -35,7 +35,7 @@ jobs:
           docker-images: false
           swap-storage: false
       - name: checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           # Number of commits to fetch. 0 indicates all history for all branches and tags.
           # We need to guess version via git tags.
@@ -45,7 +45,7 @@ jobs:
         with:
           go-version-file: go.mod
       - name: setup e2e test environment
-        uses: nick-fields/retry@v3.0.0
+        uses: nick-fields/retry@v3.0.2
         with:
           max_attempts: 3
           timeout_minutes: 20
@@ -20,7 +20,7 @@ jobs:
     runs-on: ubuntu-22.04
     steps:
       - name: checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
       - name: install Go
         uses: actions/setup-go@v5
         with:
@@ -38,7 +38,7 @@ jobs:
     runs-on: ubuntu-22.04
     steps:
       - name: checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
       - name: install Go
         uses: actions/setup-go@v5
         with:
@@ -66,7 +66,7 @@ jobs:
     runs-on: ubuntu-22.04
     steps:
       - name: checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           # Number of commits to fetch. 0 indicates all history for all branches and tags.
           # We need to guess version via git tags.
@@ -85,7 +85,7 @@ jobs:
       GOTESTSUM_ENABLED: enabled
     steps:
       - name: checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
       - name: install Go
         uses: actions/setup-go@v5
         with:
@@ -116,7 +116,7 @@ jobs:
         # Here support the latest three minor releases of Kubernetes, this can be considered to be roughly
         # the same as the End of Life of the Kubernetes release: https://kubernetes.io/releases/
         # Please remember to update the CI Schedule Workflow when we add a new version.
-        k8s: [ v1.29.0, v1.30.0, v1.31.0 ]
+        k8s: [ v1.31.0, v1.32.0, v1.33.0 ]
     steps:
       # Free up disk space on Ubuntu
       - name: Free Disk Space (Ubuntu)
@@ -132,7 +132,7 @@ jobs:
           docker-images: false
           swap-storage: false
       - name: checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           # Number of commits to fetch. 0 indicates all history for all branches and tags.
           # We need to guess version via git tags.
@@ -144,6 +144,7 @@ jobs:
       - name: setup e2e test environment
         run: |
           export CLUSTER_VERSION=kindest/node:${{ matrix.k8s }}
+          export KUBE_CACHE_MUTATION_DETECTOR=true
           hack/local-up-karmada.sh
       - name: run e2e
         run: |
@@ -161,3 +162,59 @@ jobs:
         with:
           name: karmada_kind_log_${{ matrix.k8s }}
           path: /tmp/karmada/
+
+  e2e-operator:
+    name: operator e2e test
+    needs: build
+    runs-on: ubuntu-22.04
+    strategy:
+      fail-fast: false
+      matrix:
+        # Here support the latest three minor releases of Kubernetes, this can be considered to be roughly
+        # the same as the End of Life of the Kubernetes release: https://kubernetes.io/releases/
+        # Please remember to update the CI Schedule Workflow when we add a new version.
+        k8s: [ v1.31.0, v1.32.0, v1.33.0 ]
+    steps:
+      # Free up disk space on Ubuntu
+      - name: Free Disk Space (Ubuntu)
+        uses: jlumbroso/free-disk-space@main
+        with:
+          # this might remove tools that are actually needed, if set to "true" but frees about 6 GB
+          tool-cache: false
+          # all of these default to true, but feel free to set to "false" if necessary for your workflow
+          android: true
+          dotnet: true
+          haskell: true
+          large-packages: false
+          docker-images: false
+          swap-storage: false
+      - name: checkout code
+        uses: actions/checkout@v5
+        with:
+          # Number of commits to fetch. 0 indicates all history for all branches and tags.
+          # We need to guess version via git tags.
+          fetch-depth: 0
+      - name: install Go
+        uses: actions/setup-go@v5
+        with:
+          go-version-file: go.mod
+      - name: setup operator e2e test environment
+        run: |
+          export CLUSTER_VERSION=kindest/node:${{ matrix.k8s }}
+          hack/operator-e2e-environment.sh
+      - name: run e2e
+        run: |
+          export ARTIFACTS_PATH=${{ github.workspace }}/karmada-operator-e2e-logs/${{ matrix.k8s }}/
+          hack/run-e2e-operator.sh
+      - name: upload logs
+        if: always()
+        uses: actions/upload-artifact@v4
+        with:
+          name: karmada_operator_e2e_log_${{ matrix.k8s }}
+          path: ${{ github.workspace }}/karmada-operator-e2e-logs/${{ matrix.k8s }}/
+      - name: upload kind logs
+        if: always()
+        uses: actions/upload-artifact@v4
+        with:
+          name: karmada_operator_kind_log_${{ matrix.k8s }}
+          path: /tmp/karmada/
@@ -16,7 +16,7 @@ jobs:
     runs-on: ubuntu-22.04
     steps:
       - name: checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           # fetch-depth:
           # 0 indicates all history for all branches and tags.
@@ -31,7 +31,7 @@ jobs:
     runs-on: ubuntu-22.04
     steps:
       - name: checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           # fetch-depth:
           # 0 indicates all history for all branches and tags.
@@ -42,7 +42,7 @@ jobs:
         with:
           go-version-file: go.mod
       - name: Install Cosign
-        uses: sigstore/cosign-installer@v3.7.0
+        uses: sigstore/cosign-installer@v3.9.2
         with:
           cosign-release: 'v2.2.3'
       - name: install QEMU
@@ -9,9 +9,13 @@ jobs:
   publish-chart-to-dockerhub:
     name: publish to DockerHub
     runs-on: ubuntu-22.04
+    # prevent job running from forked repository, otherwise
+    # 1. running on the forked repository would fail as missing necessary secret.
+    # 2. running on the forked repository would use unnecessary GitHub Action time.
+    if: ${{ github.repository == 'karmada-io/karmada' }}
     steps:
       - name: checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           # fetch-depth:
           # 0 indicates all history for all branches and tags.
@@ -8,6 +8,10 @@ permissions:
 jobs:
   publish-image-to-dockerhub:
     name: publish to DockerHub
+    # prevent job running from forked repository, otherwise
+    # 1. running on the forked repository would fail as missing necessary secret.
+    # 2. running on the forked repository would use unnecessary GitHub Action time.
+    if: ${{ github.repository == 'karmada-io/karmada' }}
     permissions:
       id-token: write # To be able to get OIDC ID token to sign images.
     strategy:
@@ -27,7 +31,7 @@ jobs:
     runs-on: ubuntu-22.04
     steps:
       - name: checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           # fetch-depth:
           # 0 indicates all history for all branches and tags.
@@ -38,7 +42,7 @@ jobs:
         with:
           go-version-file: go.mod
       - name: Install Cosign
-        uses: sigstore/cosign-installer@v3.7.0
+        uses: sigstore/cosign-installer@v3.9.2
         with:
           cosign-release: 'v2.2.3'
       - name: install QEMU
@@ -7,7 +7,7 @@ on:
       - 'dependabot/**'
 
 permissions:
-  contents: read # Required by actions/checkout@v4 to fetch the repository contents.
+  contents: read # Required by actions/checkout@v5 to fetch the repository contents.
 
 jobs:
   fossa:
@@ -19,7 +19,7 @@ jobs:
     runs-on: ubuntu-22.04
     steps:
       - name: checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
       - name: Run FOSSA scan and upload build data
         uses: fossas/fossa-action@v1
         with:
@@ -1,13 +1,8 @@
 # validate any chart changes under charts directory
-name: Chart Lint
-
+name: Chart
 env:
-  HELM_VERSION: v3.11.2
-  KUSTOMIZE_VERSION: 5.4.3
-  KIND_VERSION: v0.22.0
-  KIND_NODE_IMAGE: kindest/node:v1.29.0
-  K8S_VERSION: v1.29.0
-
+  HELM_VERSION: v3.17.3
+  KUSTOMIZE_VERSION: 5.6.0
 on:
   push:
     # Exclude branches created by Dependabot to avoid triggering current workflow
@@ -15,18 +10,26 @@ on:
     branches-ignore:
       - 'dependabot/**'
   pull_request:
     paths:
       - "charts/**"
 
 # https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#concurrency
 concurrency:
   group: ${{ github.workflow }}-${{ github.actor }}-${{ github.head_ref || github.run_id }}
   cancel-in-progress: true
 permissions:
   contents: read
 
 jobs:
-  chart-lint-test:
+  test-on-kubernetes-matrix:
+    name: Test on Kubernetes
     runs-on: ubuntu-22.04
+    strategy:
+      fail-fast: false
+      matrix:
+        # Here support the latest three minor releases of Kubernetes, this can be considered to be roughly
+        # the same as the End of Life of the Kubernetes release: https://kubernetes.io/releases/
+        # Please remember to update the CI Schedule Workflow when we add a new version.
+        k8s: [ v1.31.0, v1.32.0, v1.33.0 ]
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           fetch-depth: 0
 
@@ -44,6 +47,8 @@ jobs:
         uses: syntaqx/setup-kustomize@v1
         with:
           kustomize-version: ${{ env.KUSTOMIZE_VERSION }}
+        env:
+          GITHUB_TOKEN: ${{ github.token }}
 
       - name: Run chart-testing (template)
         run: |
@@ -81,7 +86,7 @@ jobs:
           check-latest: true
 
       - name: Set up chart-testing
-        uses: helm/chart-testing-action@v2.6.1
+        uses: helm/chart-testing-action@v2.7.0
 
       - name: Add dependency chart repos
         run: |
@@ -90,24 +95,20 @@ jobs:
       - name: Run chart-testing (list-changed)
         id: list-changed
         run: |
-          changed=$( ct list-changed )
+          changed=$( ct list-changed --target-branch ${{ github.event.repository.default_branch }})
           if [[ -n "$changed" ]]; then
             echo "changed=true" >> $GITHUB_OUTPUT
           fi
 
       - name: Run chart-testing (lint)
         if: steps.list-changed.outputs.changed == 'true'
-        run: ct lint --debug --check-version-increment=false
-
-      - name: Create kind cluster
-        uses: helm/kind-action@v1.10.0
-        if: steps.list-changed.outputs.changed == 'true'
-        with:
-          wait: 120s
-          version: ${{ env.KIND_VERSION }}
-          node_image: ${{ env.KIND_NODE_IMAGE }}
-          kubectl_version: ${{ env.K8S_VERSION }}
+        run: ct lint --debug --check-version-increment=false --target-branch ${{ github.event.repository.default_branch }}
 
       - name: Run chart-testing (install)
         if: steps.list-changed.outputs.changed == 'true'
-        run: ct install --debug --helm-extra-args "--timeout 800s"
+        run: |
+          export CLUSTER_VERSION=kindest/node:${{ matrix.k8s }}
+          hack/setup-dev-base.sh
+          export KUBECONFIG=~/.kube/karmada.config
+
+          ct install --target-branch ${{ github.event.repository.default_branch }} --charts charts/karmada --debug --helm-extra-set-args '--set components={search,metricsAdapter,descheduler},apiServer.hostNetwork=true' --helm-extra-args "--timeout 800s" --skip-clean-up
+          kubectl get pods -A
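The same chart-testing flags can usually be kept in a ct.yaml config file instead of being repeated on every invocation. A sketch under that assumption (the key names mirror the CLI flags; verify them against your ct version):

```yaml
# ct.yaml: illustrative config mirroring the flags used above.
target-branch: master
check-version-increment: false
debug: true
charts:
  - charts/karmada
helm-extra-args: "--timeout 800s"
```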
@@ -15,8 +15,8 @@ concurrency:
 permissions:
   contents: read # Required to check out the code
 jobs:
-  init:
-    name: init
+  test-on-kubernetes-matrix:
+    name: Test on Kubernetes
     runs-on: ubuntu-22.04
     strategy:
       fail-fast: false
@@ -24,10 +24,10 @@ jobs:
         # Here support the latest three minor releases of Kubernetes, this can be considered to be roughly
         # the same as the End of Life of the Kubernetes release: https://kubernetes.io/releases/
        # Please remember to update the CI Schedule Workflow when we add a new version.
-        k8s: [ v1.29.0, v1.30.0, v1.31.0 ]
+        k8s: [ v1.31.0, v1.32.0, v1.33.0 ]
     steps:
       - name: checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           # Number of commits to fetch. 0 indicates all history for all branches and tags.
           # We need to guess version via git tags.
@@ -44,9 +44,10 @@ jobs:
           hack/cli-testing-environment.sh
 
           # run a single e2e
-          export KUBECONFIG=${HOME}/karmada/karmada-apiserver.config
+          export PULL_BASED_CLUSTERS="member1:${HOME}/.kube/member1.config"
+          export KUBECONFIG=${HOME}/.kube/karmada-host.config:${HOME}/karmada/karmada-apiserver.config
           GO111MODULE=on go install github.com/onsi/ginkgo/v2/ginkgo
-          ginkgo -v --race --trace -p --focus="[BasicPropagation] propagation testing deployment propagation testing" ./test/e2e/
+          ginkgo -v --race --trace -p --focus="[BasicPropagation] propagation testing deployment propagation testing" ./test/e2e/suites/base
       - name: export logs
         if: always()
         run: |
@@ -69,10 +70,10 @@ jobs:
       fail-fast: false
       matrix:
         # Latest three minor releases of Kubernetes
-        k8s: [ v1.29.0, v1.30.0, v1.31.0 ]
+        k8s: [ v1.31.0, v1.32.0, v1.33.0 ]
     steps:
       - name: checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           fetch-depth: 0
       - name: install Go
@@ -87,9 +88,10 @@ jobs:
           hack/cli-testing-init-with-config.sh
 
           # run a single e2e
-          export KUBECONFIG=${HOME}/karmada/karmada-apiserver.config
+          export PULL_BASED_CLUSTERS="config-member1:${HOME}/.kube/config-member1.config"
+          export KUBECONFIG=${HOME}/.kube/karmada-host.config:${HOME}/karmada/karmada-apiserver.config
           GO111MODULE=on go install github.com/onsi/ginkgo/v2/ginkgo
-          ginkgo -v --race --trace -p --focus="[BasicPropagation] propagation testing deployment propagation testing" ./test/e2e/
+          ginkgo -v --race --trace -p --focus="[BasicPropagation] propagation testing deployment propagation testing" ./test/e2e/suites/base
       - name: export logs for config test
         if: always()
         run: |
@@ -24,7 +24,7 @@ jobs:
         # Here support the latest three minor releases of Kubernetes, this can be considered to be roughly
         # the same as the End of Life of the Kubernetes release: https://kubernetes.io/releases/
         # Please remember to update the CI Schedule Workflow when we add a new version.
-        k8s: [ v1.29.0, v1.30.0, v1.31.0 ]
+        k8s: [ v1.31.0, v1.32.0, v1.33.0 ]
     steps:
       # Free up disk space on Ubuntu
       - name: Free Disk Space (Ubuntu)
@@ -40,7 +40,7 @@ jobs:
           docker-images: false
           swap-storage: false
       - name: checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           # Number of commits to fetch. 0 indicates all history for all branches and tags.
           # We need to guess version via git tags.
@@ -59,7 +59,7 @@ jobs:
           export KUBECONFIG=${HOME}/.kube/karmada.config
           kubectl config use-context karmada-apiserver
           GO111MODULE=on go install github.com/onsi/ginkgo/v2/ginkgo
-          ginkgo -v --race --trace -p --focus="[BasicPropagation] propagation testing deployment propagation testing" ./test/e2e/
+          ginkgo -v --race --trace -p --focus="[BasicPropagation] propagation testing deployment propagation testing" ./test/e2e/suites/base
       - name: export logs
         if: always()
         run: |
@@ -23,7 +23,7 @@ jobs:
           - amd64
           - arm64
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
       - name: Set up Go
         uses: actions/setup-go@v5
         with:
@@ -52,7 +52,7 @@ jobs:
       hashes: ${{ steps.hash.outputs.hashes }}
     steps:
       - name: download cli
-        uses: actions/download-artifact@v4
+        uses: actions/download-artifact@v5
         with:
           path: _output/release
           pattern: cli-*
@@ -71,7 +71,7 @@ jobs:
       id-token: write # Needed for provenance signing and ID
       contents: write # Needed for release uploads
     # Must be referenced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
-    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.0.0
+    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
     with:
       base64-subjects: "${{ needs.generate-subject-for-cli-provenance.outputs.hashes }}"
       provenance-name: "karmada-cli.intoto.jsonl"
@@ -84,7 +84,7 @@ jobs:
       hashes: ${{ steps.hash.outputs.hashes }}
     runs-on: ubuntu-22.04
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
       - name: Rename the crds directory
         run: |
           mv ./charts/karmada/_crds ./charts/karmada/crds
@@ -113,7 +113,7 @@ jobs:
       id-token: write # Needed for provenance signing and ID
       contents: write # Needed for release uploads
     # Must be referenced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
-    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.0.0
+    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
     with:
       base64-subjects: "${{ needs.release-crds-assests.outputs.hashes }}"
       provenance-name: "karmada-crds.intoto.jsonl"
@@ -126,7 +126,7 @@ jobs:
       hashes: ${{ steps.hash.outputs.hashes }}
     runs-on: ubuntu-22.04
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
       - name: Making helm charts
         env:
           VERSION: ${{ github.ref_name }}
@@ -152,7 +152,7 @@ jobs:
       id-token: write # Needed for provenance signing and ID
       contents: write # Needed for release uploads
     # Must be referenced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
-    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.0.0
+    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
     with:
       base64-subjects: "${{ needs.release-charts.outputs.hashes }}"
       provenance-name: "karmada-charts.intoto.jsonl"
@@ -165,9 +165,9 @@ jobs:
       hashes: ${{ steps.sbom-hash.outputs.hashes}}
     runs-on: ubuntu-22.04
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
       - name: Generate sbom for karmada file system
-        uses: aquasecurity/trivy-action@0.29.0
+        uses: aquasecurity/trivy-action@0.32.0
         with:
           scan-type: 'fs'
           format: 'spdx'
@@ -195,7 +195,7 @@ jobs:
       id-token: write # Needed for provenance signing and ID
       contents: write # Needed for release uploads
     # Must be referenced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
-    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.0.0
+    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
     with:
       base64-subjects: "${{ needs.sbom-assests.outputs.hashes }}"
       provenance-name: "karmada-sbom.intoto.jsonl"
@@ -206,6 +206,10 @@ jobs:
       GH_TOKEN: ${{ github.token }}
     needs:
       - release-assests
+    # prevent job running from forked repository, otherwise
+    # 1. running on the forked repository would use unnecessary GitHub Action time.
+    # 2. running on the forked repository would open a PR to publish an inaccurate version of karmada in repo kubernetes-sigs/krew-index.
+    if: ${{ github.repository == 'karmada-io/karmada' }}
     name: Update krew-index
     runs-on: ubuntu-22.04
     steps:
@@ -216,7 +220,7 @@ jobs:
           echo "Got the latest tag:$LATEST_TAG"
           echo "event.tag:"${{ github.event.release.tag_name }}
           echo "latestTag=$LATEST_TAG" >> "$GITHUB_OUTPUT"
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
         if: steps.get-latest-tag.outputs.latestTag == github.event.release.tag_name
       - name: Update new version in krew-index
         if: steps.get-latest-tag.outputs.latestTag == github.event.release.tag_name
@@ -15,7 +15,7 @@ jobs:
     runs-on: ubuntu-22.04
     steps:
       - name: checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           # fetch-depth:
           # 0 indicates all history for all branches and tags.
@@ -15,7 +15,7 @@ jobs:
     runs-on: ubuntu-22.04
     steps:
       - name: checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           fetch-depth: 0
       - name: install Go
@@ -1 +1 @@
-1.22.9
+1.24.6
.golangci.yml (189 changes)
@@ -1,10 +1,11 @@
 # This files contains all configuration options for analysis running.
 # More details please refer to: https://golangci-lint.run/usage/configuration/
 
+version: "2"
 run:
-  # timeout for analysis, e.g. 30s, 5m, default is 1m
+  # timeout for analysis, e.g. 30s, 5m, default timeout is disabled
   timeout: 10m
 
   # One of 'readonly' and 'vendor'.
   # - readonly: the go command is disallowed from the implicit automatic updating of go.mod described above.
   #   Instead, it fails when any changes to go.mod are needed. This setting is most useful to check
@@ -14,95 +15,95 @@ run:
   modules-download-mode: readonly
 linters:
   enable:
-    # linters maintained by golang.org
-    - gofmt
-    - goimports
-    - govet
-    # linters default enabled by golangci-lint .
-    - errcheck
-    - gosimple
-    - ineffassign
-    - staticcheck
-    - typecheck
-    - unused
-    # other linters supported by golangci-lint.
-    - gci
-    - gocyclo
-    - gosec
-    - misspell
-    - whitespace
-    - revive
-    - depguard
-
-linters-settings:
-  depguard:
-    rules:
-      main:
-        deny:
-          - pkg: "io/ioutil"
-            desc: "replaced by io and os packages since Go 1.16: https://tip.golang.org/doc/go1.16#ioutil"
-  goimports:
-    local-prefixes: github.com/karmada-io/karmada
-  gocyclo:
-    # minimal cyclomatic complexity to report
-    min-complexity: 15
-  gci:
-    sections:
-      - Standard
-      - Default
-      - Prefix(github.com/karmada-io/karmada)
-  revive:
-    rules:
-      # Disable if-return as it is too strict and not always useful.
-      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#if-return
-      - name: if-return
-        disabled: true
-      - name: package-comments
-      - name: superfluous-else
-        arguments:
-          - preserveScope
-      - name: error-strings
-      - name: error-return
-      - name: receiver-naming
-      - name: increment-decrement
-      - name: range
-      - name: error-naming
-      - name: dot-imports
-      - name: errorf
-      - name: exported
-      - name: var-declaration
-      - name: blank-imports
-      - name: indent-error-flow
-      - name: unreachable-code
-      - name: var-naming
-      - name: redefines-builtin-id
-      - name: unused-parameter
-      - name: context-as-argument
-      - name: context-keys-type
-      - name: unexported-return
-      - name: time-naming
-      - name: empty-block
-
-issues:
-  # The list of ids of default excludes to include or disable. By default it's empty.
-  include:
-    # disable excluding of issues about comments from revive
-    # see https://golangci-lint.run/usage/configuration/#command-line-options for more info
-    - EXC0012
-    - EXC0013
-    - EXC0014
-  # Which dirs to exclude: issues from them won't be reported.
-  # Can use regexp here: `generated.*`, regexp is applied on full path,
-  # including the path prefix if one is set.
-  # Default dirs are skipped independently of this option's value (see exclude-dirs-use-default).
-  # "/" will be replaced by current OS file path separator to properly work on Windows.
-  # Default: []
-  exclude-dirs:
-    - hack/tools/preferredimports # This code is directly lifted from the Kubernetes codebase, skip checking
-    - (^|/)vendor($|/)
-    - (^|/)third_party($|/)
-    - pkg/util/lifted # This code is lifted from other projects(Kubernetes, Kubefed, and so on), skip checking.
-  # Enables exclude of directories:
-  # - vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
-  # Default: true
-  exclude-dirs-use-default: false
+    - depguard
+    - gocyclo
+    - gosec
+    - misspell
+    - revive
+    - whitespace
+  settings:
+    depguard:
+      rules:
+        main:
+          deny:
+            - pkg: io/ioutil
+              desc: 'replaced by io and os packages since Go 1.16: https://tip.golang.org/doc/go1.16#ioutil'
+    gocyclo:
+      # minimal cyclomatic complexity to report
+      min-complexity: 15
+    revive:
+      rules:
+        # Disable if-return as it is too strict and not always useful.
+        # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#if-return
+        - name: if-return
+          disabled: true
+        # Disable package-comments for now since most packages in this project are primarily for internal use.
+        # If we decide to provide public packages in the future, we can move them to a separate
+        # repository and revisit adding package-level comments at that time.
+        - name: package-comments
+          disabled: true
+        - name: superfluous-else
+          arguments:
+            - preserveScope
+        - name: error-strings
+        - name: error-return
+        - name: receiver-naming
+        - name: increment-decrement
+        - name: range
+        - name: error-naming
+        - name: dot-imports
+        - name: errorf
+        - name: exported
+        - name: var-declaration
+        - name: blank-imports
+        - name: indent-error-flow
+        - name: unreachable-code
+        - name: var-naming
+        - name: redefines-builtin-id
+        - name: unused-parameter
+        - name: context-as-argument
+        - name: context-keys-type
+        - name: unexported-return
+        - name: time-naming
+        - name: empty-block
+    staticcheck:
+      checks:
+        - all
+        # Disable QF1008 to retain embedded fields for better readability.
+        - "-QF1008"
+        # Disable ST1000 (staticcheck) for now since most packages in this project are primarily for internal use.
+        # If we decide to provide public packages in the future, we can move them to a separate
+        # repository and revisit adding package-level comments at that time.
+        - "-ST1000"
+  exclusions:
+    generated: lax
+    presets:
+      - common-false-positives
+      - legacy
+      - std-error-handling
+    paths:
+      - hack/tools/preferredimports
+      - (^|/)vendor($|/)
+      - (^|/)third_party($|/)
+      - pkg/util/lifted
+formatters:
+  enable:
+    - gci
+    - gofmt
+    - goimports
+  settings:
+    gci:
+      sections:
+        - Standard
+        - Default
+        - Prefix(github.com/karmada-io/karmada)
+    goimports:
+      local-prefixes:
+        - github.com/karmada-io/karmada
+  exclusions:
+    generated: lax
+    paths:
+      - hack/tools/preferredimports
+      - (^|/)vendor($|/)
+      - (^|/)third_party($|/)
+      - pkg/util/lifted
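Taken together, this hunk migrates the file from the golangci-lint v1 layout to the v2 schema: `version: "2"` is declared, the formatters (gci, gofmt, goimports) move out of `linters` into a dedicated `formatters` section, `linters-settings` becomes `linters.settings`, and the old `issues`/`exclude-dirs` mechanism is replaced by `exclusions` blocks. A stripped-down skeleton of the v2 layout (the linter selection here is illustrative, not the project's):

```yaml
version: "2"
run:
  timeout: 10m
linters:
  enable:
    - revive            # illustrative choice
  settings:
    revive:
      rules:
        - name: exported
  exclusions:
    paths:
      - (^|/)vendor($|/)
formatters:
  enable:
    - gofmt
```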
@@ -1,18 +1,3 @@
 # Karmada Maintainers
 
-Official list of Karmada Maintainers.
-
-Please keep the below list sorted in ascending order.
-
-## Maintainers
-
-| Maintainer  | GitHub ID         | Affiliation   | Email                    |
-|-------------|-------------------|---------------|--------------------------|
-| Hanbo Li    | @mrlihanbo        | ByteDance     | <mrlihanbo@gmail.com>    |
-| Hongcai Ren | @RainbowMango     | Huawei        | <renhongcai@huawei.com>  |
-| Kevin Wang  | @kevin-wangzefeng | Huawei        | <wangzefeng@huawei.com>  |
-| Lei Xue     | @carmark          | Moore Threads | <vfs@live.com>           |
-| Shiyi Xie   | @GitHubxsy        | Huawei        | <xieshiyi1@huawei.com>   |
-| Xiao Zhang  | @wawa0210         | DaoCloud      | <xiao.zhang@daocloud.io> |
-| Yifan Shen  | @zoroyouxi        | ICBC          | <shenyf@sdc.icbc.com.cn> |
-| Yiheng Ci   | @lfbear           | VIPKID        | <ciyiheng@vipkid.com.cn> |
+See [MAINTAINERS in community repo](https://github.com/karmada-io/community/blob/main/MAINTAINERS.md)
OWNERS (1 change)
@@ -11,4 +11,5 @@ approvers:
 - Garrybest
 - kevin-wangzefeng
 - RainbowMango
+- whitewindmills
 - XiShanYongYe-Chang
README.md (21 changes)
@@ -186,12 +186,17 @@ nginx 2/2 2 2 20s
 ## Kubernetes compatibility
 
-| | Kubernetes 1.16 | Kubernetes 1.17 | Kubernetes 1.18 | Kubernetes 1.19 | Kubernetes 1.20 | Kubernetes 1.21 | Kubernetes 1.22 | Kubernetes 1.23 | Kubernetes 1.24 | Kubernetes 1.25 | Kubernetes 1.26 | Kubernetes 1.27 | Kubernetes 1.28 | Kubernetes 1.29 |
-|-----------------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|
-| Karmada v1.7 | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
-| Karmada v1.8 | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
-| Karmada v1.9 | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
-| Karmada HEAD (master) | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
+Karmada is compatible with a wide range of Kubernetes versions. For detailed compatibility instructions,
+please refer to the [Kubernetes Compatibility](https://karmada.io/docs/administrator/compatibility/).
+
+The following table shows the compatibility test results against the latest 10 Kubernetes versions:
+
+| | Kubernetes 1.33 | Kubernetes 1.32 | Kubernetes 1.31 | Kubernetes 1.30 | Kubernetes 1.29 | Kubernetes 1.28 | Kubernetes 1.27 | Kubernetes 1.26 | Kubernetes 1.25 | Kubernetes 1.24 | Kubernetes 1.23 |
+|-----------------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|
+| Karmada v1.12 | | | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
+| Karmada v1.13 | | | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
+| Karmada v1.14 | | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
+| Karmada HEAD (master) | | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
 
 Key:
 * `✓` Karmada and the Kubernetes version are exactly compatible.
@@ -201,8 +206,8 @@ Key:
 ## Meeting
 
 Regular Community Meeting:
-* Tuesday at 14:30 UTC+8 (Chinese)(biweekly). [Convert to your timezone](https://www.thetimezoneconverter.com/?t=14%3A30&tz=GMT%2B8&).
-* Tuesday at 08:00 Pacific Time (English)(biweekly). [Convert to your timezone](https://www.thetimezoneconverter.com/?t=8:00&tz=PT%20%28Pacific%20Time%29).
+* Tuesday at 14:30 UTC+8 (Chinese)(biweekly). [Convert to your timezone](https://dateful.com/convert/utc8?t=1430).
+* Tuesday at 15:00 UTC+0 (English)(biweekly). [Convert to your timezone](https://dateful.com/convert/coordinated-universal-time-utc?t=15).
 
 Resources:
 - [Meeting Notes and Agenda](https://docs.google.com/document/d/1y6YLVC-v7cmVAdbjedoyR5WL0-q45DBRXTvz5_I7bkA/edit)
ROADMAP.md (29 changes)
@@ -1,29 +1,4 @@
 # Karmada Roadmap
 
-This document defines a high level roadmap for Karmada development and upcoming releases.
-Community and contributor involvement is vital for successfully implementing all desired items for each release.
-We hope that the items listed below will inspire further engagement from the community to keep Karmada progressing and shipping exciting and valuable features.
-
-## 2024 H1
-- Lazy mode of PropagationPolicy
-- Cluster Problem Detector(CPD) - Part one: Cluster condition-based remedy system
-- Scheduler Enhancement - enable scheduler estimator supports resource quota
-- Scheduler Enhancement - Provide a mechanism of re-balance workloads
-
-## 2024 H2
-- AI training and batch job support (Including PyTorch, Spark, Flink and so on)
-- Karmada Dashboard - alpha release
-- Multi-cluster workflow
-- Scheduler Enhancement - Optimize scheduling with GPU resources
-
-## Pending
-- Cluster addon management
-- Multi-cluster Application
-- Multi-cluster monitoring
-- Multi-cluster logging
-- Multi-cluster storage
-- Multi-cluster RBAC
-- Multi-cluster networking
-- Data migration across clusters
-- Image registry across clouds
-- Multi-cluster Service Mesh solutions
+This document has been moved to [karmada-io/community](https://github.com/karmada-io/community/blob/main/ROADMAP.md)
+to include all efforts for this repository and subprojects.
File diff suppressed because it is too large.
@@ -15,6 +15,7 @@ spec:
       labels:
         app: karmada-agent
     spec:
+      priorityClassName: system-node-critical
       serviceAccountName: karmada-agent-sa
       tolerations:
         - key: node-role.kubernetes.io/master
@@ -23,6 +24,11 @@ spec:
         - name: karmada-agent
           image: docker.io/karmada/karmada-agent:latest
           imagePullPolicy: {{image_pull_policy}}
+          env:
+            - name: POD_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.podIP
           command:
             - /bin/karmada-agent
             - --karmada-kubeconfig=/etc/karmada/config/karmada.config
@@ -30,9 +36,10 @@ spec:
             - --cluster-name={{member_cluster_name}}
             - --cluster-api-endpoint={{member_cluster_api_endpoint}}
             - --cluster-status-update-frequency=10s
-            - --health-probe-bind-address=0.0.0.0:10357
-            - --metrics-bind-address=:8080
+            - --metrics-bind-address=$(POD_IP):8080
+            - --health-probe-bind-address=$(POD_IP):10357
             - --feature-gates=CustomizedClusterResourceModeling=true,MultiClusterService=true
+            - --logging-format=json
             - --v=4
           livenessProbe:
             httpGet:
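A recurring change in these manifests is binding metrics and health endpoints to the pod IP rather than 0.0.0.0 or all interfaces. The mechanism is the Kubernetes downward API combined with `$(VAR)` expansion in container arguments; a generic sketch (name, image, and binary are placeholders, not from the diff):

```yaml
containers:
  - name: example                 # placeholder
    image: example.io/app:latest  # placeholder
    env:
      - name: POD_IP
        valueFrom:
          fieldRef:
            fieldPath: status.podIP  # filled in by the kubelet at runtime
    command:
      - /bin/app                     # placeholder binary
      - --metrics-bind-address=$(POD_IP):8080       # $(POD_IP) expands to the env var above
      - --health-probe-bind-address=$(POD_IP):10357
```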
@@ -24,6 +24,14 @@ spec:
         - name: karmada-aggregated-apiserver
           image: docker.io/karmada/karmada-aggregated-apiserver:latest
           imagePullPolicy: IfNotPresent
+          securityContext:
+            allowPrivilegeEscalation: false
+            privileged: false
+          env:
+            - name: POD_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.podIP
           command:
             - /bin/karmada-aggregated-apiserver
             - --kubeconfig=/etc/karmada/config/karmada.config
@@ -34,11 +42,13 @@ spec:
             - --etcd-certfile=/etc/karmada/pki/etcd-client/tls.crt
             - --etcd-keyfile=/etc/karmada/pki/etcd-client/tls.key
             - --tls-cert-file=/etc/karmada/pki/server/tls.crt
-            - --tls-private-key-file=/etc/karmada/pki//server/tls.key
+            - --tls-private-key-file=/etc/karmada/pki/server/tls.key
             - --audit-log-path=-
             - --audit-log-maxage=0
             - --audit-log-maxbackup=0
             - --tls-min-version=VersionTLS13
+            - --logging-format=json
+            - --bind-address=$(POD_IP)
           resources:
             requests:
               cpu: 100m
@@ -77,6 +87,10 @@ spec:
         - name: etcd-client-cert
           secret:
             secretName: karmada-aggregated-apiserver-etcd-client-cert
+      securityContext:
+        seccompProfile:
+          type: RuntimeDefault
+      priorityClassName: system-node-critical
 ---
 apiVersion: v1
 kind: Service
@@ -100,6 +100,9 @@ spec:
             - name: service-account-key-pair
               mountPath: /etc/karmada/pki/service-account-key-pair
               readOnly: true
+          securityContext:
+            allowPrivilegeEscalation: false
+            privileged: false
       volumes:
         - name: server-cert
           secret:
@@ -121,7 +124,9 @@ spec:
       priorityClassName: system-node-critical
       restartPolicy: Always
       schedulerName: default-scheduler
-      securityContext: {}
+      securityContext:
+        seccompProfile:
+          type: RuntimeDefault
       terminationGracePeriodSeconds: 30
       tolerations:
         - effect: NoExecute
@@ -21,17 +21,28 @@ spec:
           operator: Exists
       containers:
         - name: karmada-controller-manager
+          securityContext:
+            allowPrivilegeEscalation: false
+            privileged: false
           image: docker.io/karmada/karmada-controller-manager:latest
           imagePullPolicy: IfNotPresent
+          env:
+            - name: POD_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.podIP
+            - name: KUBE_CACHE_MUTATION_DETECTOR
+              value: "{{KUBE_CACHE_MUTATION_DETECTOR}}"
           command:
             - /bin/karmada-controller-manager
             - --kubeconfig=/etc/karmada/config/karmada.config
-            - --metrics-bind-address=:8080
             - --cluster-status-update-frequency=10s
             - --failover-eviction-timeout=30s
             - --controllers=*,hpaScaleTargetMarker,deploymentReplicasSyncer
-            - --feature-gates=Failover=true,PropagationPolicyPreemption=true,MultiClusterService=true,StatefulFailoverInjection=true
-            - --health-probe-bind-address=0.0.0.0:10357
+            - --feature-gates=AllAlpha=true,AllBeta=true
+            - --metrics-bind-address=$(POD_IP):8080
+            - --health-probe-bind-address=$(POD_IP):10357
+            - --enable-no-execute-taint-eviction=true
+            - --logging-format=json
             - --v=4
           livenessProbe:
             httpGet:
@@ -53,3 +64,7 @@ spec:
         - name: karmada-config
           secret:
             secretName: karmada-controller-manager-config
+      securityContext:
+        seccompProfile:
+          type: RuntimeDefault
       priorityClassName: system-node-critical
@@ -21,16 +21,27 @@ spec:
           operator: Exists
       containers:
         - name: karmada-descheduler
+          securityContext:
+            allowPrivilegeEscalation: false
+            privileged: false
           image: docker.io/karmada/karmada-descheduler:latest
           imagePullPolicy: IfNotPresent
+          env:
+            - name: POD_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.podIP
+            - name: KUBE_CACHE_MUTATION_DETECTOR
+              value: "{{KUBE_CACHE_MUTATION_DETECTOR}}"
           command:
             - /bin/karmada-descheduler
             - --kubeconfig=/etc/karmada/config/karmada.config
-            - --metrics-bind-address=0.0.0.0:8080
-            - --health-probe-bind-address=0.0.0.0:10358
+            - --metrics-bind-address=$(POD_IP):8080
+            - --health-probe-bind-address=$(POD_IP):10358
             - --scheduler-estimator-ca-file=/etc/karmada/pki/scheduler-estimator-client/ca.crt
             - --scheduler-estimator-cert-file=/etc/karmada/pki/scheduler-estimator-client/tls.crt
             - --scheduler-estimator-key-file=/etc/karmada/pki/scheduler-estimator-client/tls.key
+            - --logging-format=json
             - --v=4
           livenessProbe:
             httpGet:
@@ -58,3 +69,7 @@ spec:
         - name: scheduler-estimator-client-cert
           secret:
             secretName: karmada-descheduler-scheduler-estimator-client-cert
+      securityContext:
+        seccompProfile:
+          type: RuntimeDefault
       priorityClassName: system-node-critical
@@ -33,6 +33,9 @@ spec:
         - operator: Exists
       containers:
         - name: etcd
+          securityContext:
+            allowPrivilegeEscalation: false
+            privileged: false
           image: registry.k8s.io/etcd:3.5.16-0
           imagePullPolicy: IfNotPresent
           livenessProbe:
@@ -88,6 +91,9 @@ spec:
               mountPath: /etc/karmada/pki/server
             - name: etcd-client-cert
               mountPath: /etc/karmada/pki/etcd-client
+      securityContext:
+        seccompProfile:
+          type: RuntimeDefault
       volumes:
         - name: etcd-data
           hostPath:
@@ -99,6 +105,7 @@ spec:
         - name: etcd-client-cert
           secret:
             secretName: etcd-etcd-client-cert
+      priorityClassName: system-node-critical
 ---
 apiVersion: v1
@@ -22,11 +22,22 @@ spec:
       automountServiceAccountToken: false
       containers:
         - name: karmada-metrics-adapter
+          securityContext:
+            allowPrivilegeEscalation: false
+            privileged: false
           image: docker.io/karmada/karmada-metrics-adapter:latest
           imagePullPolicy: IfNotPresent
+          env:
+            - name: POD_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.podIP
+            - name: KUBE_CACHE_MUTATION_DETECTOR
+              value: "{{KUBE_CACHE_MUTATION_DETECTOR}}"
           command:
             - /bin/karmada-metrics-adapter
             - --kubeconfig=/etc/karmada/config/karmada.config
+            - --metrics-bind-address=$(POD_IP):8080
             - --authentication-kubeconfig=/etc/karmada/config/karmada.config
             - --authorization-kubeconfig=/etc/karmada/config/karmada.config
             - --client-ca-file=/etc/karmada/pki/server/ca.crt
@@ -36,6 +47,8 @@ spec:
             - --audit-log-maxage=0
             - --audit-log-maxbackup=0
             - --tls-min-version=VersionTLS13
+            - --bind-address=$(POD_IP)
+            - --logging-format=json
           readinessProbe:
             httpGet:
               path: /readyz
@@ -70,6 +83,10 @@ spec:
         - name: server-cert
           secret:
             secretName: karmada-metrics-adapter-cert
+      securityContext:
+        seccompProfile:
+          type: RuntimeDefault
       priorityClassName: system-node-critical
 ---
 apiVersion: v1
 kind: Service
@@ -21,8 +21,18 @@ spec:
           operator: Exists
       containers:
         - name: karmada-scheduler-estimator
+          securityContext:
+            allowPrivilegeEscalation: false
+            privileged: false
           image: docker.io/karmada/karmada-scheduler-estimator:latest
           imagePullPolicy: IfNotPresent
+          env:
+            - name: POD_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.podIP
+            - name: KUBE_CACHE_MUTATION_DETECTOR
+              value: "{{KUBE_CACHE_MUTATION_DETECTOR}}"
           command:
             - /bin/karmada-scheduler-estimator
             - --kubeconfig=/etc/{{member_cluster_name}}-kubeconfig
@@ -30,8 +40,9 @@ spec:
             - --grpc-auth-cert-file=/etc/karmada/pki/server/tls.crt
             - --grpc-auth-key-file=/etc/karmada/pki/server/tls.key
             - --grpc-client-ca-file=/etc/karmada/pki/server/ca.crt
-            - --metrics-bind-address=0.0.0.0:8080
-            - --health-probe-bind-address=0.0.0.0:10351
+            - --metrics-bind-address=$(POD_IP):8080
+            - --health-probe-bind-address=$(POD_IP):10351
+            - --logging-format=json
           livenessProbe:
             httpGet:
               path: /healthz
@@ -55,10 +66,14 @@ spec:
       volumes:
         - name: server-cert
           secret:
-            secretName: karmada-metrics-adapter-cert
+            secretName: karmada-scheduler-estimator-cert
         - name: member-kubeconfig
           secret:
             secretName: {{member_cluster_name}}-kubeconfig
+      securityContext:
+        seccompProfile:
+          type: RuntimeDefault
       priorityClassName: system-node-critical
 ---
 apiVersion: v1
 kind: Service
@@ -21,6 +21,9 @@ spec:
           operator: Exists
       containers:
         - name: karmada-scheduler
+          securityContext:
+            allowPrivilegeEscalation: false
+            privileged: false
           image: docker.io/karmada/karmada-scheduler:latest
           imagePullPolicy: IfNotPresent
           livenessProbe:
@@ -36,15 +39,24 @@ spec:
             - containerPort: 8080
               name: metrics
               protocol: TCP
+          env:
+            - name: POD_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.podIP
+            - name: KUBE_CACHE_MUTATION_DETECTOR
+              value: "{{KUBE_CACHE_MUTATION_DETECTOR}}"
           command:
             - /bin/karmada-scheduler
             - --kubeconfig=/etc/karmada/config/karmada.config
-            - --metrics-bind-address=0.0.0.0:8080
-            - --health-probe-bind-address=0.0.0.0:10351
+            - --metrics-bind-address=$(POD_IP):8080
+            - --health-probe-bind-address=$(POD_IP):10351
             - --enable-scheduler-estimator=true
             - --scheduler-estimator-ca-file=/etc/karmada/pki/scheduler-estimator-client/ca.crt
             - --scheduler-estimator-cert-file=/etc/karmada/pki/scheduler-estimator-client/tls.crt
             - --scheduler-estimator-key-file=/etc/karmada/pki/scheduler-estimator-client/tls.key
+            - --feature-gates=AllAlpha=true,AllBeta=true
+            - --logging-format=json
             - --v=4
           volumeMounts:
             - name: karmada-config
@@ -59,3 +71,7 @@ spec:
         - name: scheduler-estimator-client-cert
           secret:
             secretName: karmada-scheduler-scheduler-estimator-client-cert
+      securityContext:
+        seccompProfile:
+          type: RuntimeDefault
       priorityClassName: system-node-critical
@@ -22,8 +22,18 @@ spec:
       automountServiceAccountToken: false
       containers:
         - name: karmada-search
+          securityContext:
+            allowPrivilegeEscalation: false
+            privileged: false
           image: docker.io/karmada/karmada-search:latest
           imagePullPolicy: IfNotPresent
+          env:
+            - name: POD_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.podIP
+            - name: KUBE_CACHE_MUTATION_DETECTOR
+              value: "{{KUBE_CACHE_MUTATION_DETECTOR}}"
           command:
             - /bin/karmada-search
             - --kubeconfig=/etc/karmada/config/karmada.config
@@ -39,6 +49,8 @@ spec:
             - --audit-log-maxage=0
             - --audit-log-maxbackup=0
             - --tls-min-version=VersionTLS13
+            - --bind-address=$(POD_IP)
+            - --logging-format=json
           livenessProbe:
             httpGet:
               path: /livez
@@ -70,6 +82,10 @@ spec:
         - name: etcd-client-cert
           secret:
             secretName: karmada-search-etcd-client-cert
+      securityContext:
+        seccompProfile:
+          type: RuntimeDefault
       priorityClassName: system-node-critical
 ---
 apiVersion: v1
 kind: Service
@@ -21,17 +21,27 @@ spec:
           operator: Exists
       containers:
         - name: karmada-webhook
+          securityContext:
+            allowPrivilegeEscalation: false
+            privileged: false
           image: docker.io/karmada/karmada-webhook:latest
           imagePullPolicy: IfNotPresent
+          env:
+            - name: POD_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.podIP
           command:
             - /bin/karmada-webhook
             - --kubeconfig=/etc/karmada/config/karmada.config
-            - --bind-address=0.0.0.0
-            - --metrics-bind-address=:8080
             - --default-not-ready-toleration-seconds=30
             - --default-unreachable-toleration-seconds=30
+            - --bind-address=$(POD_IP)
+            - --metrics-bind-address=$(POD_IP):8080
+            - --health-probe-bind-address=$(POD_IP):8000
             - --secure-port=8443
             - --cert-dir=/etc/karmada/pki/server
+            - --feature-gates=AllAlpha=true,AllBeta=true
+            - --allow-no-execute-taint-policy=true
+            - --logging-format=json
             - --v=4
           ports:
             - containerPort: 8443
@@ -56,6 +66,10 @@ spec:
         - name: server-cert
           secret:
             secretName: karmada-webhook-cert
+      securityContext:
+        seccompProfile:
+          type: RuntimeDefault
       priorityClassName: system-node-critical
 ---
 apiVersion: v1
 kind: Service
@@ -58,6 +58,9 @@ spec:
             - --v=4
           image: registry.k8s.io/kube-controller-manager:{{karmada_apiserver_version}}
           imagePullPolicy: IfNotPresent
+          securityContext:
+            allowPrivilegeEscalation: false
+            privileged: false
           livenessProbe:
             failureThreshold: 8
             httpGet:
@@ -91,3 +94,6 @@ spec:
         - name: service-account-key-pair
           secret:
             secretName: kube-controller-manager-service-account-key-pair
+      securityContext:
+        seccompProfile:
+          type: RuntimeDefault
@@ -296,3 +296,31 @@ webhooks:
     sideEffects: None
     admissionReviewVersions: [ "v1" ]
     timeoutSeconds: 3
+  - name: resourcebinding.karmada.io
+    rules:
+      - operations: ["CREATE", "UPDATE"]
+        apiGroups: ["work.karmada.io"]
+        apiVersions: ["*"]
+        resources: ["resourcebindings"]
+        scope: "Namespaced"
+    clientConfig:
+      url: https://karmada-webhook.karmada-system.svc:443/validate-resourcebinding
+      caBundle: {{caBundle}}
+    failurePolicy: Fail
+    sideEffects: NoneOnDryRun
+    admissionReviewVersions: ["v1"]
+    timeoutSeconds: 3
+  - name: clustertaintpolicy.karmada.io
+    rules:
+      - operations: ["CREATE", "UPDATE"]
+        apiGroups: ["policy.karmada.io"]
+        apiVersions: ["*"]
+        resources: ["clustertaintpolicies"]
+        scope: "Cluster"
+    clientConfig:
+      url: https://karmada-webhook.karmada-system.svc:443/validate-clustertaintpolicy
+      caBundle: {{caBundle}}
+    failurePolicy: Fail
+    sideEffects: None
+    admissionReviewVersions: [ "v1" ]
+    timeoutSeconds: 3
@@ -5,6 +5,7 @@ reviewers:
   - jrkeen
   - pidb
   - Poor12
+  - zhzhuang-zju
 approvers:
   - a7i
   - chaosi-zju
@@ -1,6 +1,66 @@
 apiVersion: v1
 entries:
   karmada:
+  - apiVersion: v2
+    appVersion: v1.1.0
+    created: "2025-06-13T16:23:17.081220385+08:00"
+    dependencies:
+    - name: common
+      repository: https://charts.bitnami.com/bitnami
+      version: 2.x.x
+    description: A Helm chart for karmada
+    digest: cd93e64198f364ff2330d718d80b8f321530ab8147521ef2b6263198a35bc7e0
+    kubeVersion: '>= 1.16.0-0'
+    maintainers:
+    - email: chaosi@zju.edu.cn
+      name: chaosi-zju
+    - email: amiralavi7@gmail.com
+      name: a7i
+    name: karmada
+    type: application
+    urls:
+    - https://github.com/karmada-io/karmada/releases/download/v1.14.0/karmada-chart-v1.14.0.tgz
+    version: v1.14.0
+  - apiVersion: v2
+    appVersion: v1.1.0
+    created: "2025-03-10T11:24:11.714162019+08:00"
+    dependencies:
+    - name: common
+      repository: https://charts.bitnami.com/bitnami
+      version: 2.x.x
+    description: A Helm chart for karmada
+    digest: f1fa71eda8d924258c1b1aff58f14110f1b2c0935accf7b1b98f6dede5495b94
+    kubeVersion: '>= 1.16.0-0'
+    maintainers:
+    - email: chaosi@zju.edu.cn
+      name: chaosi-zju
+    - email: amiralavi7@gmail.com
+      name: a7i
+    name: karmada
+    type: application
+    urls:
+    - https://github.com/karmada-io/karmada/releases/download/v1.13.0/karmada-chart-v1.13.0.tgz
+    version: v1.13.0
+  - apiVersion: v2
+    appVersion: v1.1.0
+    created: "2024-12-09T12:16:02.111955134+08:00"
+    dependencies:
+    - name: common
+      repository: https://charts.bitnami.com/bitnami
+      version: 2.x.x
+    description: A Helm chart for karmada
+    digest: db3bf17dfb76644d57fbbb8158ba7e731bac5f03245bd525a9a4405e1c3afb05
+    kubeVersion: '>= 1.16.0-0'
+    maintainers:
+    - email: chaosi@zju.edu.cn
+      name: chaosi-zju
+    - email: amiralavi7@gmail.com
+      name: a7i
+    name: karmada
+    type: application
+    urls:
+    - https://github.com/karmada-io/karmada/releases/download/v1.12.0/karmada-chart-v1.12.0.tgz
+    version: v1.12.0
   - apiVersion: v2
     appVersion: v1.1.0
     created: "2024-09-21T12:09:38.421759709+08:00"
@@ -282,6 +342,28 @@ entries:
     - https://github.com/karmada-io/karmada/releases/download/v1.2.0/karmada-chart-v1.2.0.tgz
     version: v1.2.0
   karmada-operator:
+  - apiVersion: v2
+    appVersion: v1.1.0
+    created: "2024-12-09T12:16:03.781914346+08:00"
+    dependencies:
+    - name: common
+      repository: https://charts.bitnami.com/bitnami
+      version: 1.x.x
+    description: A Helm chart for karmada-operator
+    digest: c0c0d4ff75d539c2daf63973f9ca653e645f2b8c0c59a401c44474bfbdbbc3b9
+    kubeVersion: '>= 1.16.0-0'
+    maintainers:
+    - email: wen.chen@daocloud.io
+      name: calvin0327
+    - email: chaosi@zju.edu.cn
+      name: chaosi-zju
+    - email: amiralavi7@gmail.com
+      name: a7i
+    name: karmada-operator
+    type: application
+    urls:
+    - https://github.com/karmada-io/karmada/releases/download/v1.12.0/karmada-operator-chart-v1.12.0.tgz
+    version: v1.12.0
   - apiVersion: v2
     appVersion: v1.1.0
     created: "2024-09-21T15:01:05.712207268+08:00"
@@ -362,4 +444,4 @@ entries:
     urls:
     - https://github.com/karmada-io/karmada/releases/download/v1.8.0/karmada-operator-chart-v1.8.0.tgz
     version: v1.8.0
-generated: "2024-09-21T15:01:05.710041947+08:00"
+generated: "2025-06-13T16:23:17.069242033+08:00"
(File diff suppressed because it is too large.)
@@ -44,6 +44,13 @@ spec:
             - /bin/karmada-operator
             - --leader-elect-resource-namespace={{ .Release.Namespace }}
             - --v=2
+          {{- range .Values.operator.extraArgs }}
+            - {{ . }}
+          {{- end }}
+          {{- with .Values.operator.env }}
+          env:
+            {{- toYaml . | nindent 10 }}
+          {{- end }}
           {{- if .Values.operator.resources }}
           resources: {{- toYaml .Values.operator.resources | nindent 12 }}
           {{- end }}
@@ -58,7 +58,31 @@ operator:
   ##   - myRegistryKeySecretName
   ##
   pullSecrets: []
-  ## @param.resources
+
+  ## @param operator.env List of environment variables to inject
+  ##
+  ## - Each entry must be a valid Kubernetes EnvVar object.
+  ## - Supports both literal values and valueFrom references (ConfigMap, Secret, fieldRef, etc.).
+  ## - If omitted or set to an empty array (`[]`), no env stanza will be included.
+  ##
+  ## A sample stanza is shown below.
+  ##
+  # env:
+  #   - name: http_proxy
+  #     value: "http://best-awesome-proxy.com:8080"
+  #   - name: https_proxy
+  #     value: "http://best-awesome-proxy.com:8080"
+  #   - name: no_proxy
+  #     value: "localhost,127.0.0.1,*.svc,*.cluster.local"
+
+  ## @param operator.extraArgs List of extra arguments for the operator binary
+  ##
+  ## A sample stanza is shown below.
+  ##
+  # extraArgs:
+  #   - --arg1=val1
+  #   - --arg2
+
   resources: {}
   # If you do want to specify resources, uncomment the following
   # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
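A consumer-side sketch of the new values (the file name and proxy endpoint are illustrative, not taken from the chart), applied with something like `helm upgrade --install karmada-operator ./charts/karmada-operator -f my-values.yaml` (chart path assumed from the repository layout):

```yaml
# my-values.yaml -- hypothetical overrides exercising operator.env and
# operator.extraArgs; the proxy endpoint below is a placeholder.
operator:
  env:
    - name: https_proxy
      value: "http://proxy.internal:8080"                 # placeholder proxy
  extraArgs:
    - --leader-elect-resource-namespace=karmada-system    # flag shown in the template above
```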
@@ -108,9 +108,9 @@ The command removes all the Kubernetes components associated with the chart and
 > **Note**: There are some RBAC resources that are used by the `preJob` that can not be deleted by the `uninstall` command above. You might have to clean them manually with tools like `kubectl`. You can clean them by commands:

 ```console
-kubectl delete sa/karmada-pre-job -nkarmada-system
-kubectl delete clusterRole/karmada-pre-job
-kubectl delete clusterRoleBinding/karmada-pre-job
+kubectl delete sa/karmada-hook-job -nkarmada-system
+kubectl delete clusterRole/karmada-hook-job
+kubectl delete clusterRoleBinding/karmada-hook-job
 kubectl delete ns karmada-system
 ```
@@ -272,6 +272,7 @@ helm install karmada-scheduler-estimator -n karmada-system ./charts/karmada
 | `scheduler.affinity` | Affinity of the scheduler | `{}` |
 | `scheduler.tolerations` | Tolerations of the scheduler | `[]` |
 | `scheduler.strategy` | Strategy of the scheduler | `{"type": "RollingUpdate", "rollingUpdate": {"maxUnavailable": "0", "maxSurge": "50%"}}` |
+| `scheduler.enableSchedulerEstimator` | Enable calling the cluster scheduler estimator for adjusting replicas | `false` |
 | `webhook.labels` | Labels of the webhook deployment | `{"app": "karmada-webhook"}` |
 | `webhook.replicaCount` | Target replicas of the webhook | `1` |
 | `webhook.podLabels` | Labels of the webhook pods | `{}` |
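Assuming the new `scheduler.enableSchedulerEstimator` parameter behaves like the other table entries, it can be toggled at install time with, for example, `helm install karmada -n karmada-system ./charts/karmada --set scheduler.enableSchedulerEstimator=true` (release name and chart path follow the README's existing examples).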
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.16.5
+    controller-gen.kubebuilder.io/version: v0.18.0
   name: workloadrebalancers.apps.karmada.io
 spec:
   group: apps.karmada.io
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.16.5
+    controller-gen.kubebuilder.io/version: v0.18.0
   name: cronfederatedhpas.autoscaling.karmada.io
 spec:
   group: autoscaling.karmada.io
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.16.5
+    controller-gen.kubebuilder.io/version: v0.18.0
   name: federatedhpas.autoscaling.karmada.io
 spec:
   group: autoscaling.karmada.io
@@ -82,7 +82,9 @@ spec:
               policies:
                 description: |-
                   policies is a list of potential scaling polices which can be used during scaling.
-                  At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid
+                  If not set, use the default values:
+                  - For scale up: allow doubling the number of pods, or an absolute change of 4 pods in a 15s window.
+                  - For scale down: allow all pods to be removed in a 15s window.
                 items:
                   description: HPAScalingPolicy is a single policy which must
                     hold true for a specified past interval.
@@ -124,6 +126,24 @@ spec:
                   - For scale down: 300 (i.e. the stabilization window is 300 seconds long).
                 format: int32
                 type: integer
+              tolerance:
+                anyOf:
+                - type: integer
+                - type: string
+                description: |-
+                  tolerance is the tolerance on the ratio between the current and desired
+                  metric value under which no updates are made to the desired number of
+                  replicas (e.g. 0.01 for 1%). Must be greater than or equal to zero. If not
+                  set, the default cluster-wide tolerance is applied (by default 10%).
+
+                  For example, if autoscaling is configured with a memory consumption target of 100Mi,
+                  and scale-down and scale-up tolerances of 5% and 1% respectively, scaling will be
+                  triggered when the actual consumption falls below 95Mi or exceeds 101Mi.
+
+                  This is an alpha field and requires enabling the HPAConfigurableTolerance
+                  feature gate.
+                pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                x-kubernetes-int-or-string: true
             type: object
           scaleUp:
             description: |-
@@ -136,7 +156,9 @@ spec:
               policies:
                 description: |-
                   policies is a list of potential scaling polices which can be used during scaling.
-                  At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid
+                  If not set, use the default values:
+                  - For scale up: allow doubling the number of pods, or an absolute change of 4 pods in a 15s window.
+                  - For scale down: allow all pods to be removed in a 15s window.
                 items:
                   description: HPAScalingPolicy is a single policy which must
                     hold true for a specified past interval.
@@ -178,6 +200,24 @@ spec:
                   - For scale down: 300 (i.e. the stabilization window is 300 seconds long).
                 format: int32
                 type: integer
+              tolerance:
+                anyOf:
+                - type: integer
+                - type: string
+                description: |-
+                  tolerance is the tolerance on the ratio between the current and desired
+                  metric value under which no updates are made to the desired number of
+                  replicas (e.g. 0.01 for 1%). Must be greater than or equal to zero. If not
+                  set, the default cluster-wide tolerance is applied (by default 10%).
+
+                  For example, if autoscaling is configured with a memory consumption target of 100Mi,
+                  and scale-down and scale-up tolerances of 5% and 1% respectively, scaling will be
+                  triggered when the actual consumption falls below 95Mi or exceeds 101Mi.
+
+                  This is an alpha field and requires enabling the HPAConfigurableTolerance
+                  feature gate.
+                pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                x-kubernetes-int-or-string: true
             type: object
           type: object
         maxReplicas:
@@ -209,7 +249,6 @@ spec:
             each pod of the current scale target (e.g. CPU or memory). Such metrics are
             built in to Kubernetes, and have special scaling options on top of those
             available to normal per-pod metrics using the "pods" source.
-            This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.
           properties:
             container:
               description: container is the name of the container in the
@@ -650,8 +689,6 @@ spec:
           description: |-
             type is the type of metric source. It should be one of "ContainerResource", "External",
             "Object", "Pods" or "Resource", each mapping to a matching field in the object.
-            Note: "ContainerResource" type is available on when the feature-gate
-            HPAContainerMetrics is enabled
           type: string
         required:
         - type
@@ -1147,8 +1184,6 @@ spec:
           description: |-
             type is the type of metric source. It will be one of "ContainerResource", "External",
            "Object", "Pods" or "Resource", each corresponds to a matching field in the object.
-            Note: "ContainerResource" type is available on when the feature-gate
-            HPAContainerMetrics is enabled
           type: string
         required:
         - type
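A sketch of how the new `tolerance` field could be used, assuming the HPAConfigurableTolerance feature gate is enabled (the workload reference and numbers are illustrative):

```yaml
apiVersion: autoscaling.karmada.io/v1alpha1
kind: FederatedHPA
metadata:
  name: sample-fhpa            # illustrative name
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: sample-app           # illustrative workload
  minReplicas: 1
  maxReplicas: 10
  behavior:
    scaleDown:
      tolerance: "0.05"        # ignore metric drift within 5% when scaling down
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 80
```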
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.16.5
+    controller-gen.kubebuilder.io/version: v0.18.0
   name: resourceinterpretercustomizations.config.karmada.io
 spec:
   group: config.karmada.io
@@ -60,6 +60,59 @@ spec:
             customizations:
               description: Customizations describe the interpretation rules.
               properties:
+                componentResource:
+                  description: |-
+                    ComponentResource describes the rules for Karmada to discover the resource requirements
+                    for multiple components from the given object.
+                    This is designed for CRDs with multiple components (e.g., FlinkDeployment), but
+                    can also be used for single-component resources like Deployment.
+                    If implemented, the controller will use this to obtain per-component replica and resource
+                    requirements, and will not call ReplicaResource.
+                    If not implemented, the controller will fall back to ReplicaResource for backward compatibility.
+                    This will only be used when the feature gate 'MultiplePodTemplatesScheduling' is enabled.
+                  properties:
+                    luaScript:
+                      description: |-
+                        LuaScript holds the Lua script that is used to extract the desired replica count and resource
+                        requirements for each component of the resource.
+
+                        The script should implement a function as follows:
+
+                        ```
+                          luaScript: >
+                            function GetComponents(desiredObj)
+                              local components = {}
+
+                              local jobManagerComponent = {
+                                name = "jobmanager",
+                                replicas = desiredObj.spec.jobManager.replicas
+                              }
+                              table.insert(components, jobManagerComponent)
+
+                              local taskManagerComponent = {
+                                name = "taskmanager",
+                                replicas = desiredObj.spec.taskManager.replicas
+                              }
+                              table.insert(components, taskManagerComponent)
+
+                              return components
+                            end
+                        ```
+
+                        The content of the LuaScript needs to be a whole function including both
+                        declaration and implementation.
+
+                        The parameters will be supplied by the system:
+                          - desiredObj: the object represents the configuration to be applied
+                            to the member cluster.
+
+                        The function expects one return value:
+                          - components: the resource requirements for each component.
+                        The returned value will be set into a ResourceBinding or ClusterResourceBinding.
+                      type: string
+                  required:
+                  - luaScript
+                  type: object
                 dependencyInterpretation:
                   description: |-
                     DependencyInterpretation describes the rules for Karmada to analyze the
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.16.5
+    controller-gen.kubebuilder.io/version: v0.18.0
   name: resourceinterpreterwebhookconfigurations.config.karmada.io
 spec:
   group: config.karmada.io
@@ -48,7 +48,26 @@ spec:
               as the resources and operations it applies to.
             properties:
               clientConfig:
-                description: ClientConfig defines how to communicate with the hook.
+                description: |-
+                  ClientConfig defines how to communicate with the hook.
+                  It supports two mutually exclusive configuration modes:
+
+                  1. URL - Directly specify the webhook URL with format `scheme://host:port/path`.
+                     Example: https://webhook.example.com:8443/my-interpreter
+
+                  2. Service - Reference a Kubernetes Service that exposes the webhook.
+                     When using Service reference, Karmada resolves the endpoint through following steps:
+                     a) First attempts to locate the Service in karmada-apiserver
+                     b) If found, constructs URL based on Service type:
+                        - ClusterIP/LoadBalancer/NodePort: Uses ClusterIP with port from Service spec
+                          (Note: Services with ClusterIP "None" are rejected), Example:
+                          `https://<cluster ip>:<port>`
+                        - ExternalName: Uses external DNS name format: `https://<external name>:<port>`
+                     c) If NOT found in karmada-apiserver, falls back to standard Kubernetes
+                        service DNS name format: `https://<service>.<namespace>.svc:<port>`
+
+                  Note: When both URL and Service are specified, the Service reference takes precedence
+                  and the URL configuration will be ignored.
                 properties:
                   caBundle:
                     description: |-
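A hedged sketch of a configuration using the URL mode described above (the webhook name, API group, and endpoint are illustrative; a Service reference would take precedence if both were set):

```yaml
apiVersion: config.karmada.io/v1alpha1
kind: ResourceInterpreterWebhookConfiguration
metadata:
  name: examples                      # illustrative name
webhooks:
  - name: workloads.example.com       # illustrative webhook name
    rules:
      - operations: ["InterpretReplica"]
        apiGroups: ["apps.example.com"]
        apiVersions: ["v1alpha1"]
        kinds: ["Workload"]
    clientConfig:
      # URL mode, format scheme://host:port/path as documented above.
      url: https://webhook.example.com:8443/my-interpreter
      caBundle: "<base64-encoded CA>" # placeholder
    interpreterContextVersions: ["v1alpha1"]
    timeoutSeconds: 3
```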
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.16.5
+    controller-gen.kubebuilder.io/version: v0.18.0
   name: multiclusteringresses.networking.karmada.io
 spec:
   group: networking.karmada.io
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.16.5
+    controller-gen.kubebuilder.io/version: v0.18.0
   name: multiclusterservices.networking.karmada.io
 spec:
   group: networking.karmada.io
@@ -245,6 +245,8 @@ spec:
                 Ports is a list of records of service ports
                 If used, every port defined in the service should have an entry in it
               items:
                 description: PortStatus represents the error condition
                   of a service port
                 properties:
                   error:
                     description: |-
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.16.5
+    controller-gen.kubebuilder.io/version: v0.18.0
   name: clusteroverridepolicies.policy.karmada.io
 spec:
   group: policy.karmada.io
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.16.5
+    controller-gen.kubebuilder.io/version: v0.18.0
   name: clusterpropagationpolicies.policy.karmada.io
 spec:
   group: policy.karmada.io
@@ -153,16 +153,19 @@ spec:
                 format: int32
                 type: integer
               purgeMode:
-                default: Graciously
+                default: Gracefully
                 description: |-
                   PurgeMode represents how to deal with the legacy applications on the
                   cluster from which the application is migrated.
-                  Valid options are "Immediately", "Graciously" and "Never".
-                  Defaults to "Graciously".
+                  Valid options are "Directly", "Gracefully", "Never", "Immediately"(deprecated),
+                  and "Graciously"(deprecated).
+                  Defaults to "Gracefully".
                 enum:
+                - Directly
+                - Gracefully
+                - Never
                 - Immediately
                 - Graciously
-                - Never
                 type: string
               statePreservation:
                 description: |-
@@ -223,6 +226,83 @@ spec:
                 required:
                 - decisionConditions
                 type: object
+              cluster:
+                description: |-
+                  Cluster indicates failover behaviors in case of cluster failure.
+                  If this value is nil, the failover behavior in case of cluster failure
+                  will be controlled by the controller's no-execute-taint-eviction-purge-mode
+                  parameter.
+                  If set, the failover behavior in case of cluster failure will be defined
+                  by this value.
+                properties:
+                  purgeMode:
+                    default: Gracefully
+                    description: |-
+                      PurgeMode represents how to deal with the legacy applications on the
+                      cluster from which the application is migrated.
+                      Valid options are "Directly", "Gracefully".
+                      Defaults to "Gracefully".
+                    enum:
+                    - Directly
+                    - Gracefully
+                    type: string
+                  statePreservation:
+                    description: |-
+                      StatePreservation defines the policy for preserving and restoring state data
+                      during failover events for stateful applications.
+
+                      When an application fails over from one cluster to another, this policy enables
+                      the extraction of critical data from the original resource configuration.
+                      Upon successful migration, the extracted data is then re-injected into the new
+                      resource, ensuring that the application can resume operation with its previous
+                      state intact.
+                      This is particularly useful for stateful applications where maintaining data
+                      consistency across failover events is crucial.
+                      If not specified, means no state data will be preserved.
+
+                      Note: This requires the StatefulFailoverInjection feature gate to be enabled,
+                      which is alpha.
+                    properties:
+                      rules:
+                        description: |-
+                          Rules contains a list of StatePreservationRule configurations.
+                          Each rule specifies a JSONPath expression targeting specific pieces of
+                          state data to be preserved during failover events. An AliasLabelName is associated
+                          with each rule, serving as a label key when the preserved data is passed
+                          to the new cluster.
+                        items:
+                          description: |-
+                            StatePreservationRule defines a single rule for state preservation.
+                            It includes a JSONPath expression and an alias name that will be used
+                            as a label key when passing state information to the new cluster.
+                          properties:
+                            aliasLabelName:
+                              description: |-
+                                AliasLabelName is the name that will be used as a label key when the preserved
+                                data is passed to the new cluster. This facilitates the injection of the
+                                preserved state back into the application resources during recovery.
+                              type: string
+                            jsonPath:
+                              description: |-
+                                JSONPath is the JSONPath template used to identify the state data
+                                to be preserved from the original resource configuration.
+                                The JSONPath syntax follows the Kubernetes specification:
+                                https://kubernetes.io/docs/reference/kubectl/jsonpath/
+
+                                Note: The JSONPath expression will start searching from the "status" field of
+                                the API resource object by default. For example, to extract the "availableReplicas"
+                                from a Deployment, the JSONPath expression should be "{.availableReplicas}", not
+                                "{.status.availableReplicas}".
+                              type: string
+                          required:
+                          - aliasLabelName
+                          - jsonPath
+                          type: object
+                        type: array
+                    required:
+                    - rules
+                    type: object
+                type: object
             type: object
           placement:
             description: Placement represents the rule for select clusters to
@@ -864,6 +944,52 @@ spec:
                 type: object
               minItems: 1
               type: array
+          schedulePriority:
+            description: |-
+              SchedulePriority defines how Karmada should resolve the priority and preemption policy
+              for workload scheduling.
+
+              This setting is useful for controlling the scheduling behavior of offline workloads.
+              By setting a higher or lower priority, users can control which workloads are scheduled first.
+              Additionally, it allows specifying a preemption policy where higher-priority workloads can
+              preempt lower-priority ones in scenarios of resource contention.
+
+              Note: This feature is currently in the alpha stage. The priority-based scheduling functionality is
+              controlled by the PriorityBasedScheduling feature gate, and preemption is controlled by the
+              PriorityBasedPreemptiveScheduling feature gate. Currently, only priority-based scheduling is
+              supported. Preemption functionality is not yet available and will be introduced in future
+              releases as the feature matures.
+            properties:
+              priorityClassName:
+                description: |-
+                  PriorityClassName specifies which PriorityClass to use. Its behavior depends on PriorityClassSource:
+
+                  Behavior of PriorityClassName:
+
+                  For KubePriorityClass:
+                  - When specified: Uses the named Kubernetes PriorityClass.
+
+                  For PodPriorityClass:
+                  - Uses PriorityClassName from the PodTemplate.
+                  - Not yet implemented.
+
+                  For FederatedPriorityClass:
+                  - Not yet implemented.
+                type: string
+              priorityClassSource:
+                description: |-
+                  PriorityClassSource specifies where Karmada should look for the PriorityClass definition.
+                  Available options:
+                  - KubePriorityClass: Uses Kubernetes PriorityClass (scheduling.k8s.io/v1)
+                  - PodPriorityClass: Uses PriorityClassName from PodTemplate: PodSpec.PriorityClassName (not yet implemented)
+                  - FederatedPriorityClass: Uses Karmada FederatedPriorityClass (not yet implemented)
+                enum:
+                - KubePriorityClass
+                type: string
+            required:
+            - priorityClassName
+            - priorityClassSource
+            type: object
           schedulerName:
             default: default-scheduler
             description: |-
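Pulling the new fields together, a hedged sketch of a policy exercising the new purge modes, cluster-failure state preservation, and schedule priority (workload, cluster names, and the PriorityClass are illustrative; the StatefulFailoverInjection and PriorityBasedScheduling feature gates are assumed enabled):

```yaml
apiVersion: policy.karmada.io/v1alpha1
kind: PropagationPolicy
metadata:
  name: stateful-app-policy              # illustrative
  namespace: default
spec:
  resourceSelectors:
    - apiVersion: apps/v1
      kind: Deployment
      name: stateful-app                 # illustrative workload
  failover:
    cluster:                             # new cluster-failure behavior
      purgeMode: Gracefully              # one of the new enum values
      statePreservation:
        rules:
          - aliasLabelName: app.example.com/replicas
            jsonPath: "{.availableReplicas}"   # searched from .status by default
  schedulePriority:
    priorityClassSource: KubePriorityClass
    priorityClassName: high-priority     # assumes this PriorityClass exists
  placement:
    clusterAffinity:
      clusterNames:
        - member1                        # illustrative clusters
        - member2
```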
@@ -0,0 +1,257 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    controller-gen.kubebuilder.io/version: v0.18.0
  name: clustertaintpolicies.policy.karmada.io
spec:
  group: policy.karmada.io
  names:
    kind: ClusterTaintPolicy
    listKind: ClusterTaintPolicyList
    plural: clustertaintpolicies
    singular: clustertaintpolicy
  scope: Cluster
  versions:
  - name: v1alpha1
    schema:
      openAPIV3Schema:
        description: |-
          ClusterTaintPolicy automates taint management on Cluster objects based
          on declarative conditions.
          The system evaluates AddOnConditions to determine when to add taints,
          and RemoveOnConditions to determine when to remove taints.
          AddOnConditions are evaluated before RemoveOnConditions.
          Taints are NEVER automatically removed when the ClusterTaintPolicy is deleted.
        properties:
          apiVersion:
            description: |-
              APIVersion defines the versioned schema of this representation of an object.
              Servers should convert recognized schemas to the latest internal value, and
              may reject unrecognized values.
              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
            type: string
          kind:
            description: |-
              Kind is a string value representing the REST resource this object represents.
              Servers may infer this from the endpoint the client submits requests to.
              Cannot be updated.
              In CamelCase.
              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
            type: string
          metadata:
            type: object
          spec:
            description: Spec represents the desired behavior of ClusterTaintPolicy.
            properties:
              addOnConditions:
                description: |-
                  AddOnConditions defines the conditions to match for triggering
                  the controller to add taints on the cluster object.
                  The match conditions are ANDed.
                  If AddOnConditions is empty, no taints will be added.
                items:
                  description: |-
                    MatchCondition represents the condition match detail of activating the failover
                    relevant taints on target clusters.
                  properties:
                    conditionType:
                      description: ConditionType specifies the ClusterStatus condition
                        type.
                      type: string
                    operator:
                      description: |-
                        Operator represents a relationship to a set of values.
                        Valid operators are In, NotIn.
                      type: string
                    statusValues:
                      description: |-
                        StatusValues is an array of metav1.ConditionStatus values.
                        The item specifies the ClusterStatus condition status.
                      items:
                        type: string
                      type: array
                  required:
                  - conditionType
                  - operator
                  - statusValues
                  type: object
                type: array
              removeOnConditions:
                description: |-
                  RemoveOnConditions defines the conditions to match for triggering
                  the controller to remove taints from the cluster object.
                  The match conditions are ANDed.
                  If RemoveOnConditions is empty, no taints will be removed.
                items:
                  description: |-
                    MatchCondition represents the condition match detail of activating the failover
                    relevant taints on target clusters.
                  properties:
                    conditionType:
                      description: ConditionType specifies the ClusterStatus condition
                        type.
                      type: string
                    operator:
                      description: |-
                        Operator represents a relationship to a set of values.
                        Valid operators are In, NotIn.
                      type: string
                    statusValues:
                      description: |-
                        StatusValues is an array of metav1.ConditionStatus values.
                        The item specifies the ClusterStatus condition status.
                      items:
                        type: string
                      type: array
                  required:
                  - conditionType
                  - operator
                  - statusValues
                  type: object
                type: array
              taints:
                description: |-
                  Taints specifies the taints that need to be added or removed on
                  the cluster object which match with TargetClusters.
                  If the Taints is modified, the system will process the taints based on
                  the latest value of Taints during the next condition-triggered execution,
                  regardless of whether the taint has been added or removed.
                items:
                  description: Taint describes the taint that needs to be applied
                    to the cluster.
                  properties:
                    effect:
                      description: Effect represents the taint effect to be applied
                        to a cluster.
                      type: string
                    key:
                      description: Key represents the taint key to be applied to a
                        cluster.
                      type: string
                    value:
                      description: Value represents the taint value corresponding
                        to the taint key.
                      type: string
                  required:
                  - effect
                  - key
                  type: object
                minItems: 1
                type: array
              targetClusters:
                description: |-
                  TargetClusters specifies the clusters that ClusterTaintPolicy needs
                  to pay attention to.
                  For clusters that no longer match the TargetClusters, the taints
                  will be kept unchanged.
                  If targetClusters is not set, any cluster can be selected.
                properties:
                  clusterNames:
                    description: ClusterNames is the list of clusters to be selected.
                    items:
                      type: string
                    type: array
                  exclude:
                    description: ExcludedClusters is the list of clusters to be ignored.
                    items:
                      type: string
                    type: array
                  fieldSelector:
                    description: |-
                      FieldSelector is a filter to select member clusters by fields.
                      The key(field) of the match expression should be 'provider', 'region', or 'zone',
                      and the operator of the match expression should be 'In' or 'NotIn'.
                      If non-nil and non-empty, only the clusters match this filter will be selected.
                    properties:
                      matchExpressions:
                        description: A list of field selector requirements.
                        items:
                          description: |-
                            A node selector requirement is a selector that contains values, a key, and an operator
                            that relates the key and values.
                          properties:
                            key:
                              description: The label key that the selector applies
                                to.
                              type: string
                            operator:
                              description: |-
                                Represents a key's relationship to a set of values.
                                Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
                              type: string
                            values:
                              description: |-
                                An array of string values. If the operator is In or NotIn,
                                the values array must be non-empty. If the operator is Exists or DoesNotExist,
                                the values array must be empty. If the operator is Gt or Lt, the values
                                array must have a single element, which will be interpreted as an integer.
                                This array is replaced during a strategic merge patch.
                              items:
                                type: string
                              type: array
                              x-kubernetes-list-type: atomic
                          required:
                          - key
                          - operator
                          type: object
                        type: array
                    type: object
                  labelSelector:
                    description: |-
                      LabelSelector is a filter to select member clusters by labels.
                      If non-nil and non-empty, only the clusters match this filter will be selected.
                    properties:
                      matchExpressions:
                        description: matchExpressions is a list of label selector
                          requirements. The requirements are ANDed.
                        items:
                          description: |-
                            A label selector requirement is a selector that contains values, a key, and an operator that
                            relates the key and values.
                          properties:
                            key:
                              description: key is the label key that the selector
                                applies to.
                              type: string
                            operator:
                              description: |-
                                operator represents a key's relationship to a set of values.
                                Valid operators are In, NotIn, Exists and DoesNotExist.
                              type: string
                            values:
                              description: |-
                                values is an array of string values. If the operator is In or NotIn,
                                the values array must be non-empty. If the operator is Exists or DoesNotExist,
                                the values array must be empty. This array is replaced during a strategic
                                merge patch.
                              items:
                                type: string
                              type: array
                              x-kubernetes-list-type: atomic
                          required:
                          - key
                          - operator
                          type: object
                        type: array
                        x-kubernetes-list-type: atomic
                      matchLabels:
                        additionalProperties:
                          type: string
                        description: |-
                          matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
                          map is equivalent to an element of matchExpressions, whose key field is "key", the
                          operator is "In", and the values array contains only "value". The requirements are ANDed.
                        type: object
                    type: object
                    x-kubernetes-map-type: atomic
                type: object
            required:
            - taints
            type: object
        required:
        - spec
        type: object
    served: true
    storage: true
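A sketch of a policy conforming to this new schema (the cluster names, condition type, and taint key are illustrative):

```yaml
apiVersion: policy.karmada.io/v1alpha1
kind: ClusterTaintPolicy
metadata:
  name: unhealthy-cluster-taint          # illustrative
spec:
  targetClusters:
    clusterNames:
      - member1                          # illustrative clusters
      - member2
  addOnConditions:                       # ANDed; evaluated before removeOnConditions
    - conditionType: Ready
      operator: In
      statusValues: ["False", "Unknown"]
  removeOnConditions:
    - conditionType: Ready
      operator: In
      statusValues: ["True"]
  taints:
    - key: example.com/not-ready         # illustrative taint key
      effect: NoExecute
```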
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.16.5
+    controller-gen.kubebuilder.io/version: v0.18.0
   name: federatedresourcequotas.policy.karmada.io
 spec:
   group: policy.karmada.io
@@ -16,7 +16,14 @@ spec:
     singular: federatedresourcequota
   scope: Namespaced
   versions:
-  - name: v1alpha1
+  - additionalPrinterColumns:
+    - jsonPath: .status.overall
+      name: OVERALL
+      type: string
+    - jsonPath: .status.overallUsed
+      name: OVERALL_USED
+      type: string
+    name: v1alpha1
     schema:
       openAPIV3Schema:
         description: FederatedResourceQuota sets aggregate quota restrictions enforced
@@ -54,9 +61,15 @@ spec:
             type: object
           staticAssignments:
             description: |-
-              StaticAssignments represents the subset of desired hard limits for each cluster.
-              Note: for clusters not present in this list, Karmada will set an empty ResourceQuota to them, which means these
-              clusters will have no quotas in the referencing namespace.
+              StaticAssignments specifies ResourceQuota settings for specific clusters.
+              If non-empty, Karmada will create ResourceQuotas in the corresponding clusters.
+              Clusters not listed here or when StaticAssignments is empty will have no ResourceQuotas created.
+
+              This field addresses multi-cluster configuration management challenges by allowing centralized
+              control over ResourceQuotas across clusters.
+
+              Note: The Karmada scheduler currently does NOT use this configuration for scheduling decisions.
+              Future updates may integrate it into the scheduling logic.
             items:
               description: StaticClusterAssignment represents the set of desired
                 hard limits for a specific cluster.
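A sketch of a quota using the revised staticAssignments semantics (namespace, cluster names, and limits are illustrative; ResourceQuotas are created only in the listed clusters):

```yaml
apiVersion: policy.karmada.io/v1alpha1
kind: FederatedResourceQuota
metadata:
  name: team-quota                 # illustrative
  namespace: default
spec:
  overall:
    cpu: "20"
    memory: 40Gi
  staticAssignments:
    - clusterName: member1         # quota created in member1
      hard:
        cpu: "10"
        memory: 20Gi
    - clusterName: member2         # quota created in member2
      hard:
        cpu: "10"
        memory: 20Gi
```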
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.16.5
+    controller-gen.kubebuilder.io/version: v0.18.0
   name: overridepolicies.policy.karmada.io
 spec:
   group: policy.karmada.io
@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
|||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.16.5
|
||||
controller-gen.kubebuilder.io/version: v0.18.0
|
||||
name: propagationpolicies.policy.karmada.io
|
||||
spec:
|
||||
group: policy.karmada.io
|
||||
|
@ -150,16 +150,19 @@ spec:
|
|||
format: int32
|
||||
type: integer
|
||||
purgeMode:
|
||||
default: Graciously
|
||||
default: Gracefully
|
||||
description: |-
|
||||
PurgeMode represents how to deal with the legacy applications on the
|
||||
cluster from which the application is migrated.
|
||||
Valid options are "Immediately", "Graciously" and "Never".
|
||||
Defaults to "Graciously".
|
||||
Valid options are "Directly", "Gracefully", "Never", "Immediately"(deprecated),
|
||||
and "Graciously"(deprecated).
|
||||
Defaults to "Gracefully".
|
||||
enum:
|
||||
- Directly
|
||||
- Gracefully
|
||||
- Never
|
||||
- Immediately
|
||||
- Graciously
|
||||
- Never
|
||||
type: string
|
||||
statePreservation:
|
||||
description: |-
|
||||
|
@ -220,6 +223,83 @@ spec:
|
|||
required:
|
||||
- decisionConditions
|
||||
type: object
|
||||
cluster:
|
||||
description: |-
|
||||
Cluster indicates failover behaviors in case of cluster failure.
|
||||
If this value is nil, the failover behavior in case of cluster failure
|
||||
will be controlled by the controller's no-execute-taint-eviction-purge-mode
|
||||
parameter.
|
||||
If set, the failover behavior in case of cluster failure will be defined
|
||||
by this value.
|
||||
properties:
|
||||
purgeMode:
|
||||
default: Gracefully
|
||||
description: |-
|
||||
PurgeMode represents how to deal with the legacy applications on the
|
||||
cluster from which the application is migrated.
|
||||
Valid options are "Directly", "Gracefully".
|
||||
Defaults to "Gracefully".
|
||||
enum:
|
||||
- Directly
|
||||
- Gracefully
|
||||
type: string
|
||||
statePreservation:
|
||||
description: |-
|
||||
StatePreservation defines the policy for preserving and restoring state data
|
||||
during failover events for stateful applications.
|
||||
|
||||
When an application fails over from one cluster to another, this policy enables
|
||||
the extraction of critical data from the original resource configuration.
|
||||
Upon successful migration, the extracted data is then re-injected into the new
|
||||
resource, ensuring that the application can resume operation with its previous
|
||||
state intact.
|
||||
This is particularly useful for stateful applications where maintaining data
|
||||
consistency across failover events is crucial.
|
||||
If not specified, means no state data will be preserved.
|
||||
|
||||
Note: This requires the StatefulFailoverInjection feature gate to be enabled,
|
||||
which is alpha.
|
||||
properties:
|
||||
rules:
|
||||
description: |-
|
||||
Rules contains a list of StatePreservationRule configurations.
|
||||
Each rule specifies a JSONPath expression targeting specific pieces of
|
||||
state data to be preserved during failover events. An AliasLabelName is associated
|
||||
with each rule, serving as a label key when the preserved data is passed
|
||||
to the new cluster.
|
||||
items:
|
||||
description: |-
|
||||
StatePreservationRule defines a single rule for state preservation.
|
||||
It includes a JSONPath expression and an alias name that will be used
|
||||
as a label key when passing state information to the new cluster.
|
||||
properties:
|
||||
aliasLabelName:
|
||||
description: |-
|
||||
AliasLabelName is the name that will be used as a label key when the preserved
|
||||
data is passed to the new cluster. This facilitates the injection of the
|
||||
preserved state back into the application resources during recovery.
|
||||
type: string
|
||||
jsonPath:
|
||||
description: |-
|
||||
JSONPath is the JSONPath template used to identify the state data
|
||||
to be preserved from the original resource configuration.
|
||||
The JSONPath syntax follows the Kubernetes specification:
|
||||
https://kubernetes.io/docs/reference/kubectl/jsonpath/
|
||||
|
||||
Note: The JSONPath expression will start searching from the "status" field of
|
||||
the API resource object by default. For example, to extract the "availableReplicas"
|
||||
from a Deployment, the JSONPath expression should be "{.availableReplicas}", not
|
||||
"{.status.availableReplicas}".
|
||||
type: string
|
||||
required:
|
||||
- aliasLabelName
|
||||
- jsonPath
|
||||
type: object
|
||||
type: array
|
||||
required:
|
||||
- rules
|
||||
type: object
|
||||
type: object
|
||||
type: object
|
||||
placement:
description: Placement represents the rule for select clusters to

@@ -861,6 +941,52 @@ spec:
type: object
minItems: 1
type: array
+schedulePriority:
+description: |-
+SchedulePriority defines how Karmada should resolve the priority and preemption policy
+for workload scheduling.
+
+This setting is useful for controlling the scheduling behavior of offline workloads.
+By setting a higher or lower priority, users can control which workloads are scheduled first.
+Additionally, it allows specifying a preemption policy where higher-priority workloads can
+preempt lower-priority ones in scenarios of resource contention.
+
+Note: This feature is currently in the alpha stage. The priority-based scheduling functionality is
+controlled by the PriorityBasedScheduling feature gate, and preemption is controlled by the
+PriorityBasedPreemptiveScheduling feature gate. Currently, only priority-based scheduling is
+supported. Preemption functionality is not yet available and will be introduced in future
+releases as the feature matures.
+properties:
+priorityClassName:
+description: |-
+PriorityClassName specifies which PriorityClass to use. Its behavior depends on PriorityClassSource:
+
+Behavior of PriorityClassName:
+
+For KubePriorityClass:
+- When specified: Uses the named Kubernetes PriorityClass.
+
+For PodPriorityClass:
+- Uses PriorityClassName from the PodTemplate.
+- Not yet implemented.
+
+For FederatedPriorityClass:
+- Not yet implemented.
+type: string
+priorityClassSource:
+description: |-
+PriorityClassSource specifies where Karmada should look for the PriorityClass definition.
+Available options:
+- KubePriorityClass: Uses Kubernetes PriorityClass (scheduling.k8s.io/v1)
+- PodPriorityClass: Uses PriorityClassName from PodTemplate: PodSpec.PriorityClassName (not yet implemented)
+- FederatedPriorityClass: Uses Karmada FederatedPriorityClass (not yet implemented)
+enum:
+- KubePriorityClass
+type: string
+required:
+- priorityClassName
+- priorityClassSource
+type: object
schedulerName:
default: default-scheduler
description: |-
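For reference, a spec fragment exercising this schema might look like the sketch below; the PriorityClass name is a placeholder, and only KubePriorityClass currently passes the enum:

schedulePriority:
  priorityClassSource: KubePriorityClass
  priorityClassName: high-priority   # hypothetical Kubernetes PriorityClass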
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
-controller-gen.kubebuilder.io/version: v0.16.5
+controller-gen.kubebuilder.io/version: v0.18.0
name: remedies.remedy.karmada.io
spec:
group: remedy.karmada.io

@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
-controller-gen.kubebuilder.io/version: v0.16.5
+controller-gen.kubebuilder.io/version: v0.18.0
name: clusterresourcebindings.work.karmada.io
spec:
group: work.karmada.io

@@ -261,6 +261,199 @@ spec:
- name
type: object
type: array
+components:
+description: |-
+Components represents the requirements of multiple pod templates of the referencing resource.
+It is designed to support workloads that consist of multiple pod templates,
+such as distributed training jobs (e.g., PyTorch, TensorFlow) and big data workloads (e.g., FlinkDeployment),
+where each workload is composed of more than one pod template. It is also capable of representing
+single-component workloads, such as Deployment.
+
+Note: This field is intended to replace the legacy ReplicaRequirements and Replicas fields above.
+It is only populated when the MultiplePodTemplatesScheduling feature gate is enabled.
+items:
+description: Component represents the requirements for a specific component.
+properties:
+name:
+description: |-
+Name of this component.
+It is required when the resource contains multiple components to ensure proper identification,
+and must also be unique within the same resource.
+maxLength: 32
+type: string
+replicaRequirements:
+description: ReplicaRequirements represents the requirements required by each replica for this component.
+properties:
+nodeClaim:
+description: NodeClaim represents the node claim HardNodeAffinity, NodeSelector and Tolerations required by each replica.
+properties:
+hardNodeAffinity:
+description: |-
+A node selector represents the union of the results of one or more label queries over a set of
+nodes; that is, it represents the OR of the selectors represented by the node selector terms.
+Note that only PodSpec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution
+is included here because it has a hard limit on pod scheduling.
+properties:
+nodeSelectorTerms:
+description: Required. A list of node selector terms. The terms are ORed.
+items:
+description: |-
+A null or empty node selector term matches no objects. The requirements of
+them are ANDed.
+The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+properties:
+matchExpressions:
+description: A list of node selector requirements by node's labels.
+items:
+description: |-
+A node selector requirement is a selector that contains values, a key, and an operator
+that relates the key and values.
+properties:
+key:
+description: The label key that the selector applies to.
+type: string
+operator:
+description: |-
+Represents a key's relationship to a set of values.
+Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+type: string
+values:
+description: |-
+An array of string values. If the operator is In or NotIn,
+the values array must be non-empty. If the operator is Exists or DoesNotExist,
+the values array must be empty. If the operator is Gt or Lt, the values
+array must have a single element, which will be interpreted as an integer.
+This array is replaced during a strategic merge patch.
+items:
+type: string
+type: array
+x-kubernetes-list-type: atomic
+required:
+- key
+- operator
+type: object
+type: array
+x-kubernetes-list-type: atomic
+matchFields:
+description: A list of node selector requirements by node's fields.
+items:
+description: |-
+A node selector requirement is a selector that contains values, a key, and an operator
+that relates the key and values.
+properties:
+key:
+description: The label key that the selector applies to.
+type: string
+operator:
+description: |-
+Represents a key's relationship to a set of values.
+Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+type: string
+values:
+description: |-
+An array of string values. If the operator is In or NotIn,
+the values array must be non-empty. If the operator is Exists or DoesNotExist,
+the values array must be empty. If the operator is Gt or Lt, the values
+array must have a single element, which will be interpreted as an integer.
+This array is replaced during a strategic merge patch.
+items:
+type: string
+type: array
+x-kubernetes-list-type: atomic
+required:
+- key
+- operator
+type: object
+type: array
+x-kubernetes-list-type: atomic
+type: object
+x-kubernetes-map-type: atomic
+type: array
+x-kubernetes-list-type: atomic
+required:
+- nodeSelectorTerms
+type: object
+x-kubernetes-map-type: atomic
+nodeSelector:
+additionalProperties:
+type: string
+description: |-
+NodeSelector is a selector which must be true for the pod to fit on a node.
+Selector which must match a node's labels for the pod to be scheduled on that node.
+type: object
+tolerations:
+description: If specified, the pod's tolerations.
+items:
+description: |-
+The pod this Toleration is attached to tolerates any taint that matches
+the triple <key,value,effect> using the matching operator <operator>.
+properties:
+effect:
+description: |-
+Effect indicates the taint effect to match. Empty means match all taint effects.
+When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+type: string
+key:
+description: |-
+Key is the taint key that the toleration applies to. Empty means match all taint keys.
+If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+type: string
+operator:
+description: |-
+Operator represents a key's relationship to the value.
+Valid operators are Exists and Equal. Defaults to Equal.
+Exists is equivalent to wildcard for value, so that a pod can
+tolerate all taints of a particular category.
+type: string
+tolerationSeconds:
+description: |-
+TolerationSeconds represents the period of time the toleration (which must be
+of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+it is not set, which means tolerate the taint forever (do not evict). Zero and
+negative values will be treated as 0 (evict immediately) by the system.
+format: int64
+type: integer
+value:
+description: |-
+Value is the taint value the toleration matches to.
+If the operator is Exists, the value should be empty, otherwise just a regular string.
+type: string
+type: object
+type: array
+type: object
+priorityClassName:
+description: PriorityClassName represents the resource's priorityClassName.
+type: string
+resourceRequest:
+additionalProperties:
+anyOf:
+- type: integer
+- type: string
+pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+x-kubernetes-int-or-string: true
+description: ResourceRequest represents the resources required by each replica.
+type: object
+type: object
+replicas:
+description: Replicas represents the replica number of the resource's component.
+format: int32
+type: integer
+required:
+- name
+- replicas
+type: object
+type: array
conflictResolution:
default: Abort
description: |-
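As a sketch of what this schema accepts (component names and sizes are invented, loosely modeled on the FlinkDeployment example the description mentions):

components:
  - name: jobmanager     # hypothetical component
    replicas: 1
    replicaRequirements:
      resourceRequest:
        cpu: "1"
        memory: 1Gi
  - name: taskmanager    # hypothetical component
    replicas: 2
    replicaRequirements:
      resourceRequest:
        cpu: "2"
        memory: 2Gi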
@@ -315,16 +508,19 @@ spec:
format: int32
type: integer
purgeMode:
-default: Graciously
+default: Gracefully
description: |-
PurgeMode represents how to deal with the legacy applications on the
cluster from which the application is migrated.
-Valid options are "Immediately", "Graciously" and "Never".
-Defaults to "Graciously".
+Valid options are "Directly", "Gracefully", "Never", "Immediately"(deprecated),
+and "Graciously"(deprecated).
+Defaults to "Gracefully".
enum:
+- Directly
+- Gracefully
+- Never
- Immediately
- Graciously
-- Never
type: string
statePreservation:
description: |-
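In practice, manifests that still set the deprecated spellings keep validating, while new ones should prefer the replacements; a one-line sketch:

purgeMode: Gracefully   # preferred; "Graciously" and "Immediately" remain only for compatibility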
@@ -385,6 +581,83 @@ spec:
required:
- decisionConditions
type: object
+cluster:
+description: |-
+Cluster indicates failover behaviors in case of cluster failure.
+If this value is nil, the failover behavior in case of cluster failure
+will be controlled by the controller's no-execute-taint-eviction-purge-mode
+parameter.
+If set, the failover behavior in case of cluster failure will be defined
+by this value.
+properties:
+purgeMode:
+default: Gracefully
+description: |-
+PurgeMode represents how to deal with the legacy applications on the
+cluster from which the application is migrated.
+Valid options are "Directly", "Gracefully".
+Defaults to "Gracefully".
+enum:
+- Directly
+- Gracefully
+type: string
+statePreservation:
+description: |-
+StatePreservation defines the policy for preserving and restoring state data
+during failover events for stateful applications.
+
+When an application fails over from one cluster to another, this policy enables
+the extraction of critical data from the original resource configuration.
+Upon successful migration, the extracted data is then re-injected into the new
+resource, ensuring that the application can resume operation with its previous
+state intact.
+This is particularly useful for stateful applications where maintaining data
+consistency across failover events is crucial.
+If not specified, no state data will be preserved.
+
+Note: This requires the StatefulFailoverInjection feature gate to be enabled,
+which is alpha.
+properties:
+rules:
+description: |-
+Rules contains a list of StatePreservationRule configurations.
+Each rule specifies a JSONPath expression targeting specific pieces of
+state data to be preserved during failover events. An AliasLabelName is associated
+with each rule, serving as a label key when the preserved data is passed
+to the new cluster.
+items:
+description: |-
+StatePreservationRule defines a single rule for state preservation.
+It includes a JSONPath expression and an alias name that will be used
+as a label key when passing state information to the new cluster.
+properties:
+aliasLabelName:
+description: |-
+AliasLabelName is the name that will be used as a label key when the preserved
+data is passed to the new cluster. This facilitates the injection of the
+preserved state back into the application resources during recovery.
+type: string
+jsonPath:
+description: |-
+JSONPath is the JSONPath template used to identify the state data
+to be preserved from the original resource configuration.
+The JSONPath syntax follows the Kubernetes specification:
+https://kubernetes.io/docs/reference/kubectl/jsonpath/
+
+Note: The JSONPath expression will start searching from the "status" field of
+the API resource object by default. For example, to extract the "availableReplicas"
+from a Deployment, the JSONPath expression should be "{.availableReplicas}", not
+"{.status.availableReplicas}".
+type: string
+required:
+- aliasLabelName
+- jsonPath
+type: object
+type: array
+required:
+- rules
+type: object
+type: object
type: object
gracefulEvictionTasks:
description: |-
@@ -454,10 +727,12 @@ spec:
description: |-
PurgeMode represents how to deal with the legacy applications on the
cluster from which the application is migrated.
-Valid options are "Immediately", "Graciously" and "Never".
+Valid options are "Immediately", "Directly", "Graciously", "Gracefully" and "Never".
enum:
- Immediately
+- Directly
- Graciously
+- Gracefully
- Never
type: string
reason:
@@ -1252,6 +1527,19 @@ spec:
- kind
- name
type: object
+schedulePriority:
+description: SchedulePriority represents the scheduling priority assigned to workloads.
+properties:
+priority:
+default: 0
+description: |-
+Priority specifies the scheduling priority for the binding.
+Higher values indicate a higher priority.
+If not explicitly set, the default value is 0.
+format: int32
+type: integer
+type: object
schedulerName:
description: |-
SchedulerName represents which scheduler will perform the scheduling.
@@ -1281,6 +1569,16 @@ spec:
type: string
type: array
type: object
+scheduling:
+description: |-
+Scheduling controls whether scheduling should be suspended; the scheduler will pause scheduling and not
+process the resource binding when the value is true, and resume scheduling when it's false or nil.
+This is designed for third-party systems to temporarily pause the scheduling of applications, enabling them
+to manage resource allocation, prioritize critical workloads, etc.
+It is expected that third-party systems use an admission webhook to suspend scheduling at the time of
+ResourceBinding creation. Once a ResourceBinding has been scheduled, it cannot be paused afterward, as it may
+lead to ineffective suspension.
+type: boolean
type: object
required:
- resource
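Assuming the field sits under a suspension object, as the enclosing type: object lines suggest (the parent field name is not visible in this hunk), an admission webhook would set something like:

spec:
  suspension:
    scheduling: true   # pause scheduling at creation time; the exact path is an assumption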
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
-controller-gen.kubebuilder.io/version: v0.16.5
+controller-gen.kubebuilder.io/version: v0.18.0
name: resourcebindings.work.karmada.io
spec:
group: work.karmada.io

@@ -261,6 +261,199 @@ spec:
- name
type: object
type: array
+components:
+description: |-
+Components represents the requirements of multiple pod templates of the referencing resource.
+It is designed to support workloads that consist of multiple pod templates,
+such as distributed training jobs (e.g., PyTorch, TensorFlow) and big data workloads (e.g., FlinkDeployment),
+where each workload is composed of more than one pod template. It is also capable of representing
+single-component workloads, such as Deployment.
+
+Note: This field is intended to replace the legacy ReplicaRequirements and Replicas fields above.
+It is only populated when the MultiplePodTemplatesScheduling feature gate is enabled.
+items:
+description: Component represents the requirements for a specific component.
+properties:
+name:
+description: |-
+Name of this component.
+It is required when the resource contains multiple components to ensure proper identification,
+and must also be unique within the same resource.
+maxLength: 32
+type: string
+replicaRequirements:
+description: ReplicaRequirements represents the requirements required by each replica for this component.
+properties:
+nodeClaim:
+description: NodeClaim represents the node claim HardNodeAffinity, NodeSelector and Tolerations required by each replica.
+properties:
+hardNodeAffinity:
+description: |-
+A node selector represents the union of the results of one or more label queries over a set of
+nodes; that is, it represents the OR of the selectors represented by the node selector terms.
+Note that only PodSpec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution
+is included here because it has a hard limit on pod scheduling.
+properties:
+nodeSelectorTerms:
+description: Required. A list of node selector terms. The terms are ORed.
+items:
+description: |-
+A null or empty node selector term matches no objects. The requirements of
+them are ANDed.
+The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+properties:
+matchExpressions:
+description: A list of node selector requirements by node's labels.
+items:
+description: |-
+A node selector requirement is a selector that contains values, a key, and an operator
+that relates the key and values.
+properties:
+key:
+description: The label key that the selector applies to.
+type: string
+operator:
+description: |-
+Represents a key's relationship to a set of values.
+Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+type: string
+values:
+description: |-
+An array of string values. If the operator is In or NotIn,
+the values array must be non-empty. If the operator is Exists or DoesNotExist,
+the values array must be empty. If the operator is Gt or Lt, the values
+array must have a single element, which will be interpreted as an integer.
+This array is replaced during a strategic merge patch.
+items:
+type: string
+type: array
+x-kubernetes-list-type: atomic
+required:
+- key
+- operator
+type: object
+type: array
+x-kubernetes-list-type: atomic
+matchFields:
+description: A list of node selector requirements by node's fields.
+items:
+description: |-
+A node selector requirement is a selector that contains values, a key, and an operator
+that relates the key and values.
+properties:
+key:
+description: The label key that the selector applies to.
+type: string
+operator:
+description: |-
+Represents a key's relationship to a set of values.
+Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+type: string
+values:
+description: |-
+An array of string values. If the operator is In or NotIn,
+the values array must be non-empty. If the operator is Exists or DoesNotExist,
+the values array must be empty. If the operator is Gt or Lt, the values
+array must have a single element, which will be interpreted as an integer.
+This array is replaced during a strategic merge patch.
+items:
+type: string
+type: array
+x-kubernetes-list-type: atomic
+required:
+- key
+- operator
+type: object
+type: array
+x-kubernetes-list-type: atomic
+type: object
+x-kubernetes-map-type: atomic
+type: array
+x-kubernetes-list-type: atomic
+required:
+- nodeSelectorTerms
+type: object
+x-kubernetes-map-type: atomic
+nodeSelector:
+additionalProperties:
+type: string
+description: |-
+NodeSelector is a selector which must be true for the pod to fit on a node.
+Selector which must match a node's labels for the pod to be scheduled on that node.
+type: object
+tolerations:
+description: If specified, the pod's tolerations.
+items:
+description: |-
+The pod this Toleration is attached to tolerates any taint that matches
+the triple <key,value,effect> using the matching operator <operator>.
+properties:
+effect:
+description: |-
+Effect indicates the taint effect to match. Empty means match all taint effects.
+When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+type: string
+key:
+description: |-
+Key is the taint key that the toleration applies to. Empty means match all taint keys.
+If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+type: string
+operator:
+description: |-
+Operator represents a key's relationship to the value.
+Valid operators are Exists and Equal. Defaults to Equal.
+Exists is equivalent to wildcard for value, so that a pod can
+tolerate all taints of a particular category.
+type: string
+tolerationSeconds:
+description: |-
+TolerationSeconds represents the period of time the toleration (which must be
+of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+it is not set, which means tolerate the taint forever (do not evict). Zero and
+negative values will be treated as 0 (evict immediately) by the system.
+format: int64
+type: integer
+value:
+description: |-
+Value is the taint value the toleration matches to.
+If the operator is Exists, the value should be empty, otherwise just a regular string.
+type: string
+type: object
+type: array
+type: object
+priorityClassName:
+description: PriorityClassName represents the resource's priorityClassName.
+type: string
+resourceRequest:
+additionalProperties:
+anyOf:
+- type: integer
+- type: string
+pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+x-kubernetes-int-or-string: true
+description: ResourceRequest represents the resources required by each replica.
+type: object
+type: object
+replicas:
+description: Replicas represents the replica number of the resource's component.
+format: int32
+type: integer
+required:
+- name
+- replicas
+type: object
+type: array
conflictResolution:
default: Abort
description: |-
@@ -315,16 +508,19 @@ spec:
format: int32
type: integer
purgeMode:
-default: Graciously
+default: Gracefully
description: |-
PurgeMode represents how to deal with the legacy applications on the
cluster from which the application is migrated.
-Valid options are "Immediately", "Graciously" and "Never".
-Defaults to "Graciously".
+Valid options are "Directly", "Gracefully", "Never", "Immediately"(deprecated),
+and "Graciously"(deprecated).
+Defaults to "Gracefully".
enum:
+- Directly
+- Gracefully
+- Never
- Immediately
- Graciously
-- Never
type: string
statePreservation:
description: |-
@@ -385,6 +581,83 @@ spec:
required:
- decisionConditions
type: object
+cluster:
+description: |-
+Cluster indicates failover behaviors in case of cluster failure.
+If this value is nil, the failover behavior in case of cluster failure
+will be controlled by the controller's no-execute-taint-eviction-purge-mode
+parameter.
+If set, the failover behavior in case of cluster failure will be defined
+by this value.
+properties:
+purgeMode:
+default: Gracefully
+description: |-
+PurgeMode represents how to deal with the legacy applications on the
+cluster from which the application is migrated.
+Valid options are "Directly", "Gracefully".
+Defaults to "Gracefully".
+enum:
+- Directly
+- Gracefully
+type: string
+statePreservation:
+description: |-
+StatePreservation defines the policy for preserving and restoring state data
+during failover events for stateful applications.
+
+When an application fails over from one cluster to another, this policy enables
+the extraction of critical data from the original resource configuration.
+Upon successful migration, the extracted data is then re-injected into the new
+resource, ensuring that the application can resume operation with its previous
+state intact.
+This is particularly useful for stateful applications where maintaining data
+consistency across failover events is crucial.
+If not specified, no state data will be preserved.
+
+Note: This requires the StatefulFailoverInjection feature gate to be enabled,
+which is alpha.
+properties:
+rules:
+description: |-
+Rules contains a list of StatePreservationRule configurations.
+Each rule specifies a JSONPath expression targeting specific pieces of
+state data to be preserved during failover events. An AliasLabelName is associated
+with each rule, serving as a label key when the preserved data is passed
+to the new cluster.
+items:
+description: |-
+StatePreservationRule defines a single rule for state preservation.
+It includes a JSONPath expression and an alias name that will be used
+as a label key when passing state information to the new cluster.
+properties:
+aliasLabelName:
+description: |-
+AliasLabelName is the name that will be used as a label key when the preserved
+data is passed to the new cluster. This facilitates the injection of the
+preserved state back into the application resources during recovery.
+type: string
+jsonPath:
+description: |-
+JSONPath is the JSONPath template used to identify the state data
+to be preserved from the original resource configuration.
+The JSONPath syntax follows the Kubernetes specification:
+https://kubernetes.io/docs/reference/kubectl/jsonpath/
+
+Note: The JSONPath expression will start searching from the "status" field of
+the API resource object by default. For example, to extract the "availableReplicas"
+from a Deployment, the JSONPath expression should be "{.availableReplicas}", not
+"{.status.availableReplicas}".
+type: string
+required:
+- aliasLabelName
+- jsonPath
+type: object
+type: array
+required:
+- rules
+type: object
+type: object
type: object
gracefulEvictionTasks:
description: |-
@@ -454,10 +727,12 @@ spec:
description: |-
PurgeMode represents how to deal with the legacy applications on the
cluster from which the application is migrated.
-Valid options are "Immediately", "Graciously" and "Never".
+Valid options are "Immediately", "Directly", "Graciously", "Gracefully" and "Never".
enum:
- Immediately
+- Directly
- Graciously
+- Gracefully
- Never
type: string
reason:
@@ -1252,6 +1527,19 @@ spec:
- kind
- name
type: object
+schedulePriority:
+description: SchedulePriority represents the scheduling priority assigned to workloads.
+properties:
+priority:
+default: 0
+description: |-
+Priority specifies the scheduling priority for the binding.
+Higher values indicate a higher priority.
+If not explicitly set, the default value is 0.
+format: int32
+type: integer
+type: object
schedulerName:
description: |-
SchedulerName represents which scheduler will perform the scheduling.
@@ -1281,6 +1569,16 @@ spec:
type: string
type: array
type: object
+scheduling:
+description: |-
+Scheduling controls whether scheduling should be suspended; the scheduler will pause scheduling and not
+process the resource binding when the value is true, and resume scheduling when it's false or nil.
+This is designed for third-party systems to temporarily pause the scheduling of applications, enabling them
+to manage resource allocation, prioritize critical workloads, etc.
+It is expected that third-party systems use an admission webhook to suspend scheduling at the time of
+ResourceBinding creation. Once a ResourceBinding has been scheduled, it cannot be paused afterward, as it may
+lead to ineffective suspension.
+type: boolean
type: object
required:
- resource
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
-controller-gen.kubebuilder.io/version: v0.16.5
+controller-gen.kubebuilder.io/version: v0.18.0
name: works.work.karmada.io
spec:
group: work.karmada.io
@@ -3,6 +3,7 @@ resources:
- bases/multicluster/multicluster.x-k8s.io_serviceimports.yaml
- bases/policy/policy.karmada.io_clusteroverridepolicies.yaml
- bases/policy/policy.karmada.io_clusterpropagationpolicies.yaml
+- bases/policy/policy.karmada.io_clustertaintpolicies.yaml
- bases/policy/policy.karmada.io_federatedresourcequotas.yaml
- bases/policy/policy.karmada.io_overridepolicies.yaml
- bases/policy/policy.karmada.io_propagationpolicies.yaml
@@ -9,6 +9,6 @@ spec:
strategy: Webhook
webhook:
clientConfig:
-url: https://karmada-webhook.karmada-system.svc:443/convert
+url: "https://{{name}}.{{namespace}}.svc:443/convert"
caBundle: "{{caBundle}}"
conversionReviewVersions: ["v1"]

@@ -9,6 +9,6 @@ spec:
strategy: Webhook
webhook:
clientConfig:
-url: https://karmada-webhook.karmada-system.svc:443/convert
+url: "https://{{name}}.{{namespace}}.svc:443/convert"
caBundle: "{{caBundle}}"
conversionReviewVersions: ["v1"]
@@ -152,7 +152,6 @@ app: {{$name}}-controller-manager
{{- end }}
{{- end -}}
-

{{- define "karmada.scheduler.labels" -}}
{{ $name := include "karmada.name" . }}
{{- if .Values.scheduler.labels -}}

@@ -173,7 +172,6 @@ app: {{$name}}-scheduler
{{- end }}
{{- end -}}
-

{{- define "karmada.descheduler.labels" -}}
{{ $name := include "karmada.name" . }}
{{- if .Values.descheduler.labels -}}

@@ -207,7 +205,6 @@ app: {{$name}}
{{- end -}}
{{- end -}}
-

{{- define "karmada.webhook.labels" -}}
{{ $name := include "karmada.name" .}}
{{- if .Values.webhook.labels }}

@@ -306,6 +303,10 @@ app: {{- include "karmada.name" .}}-search
{{- include "karmada.commonLabels" . -}}
{{- end -}}
+
+{{- define "karmada.preUpdateJob.labels" -}}
+{{- include "karmada.commonLabels" . -}}
+{{- end -}}

{{- define "karmada.staticResourceJob.labels" -}}
{{- include "karmada.commonLabels" . -}}
{{- end -}}
@@ -356,6 +357,16 @@ app: {{- include "karmada.name" .}}-search
secretName: {{ $name }}-cert
{{- end -}}
+
+{{/*
+Common env for POD_IP
+*/}}
+{{- define "karmada.env.podIP" -}}
+- name: POD_IP
+  valueFrom:
+    fieldRef:
+      fieldPath: status.podIP
+{{- end -}}

{{/*
Return the proper karmada internal etcd image name
*/}}
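When included with nindent 12, as the deployment templates below do, this helper renders to the standard downward-API env entry, roughly:

- name: POD_IP
  valueFrom:
    fieldRef:
      fieldPath: status.podIP

The containers can then reference $(POD_IP) in their command-line flags, which is what the bind-address changes throughout the rest of this diff rely on.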
@@ -545,35 +556,82 @@ Return the proper Docker Image Registry Secret Names
{{ include "common.images.pullSecrets" (dict "images" (list .Values.cfssl.image .Values.kubectl.image .Values.etcd.internal.image .Values.agent.image .Values.apiServer.image .Values.controllerManager.image .Values.descheduler.image .Values.schedulerEstimator.image .Values.scheduler.image .Values.webhook.image .Values.aggregatedApiServer.image .Values.metricsAdapter.image .Values.search.image .Values.kubeControllerManager.image) "global" .Values.global) }}
{{- end -}}

+{{- /*
+Generate the --feature-gates command line argument for karmada-controllerManager.
+Iterates over .Values.controllerManager.featureGates and constructs a comma-separated key=value list.
+If any feature gates are set, outputs: --feature-gates=Foo=true,Bar=false
+If none are set, outputs nothing.
+*/ -}}
{{- define "karmada.controllerManager.featureGates" -}}
-{{- if (not (empty .Values.controllerManager.featureGates)) }}
-{{- $featureGatesFlag := "" -}}
+{{- if .Values.controllerManager.featureGates -}}
+{{- $featureGates := list -}}
{{- range $key, $value := .Values.controllerManager.featureGates -}}
-{{- if not (empty (toString $value)) }}
-{{- $featureGatesFlag = cat $featureGatesFlag $key "=" $value "," -}}
+{{- if not (empty (toString $value)) -}}
+{{- $featureGates = append $featureGates (printf "%s=%s" $key (toString $value)) -}}
{{- end -}}
{{- end -}}
-
-{{- if gt (len $featureGatesFlag) 0 }}
-{{- $featureGatesFlag := trimSuffix "," $featureGatesFlag | nospace -}}
-{{- printf "%s=%s" "--feature-gates" $featureGatesFlag -}}
+{{- if $featureGates -}}
+{{- printf "--feature-gates=%s" (join "," $featureGates) | nospace -}}
{{- end -}}
{{- end -}}
{{- end -}}

-{{- define "karmada.schedulerEstimator.featureGates" -}}
-{{- $featureGatesArg := index . "featureGatesArg" -}}
-{{- if (not (empty $featureGatesArg)) }}
-{{- $featureGatesFlag := "" -}}
-{{- range $key, $value := $featureGatesArg -}}
-{{- if not (empty (toString $value)) }}
-{{- $featureGatesFlag = cat $featureGatesFlag $key "=" $value "," -}}
+{{- /*
+Generate the --feature-gates command line argument for karmada-webhook.
+Iterates over .Values.webhook.featureGates and constructs a comma-separated key=value list.
+If any feature gates are set, outputs: --feature-gates=Foo=true,Bar=false
+If none are set, outputs nothing.
+*/ -}}
+{{- define "karmada.webhook.featureGates" -}}
+{{- if .Values.webhook.featureGates -}}
+{{- $featureGates := list -}}
+{{- range $key, $value := .Values.webhook.featureGates -}}
+{{- if not (empty (toString $value)) -}}
+{{- $featureGates = append $featureGates (printf "%s=%s" $key (toString $value)) -}}
{{- end -}}
{{- end -}}
+{{- if $featureGates -}}
+{{- printf "--feature-gates=%s" (join "," $featureGates) | nospace -}}
+{{- end -}}
+{{- end -}}
{{- end -}}
-
-{{- if gt (len $featureGatesFlag) 0 }}
-{{- $featureGatesFlag := trimSuffix "," $featureGatesFlag | nospace -}}
-{{- printf "%s=%s" "--feature-gates" $featureGatesFlag -}}
+{{- /*
+Generate the --feature-gates command line argument for karmada-scheduler.
+Iterates over .Values.scheduler.featureGates and constructs a comma-separated key=value list.
+If any feature gates are set, outputs: --feature-gates=Foo=true,Bar=false
+If none are set, outputs nothing.
+*/ -}}
+{{- define "karmada.scheduler.featureGates" -}}
+{{- if .Values.scheduler.featureGates -}}
+{{- $featureGates := list -}}
+{{- range $key, $value := .Values.scheduler.featureGates -}}
+{{- if not (empty (toString $value)) -}}
+{{- $featureGates = append $featureGates (printf "%s=%s" $key (toString $value)) -}}
+{{- end -}}
+{{- end -}}
+{{- if $featureGates -}}
+{{- printf "--feature-gates=%s" (join "," $featureGates) | nospace -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}

+{{- /*
+Generate the --feature-gates command line argument for karmada-schedulerEstimator.
+Iterates over .Values.schedulerEstimator.featureGates and constructs a comma-separated key=value list.
+If any feature gates are set, outputs: --feature-gates=Foo=true,Bar=false
+If none are set, outputs nothing.
+*/ -}}
+{{- define "karmada.schedulerEstimator.featureGates" -}}
+{{- if .Values.schedulerEstimator.featureGates -}}
+{{- $featureGates := list -}}
+{{- range $key, $value := .Values.schedulerEstimator.featureGates -}}
+{{- if not (empty (toString $value)) -}}
+{{- $featureGates = append $featureGates (printf "%s=%s" $key (toString $value)) -}}
+{{- end -}}
+{{- end -}}
+{{- if $featureGates -}}
+{{- printf "--feature-gates=%s" (join "," $featureGates) | nospace -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
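All four helpers consume a map of gate names to booleans from values.yaml; a hypothetical configuration (the gate names are invented for illustration) and the flag it would render:

controllerManager:
  featureGates:
    Failover: true
    GracefulEviction: false

# renders as: --feature-gates=Failover=true,GracefulEviction=false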
@@ -274,4 +274,18 @@ webhooks:
sideEffects: None
admissionReviewVersions: [ "v1" ]
timeoutSeconds: 3
+- name: resourcebinding.karmada.io
+  rules:
+    - operations: ["CREATE", "UPDATE"]
+      apiGroups: ["work.karmada.io"]
+      apiVersions: ["*"]
+      resources: ["resourcebindings"]
+      scope: "Namespaced"
+  clientConfig:
+    url: https://{{ $name }}-webhook.{{ $namespace }}.svc:443/validate-resourcebinding
+    {{- include "karmada.webhook.caBundle" . | nindent 6 }}
+  failurePolicy: Fail
+  sideEffects: NoneOnDryRun
+  admissionReviewVersions: ["v1"]
+  timeoutSeconds: 3
{{- end -}}
@@ -101,6 +101,7 @@ spec:
# They are obtained by the return value of the function CipherSuites() under the go/src/crypto/tls/cipher_suites.go package.
# Consistent with the Preferred values of k8s's default cipher suites.
- --cipher-suites=TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
+priorityClassName: {{ .Values.etcd.internal.priorityClassName }}
volumes:
- name: etcd-cert
secret:
@@ -101,6 +101,8 @@ spec:
- name: {{ $name }}
image: {{ template "karmada.agent.image" . }}
imagePullPolicy: {{ .Values.agent.image.pullPolicy }}
+env:
+{{- include "karmada.env.podIP" . | nindent 12 }}
command:
- /bin/karmada-agent
- --karmada-kubeconfig=/etc/kubeconfig/kubeconfig

@@ -110,7 +112,8 @@ spec:
{{- end }}
- --cluster-status-update-frequency=10s
- --leader-elect-resource-namespace={{ include "karmada.namespace" . }}
-- --health-probe-bind-address=0.0.0.0:10357
+- --metrics-bind-address=$(POD_IP):8080
+- --health-probe-bind-address=$(POD_IP):10357
- --v=4
livenessProbe:
httpGet:

@@ -130,6 +133,7 @@ spec:
mountPath: /etc/kubeconfig
resources:
{{- toYaml .Values.agent.resources | nindent 12 }}
+priorityClassName: {{ .Values.agent.priorityClassName }}
volumes:
- name: kubeconfig
secret:
@@ -43,6 +43,8 @@ spec:
- name: apiserver-cert
mountPath: /etc/kubernetes/pki
readOnly: true
+env:
+{{- include "karmada.env.podIP" . | nindent 12 }}
command:
- /bin/karmada-aggregated-apiserver
- --kubeconfig=/etc/kubeconfig

@@ -67,6 +69,7 @@ spec:
- --audit-log-maxage=0
- --audit-log-maxbackup=0
- --tls-min-version=VersionTLS13
+- --bind-address=$(POD_IP)
resources:
{{- toYaml .Values.aggregatedApiServer.resources | nindent 12 }}
readinessProbe:

@@ -97,6 +100,7 @@ spec:
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
+priorityClassName: {{ .Values.aggregatedApiServer.priorityClassName }}
volumes:
{{- include "karmada.kubeconfig.volume" . | nindent 8 }}
- name: apiserver-cert
@@ -73,6 +73,35 @@ spec:
- --max-requests-inflight={{ .Values.apiServer.maxRequestsInflight }}
- --max-mutating-requests-inflight={{ .Values.apiServer.maxMutatingRequestsInflight }}
- --tls-min-version=VersionTLS13
+{{- with .Values.apiServer.oidc }}
+{{- if .caFile }}
+- --oidc-ca-file={{ .caFile }}
+{{- end }}
+{{- if .clientId }}
+- --oidc-client-id={{ .clientId }}
+{{- end }}
+{{- if .groupsClaim }}
+- --oidc-groups-claim={{ .groupsClaim }}
+{{- end }}
+{{- if .groupsPrefix }}
+- --oidc-groups-prefix={{ .groupsPrefix }}
+{{- end }}
+{{- if .issuerUrl }}
+- --oidc-issuer-url={{ .issuerUrl }}
+{{- end }}
+{{- if .requiredClaim }}
+- --oidc-required-claim={{ .requiredClaim }}
+{{- end }}
+{{- if .signingAlgs }}
+- --oidc-signing-algs={{ .signingAlgs }}
+{{- end }}
+{{- if .usernameClaim }}
+- --oidc-username-claim={{ .usernameClaim }}
+{{- end }}
+{{- if .usernamePrefix }}
+- --oidc-username-prefix={{ .usernamePrefix }}
+{{- end }}
+{{- end }}
ports:
- name: http
containerPort: 5443

@@ -117,7 +146,6 @@ spec:
{{- end }}
preemptionPolicy: PreemptLowerPriority
priority: 2000001000
-priorityClassName: system-node-critical
restartPolicy: Always
schedulerName: default-scheduler
terminationGracePeriodSeconds: 30

@@ -133,6 +161,7 @@ spec:
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
+priorityClassName: {{ .Values.apiServer.priorityClassName }}
volumes:
- name: apiserver-cert
secret:
@@ -41,6 +41,7 @@ spec:
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
+priorityClassName: {{ .Values.controllerManager.priorityClassName }}
volumes:
{{- include "karmada.kubeconfig.volume" . | nindent 8 }}
initContainers:

@@ -49,13 +50,15 @@ spec:
- name: {{ $name }}-controller-manager
image: {{ template "karmada.controllerManager.image" . }}
imagePullPolicy: {{ .Values.controllerManager.image.pullPolicy }}
+env:
+{{- include "karmada.env.podIP" . | nindent 12 }}
command:
- /bin/karmada-controller-manager
- --kubeconfig=/etc/kubeconfig
- --cluster-status-update-frequency=10s
- --leader-elect-resource-namespace={{ $systemNamespace }}
-- --health-probe-bind-address=0.0.0.0:10357
-- --metrics-bind-address=:8080
+- --metrics-bind-address=$(POD_IP):8080
+- --health-probe-bind-address=$(POD_IP):10357
- --v=2
{{- if .Values.controllerManager.controllers }}
- --controllers={{ .Values.controllerManager.controllers }}
@@ -47,11 +47,13 @@ spec:
- name: {{ $name }}-descheduler
image: {{ template "karmada.descheduler.image" . }}
imagePullPolicy: {{ .Values.descheduler.image.pullPolicy }}
+env:
+{{- include "karmada.env.podIP" . | nindent 12 }}
command:
- /bin/karmada-descheduler
- --kubeconfig=/etc/kubeconfig
-- --metrics-bind-address=0.0.0.0:8080
-- --health-probe-bind-address=0.0.0.0:10358
+- --metrics-bind-address=$(POD_IP):8080
+- --health-probe-bind-address=$(POD_IP):10358
- --leader-elect-resource-namespace={{ $systemNamespace }}
- --scheduler-estimator-ca-file=/etc/karmada/pki/server-ca.crt
- --scheduler-estimator-cert-file=/etc/karmada/pki/karmada.crt

@@ -77,6 +79,7 @@ spec:
{{- include "karmada.kubeconfig.volumeMount" . | nindent 12 }}
resources:
{{- toYaml .Values.descheduler.resources | nindent 12 }}
+priorityClassName: {{ .Values.descheduler.priorityClassName }}
volumes:
{{- include "karmada.descheduler.kubeconfig.volume" . | nindent 8 }}
{{- include "karmada.scheduler.cert.volume" . | nindent 8 }}
@@ -41,9 +41,12 @@ spec:
- name: apiserver-cert
mountPath: /etc/kubernetes/pki
readOnly: true
+env:
+{{- include "karmada.env.podIP" . | nindent 12 }}
command:
- /bin/karmada-metrics-adapter
- --kubeconfig=/etc/kubeconfig
+- --metrics-bind-address=$(POD_IP):8080
- --authentication-kubeconfig=/etc/kubeconfig
- --authorization-kubeconfig=/etc/kubeconfig
- --tls-cert-file=/etc/kubernetes/pki/karmada.crt

@@ -52,6 +55,7 @@ spec:
- --audit-log-maxage=0
- --audit-log-maxbackup=0
- --tls-min-version=VersionTLS13
+- --bind-address=$(POD_IP)
resources:
{{- toYaml .Values.metricsAdapter.resources | nindent 12 }}
readinessProbe:

@@ -82,6 +86,7 @@ spec:
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
+priorityClassName: {{ .Values.metricsAdapter.priorityClassName }}
volumes:
{{- include "karmada.kubeconfig.volume" . | nindent 8 }}
- name: apiserver-cert
@@ -44,6 +44,8 @@ spec:
- name: karmada-scheduler-estimator
image: {{ template "karmada.schedulerEstimator.image" $ }}
imagePullPolicy: {{ $.Values.schedulerEstimator.image.pullPolicy }}
+env:
+{{- include "karmada.env.podIP" . | nindent 12 }}
command:
- /bin/karmada-scheduler-estimator
- --kubeconfig=/etc/{{ $clusterName }}-kubeconfig

@@ -51,11 +53,16 @@ spec:
- --grpc-auth-cert-file=/etc/karmada/pki/karmada.crt
- --grpc-auth-key-file=/etc/karmada/pki/karmada.key
- --grpc-client-ca-file=/etc/karmada/pki/server-ca.crt
-- --metrics-bind-address=0.0.0.0:8080
-- --health-probe-bind-address=0.0.0.0:10351
-{{- with (include "karmada.schedulerEstimator.featureGates" (dict "featureGatesArg" $.Values.schedulerEstimator.featureGates)) }}
+- --metrics-bind-address=$(POD_IP):8080
+- --health-probe-bind-address=$(POD_IP):10351
+{{- /*
+We use '$' to refer to the root context.
+Inside this 'range' loop, '.' refers to the current item from '.Values.schedulerEstimator.memberClusters'.
+Using '$' ensures that we can access the top-level '.Values.schedulerEstimator.featureGates'.
+*/}}
+{{- with (include "karmada.schedulerEstimator.featureGates" $) }}
- {{ . }}
{{- end}}
{{- end }}
livenessProbe:
httpGet:
path: /healthz

@@ -78,6 +85,7 @@ spec:
mountPath: /etc/{{ $clusterName }}-kubeconfig
resources:
{{- toYaml $.Values.schedulerEstimator.resources | nindent 12 }}
+priorityClassName: {{ $.Values.schedulerEstimator.priorityClassName }}
volumes:
{{- include "karmada.scheduler.cert.volume" $ | nindent 8 }}
- name: member-kubeconfig
@@ -47,15 +47,23 @@ spec:
- name: {{ $name }}-scheduler
image: {{ template "karmada.scheduler.image" .}}
imagePullPolicy: {{ .Values.scheduler.image.pullPolicy }}
+env:
+{{- include "karmada.env.podIP" . | nindent 12 }}
command:
- /bin/karmada-scheduler
- --kubeconfig=/etc/kubeconfig
-- --metrics-bind-address=0.0.0.0:8080
-- --health-probe-bind-address=0.0.0.0:10351
+- --metrics-bind-address=$(POD_IP):8080
+- --health-probe-bind-address=$(POD_IP):10351
- --leader-elect-resource-namespace={{ $systemNamespace }}
+- --scheduler-estimator-ca-file=/etc/karmada/pki/server-ca.crt
+- --scheduler-estimator-cert-file=/etc/karmada/pki/karmada.crt
+- --scheduler-estimator-key-file=/etc/karmada/pki/karmada.key
{{- if .Values.scheduler.enableSchedulerEstimator }}
- --enable-scheduler-estimator=true
{{- end }}
+{{- with (include "karmada.scheduler.featureGates" .) }}
+- {{ . }}
+{{- end }}
livenessProbe:
httpGet:
path: /healthz

@@ -76,6 +84,7 @@ spec:
{{- include "karmada.kubeconfig.volumeMount" . | nindent 12 }}
resources:
{{- toYaml .Values.scheduler.resources | nindent 12 }}
+priorityClassName: {{ .Values.scheduler.priorityClassName }}
volumes:
{{- include "karmada.kubeconfig.volume" . | nindent 8 }}
{{- include "karmada.scheduler.cert.volume" . | nindent 8 }}
@@ -56,6 +56,8 @@ spec:
- name: kubeconfig-secret
subPath: kubeconfig
mountPath: /etc/kubeconfig
+env:
+{{- include "karmada.env.podIP" . | nindent 12 }}
command:
- /bin/karmada-search
- --kubeconfig=/etc/kubeconfig

@@ -80,6 +82,7 @@ spec:
- --audit-log-maxage=0
- --audit-log-maxbackup=0
- --tls-min-version=VersionTLS13
+- --bind-address=$(POD_IP)
livenessProbe:
httpGet:
path: /livez

@@ -90,7 +93,8 @@ spec:
periodSeconds: 15
timeoutSeconds: 5
resources:
-{{- toYaml .Values.apiServer.resources | nindent 12 }}
+{{- toYaml .Values.search.resources | nindent 12 }}
+priorityClassName: {{ .Values.search.priorityClassName }}
volumes:
{{- include "karmada.search.kubeconfig.volume" . | nindent 8 }}
{{- include "karmada.search.etcd.cert.volume" . | nindent 8 }}
@@ -47,12 +47,19 @@ spec:
- name: {{ $name }}-webhook
image: {{ template "karmada.webhook.image" . }}
imagePullPolicy: {{ .Values.webhook.image.pullPolicy }}
+env:
+{{- include "karmada.env.podIP" . | nindent 12 }}
command:
- /bin/karmada-webhook
- --kubeconfig=/etc/kubeconfig
-- --bind-address=0.0.0.0
+- --bind-address=$(POD_IP)
+- --metrics-bind-address=$(POD_IP):8080
+- --health-probe-bind-address=$(POD_IP):8000
- --secure-port=8443
- --cert-dir=/var/serving-cert
+{{- with (include "karmada.webhook.featureGates" .) }}
+- {{ . }}
+{{- end }}
ports:
- containerPort: 8443
- containerPort: 8080

@@ -70,6 +77,7 @@ spec:
scheme: HTTPS
resources:
{{- toYaml .Values.webhook.resources | nindent 12 }}
+priorityClassName: {{ .Values.webhook.priorityClassName }}
volumes:
{{- include "karmada.kubeconfig.volume" . | nindent 8 }}
- name: {{ $name }}-webhook-cert-secret
@@ -81,12 +81,12 @@ spec:
{{- toYaml .Values.kubeControllerManager.resources | nindent 12 }}
volumeMounts:
- mountPath: /etc/karmada/pki
-  name: apisever-cert
+  name: apiserver-cert
readOnly: true
{{- include "karmada.kubeconfig.volumeMount" . | nindent 12 }}
-priorityClassName: system-node-critical
+priorityClassName: {{ .Values.kubeControllerManager.priorityClassName }}
volumes:
-- name: apisever-cert
+- name: apiserver-cert
secret:
secretName: {{ $name }}-cert
{{- include "karmada.kubeconfig.volume" . | nindent 8 }}
@@ -8,7 +8,7 @@ metadata:
name: {{ $name }}-crds-kustomization
namespace: {{ $namespace }}
annotations:
-"helm.sh/hook": pre-install
+"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "2"
{{- if "karmada.preInstallJob.labels" }}
labels:

@@ -28,7 +28,7 @@ metadata:
name: {{ $name }}-crds-autoscaling-bases
namespace: {{ $namespace }}
annotations:
-"helm.sh/hook": pre-install
+"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "2"
{{- if "karmada.preInstallJob.labels" }}
labels:

@@ -48,7 +48,7 @@ metadata:
name: {{ $name }}-crds-config-bases
namespace: {{ $namespace }}
annotations:
-"helm.sh/hook": pre-install
+"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "2"
{{- if "karmada.preInstallJob.labels" }}
labels:

@@ -68,7 +68,7 @@ metadata:
name: {{ $name }}-crds-multicluster-bases
namespace: {{ $namespace }}
annotations:
-"helm.sh/hook": pre-install
+"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "2"
{{- if "karmada.preInstallJob.labels" }}
labels:

@@ -88,7 +88,7 @@ metadata:
name: {{ $name }}-crds-networking-bases
namespace: {{ $namespace }}
annotations:
-"helm.sh/hook": pre-install
+"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "2"
{{- if "karmada.preInstallJob.labels" }}
labels:

@@ -109,7 +109,7 @@ metadata:
name: {{ $name }}-crds-policy-bases
namespace: {{ $namespace }}
annotations:
-"helm.sh/hook": pre-install
+"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "2"
{{- if "karmada.preInstallJob.labels" }}
labels:

@@ -129,7 +129,7 @@ metadata:
name: {{ $name }}-crds-remedy-bases
namespace: {{ $namespace }}
annotations:
-"helm.sh/hook": pre-install
+"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "2"
{{- if "karmada.preInstallJob.labels" }}
labels:

@@ -149,7 +149,7 @@ metadata:
name: {{ $name }}-crds-work-bases
namespace: {{ $namespace }}
annotations:
-"helm.sh/hook": pre-install
+"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "2"
{{- if "karmada.preInstallJob.labels" }}
labels:

@@ -169,7 +169,7 @@ metadata:
name: {{ $name }}-crds-apps-bases
namespace: {{ $namespace }}
annotations:
-"helm.sh/hook": pre-install
+"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "2"
{{- if "karmada.preInstallJob.labels" }}
labels:
@@ -182,6 +182,58 @@ data:
{{- $.Files.Get $path | nindent 8 }}
{{ end }}

---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ $name }}-hook-job
namespace: {{ $namespace }}
annotations:
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "1"
{{- if "karmada.preInstallJob.labels" }}
labels:
{{- include "karmada.preInstallJob.labels" . | nindent 4 }}
{{- end }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ $name }}-hook-job
annotations:
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "1"
{{- if "karmada.preInstallJob.labels" }}
labels:
{{- include "karmada.preInstallJob.labels" . | nindent 4 }}
{{- end }}
rules:
- apiGroups: ['*']
resources: ['*']
verbs: ["get", "watch", "list", "create", "update", "patch", "delete"]
- nonResourceURLs: ['*']
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ $name }}-hook-job
annotations:
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "1"
{{- if "karmada.preInstallJob.labels" }}
labels:
{{- include "karmada.preInstallJob.labels" . | nindent 4 }}
{{- end }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ $name }}-hook-job
subjects:
- kind: ServiceAccount
name: {{ $name }}-hook-job
namespace: {{ $namespace }}

{{- if eq .Values.certs.mode "custom" }}
---
apiVersion: v1
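The hook-job ClusterRole above is deliberately broad (wildcard API groups, resources, and verbs) because the pre-install and pre-upgrade Jobs apply arbitrary manifests. One way to sanity-check what the bound ServiceAccount can actually do is a SubjectAccessReview; the sketch below is a generic client-go example, not part of this chart, and the namespace and ServiceAccount user string are illustrative since they depend on the release name.

```go
package main

import (
	"context"
	"fmt"

	authorizationv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load a kubeconfig; the path is illustrative.
	config, err := clientcmd.BuildConfigFromFlags("", "/etc/karmada/kubeconfig")
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	// Ask the API server whether the hook-job ServiceAccount may update ConfigMaps.
	// The user string below assumes a release installed into "karmada-system".
	review := &authorizationv1.SubjectAccessReview{
		Spec: authorizationv1.SubjectAccessReviewSpec{
			User: "system:serviceaccount:karmada-system:karmada-hook-job",
			ResourceAttributes: &authorizationv1.ResourceAttributes{
				Namespace: "karmada-system",
				Verb:      "update",
				Resource:  "configmaps",
			},
		},
	}
	result, err := client.AuthorizationV1().SubjectAccessReviews().Create(
		context.TODO(), review, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("allowed=%v reason=%q\n", result.Status.Allowed, result.Status.Reason)
}
```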
@@ -190,7 +242,7 @@ metadata:
name: {{ $name }}-static-resources
namespace: {{ $namespace }}
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "2"
{{- if "karmada.preInstallJob.labels" }}
labels:

@@ -216,7 +268,7 @@ metadata:
name: {{ $name }}-crds-patches
namespace: {{ $namespace }}
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-weight": "2"
{{- if "karmada.preInstallJob.labels" }}
labels:
@@ -388,8 +440,8 @@ spec:
mkdir -p /opt/configs
mkdir -p /opt/certs
cp -r -L /opt/mount/* /opt/configs/
openssl req -x509 -sha256 -new -nodes -days 365 -newkey rsa:{{ .Values.certs.auto.rsaSize }} -keyout "/opt/certs/server-ca.key" -out "/opt/certs/server-ca.crt" -subj "/C=xx/ST=x/L=x/O=x/OU=x/CN=ca/emailAddress=x/"
openssl req -x509 -sha256 -new -nodes -days 365 -newkey rsa:{{ .Values.certs.auto.rsaSize }} -keyout "/opt/certs/front-proxy-ca.key" -out "/opt/certs/front-proxy-ca.crt" -subj "/C=xx/ST=x/L=x/O=x/OU=x/CN=ca/emailAddress=x/"
openssl req -x509 -sha256 -new -nodes -days {{ .Values.certs.auto.rootCAExpiryDays }} -newkey rsa:{{ .Values.certs.auto.rsaSize }} -keyout "/opt/certs/server-ca.key" -out "/opt/certs/server-ca.crt" -subj "/C=xx/ST=x/L=x/O=x/OU=x/CN=ca/emailAddress=x/"
openssl req -x509 -sha256 -new -nodes -days {{ .Values.certs.auto.rootCAExpiryDays }} -newkey rsa:{{ .Values.certs.auto.rsaSize }} -keyout "/opt/certs/front-proxy-ca.key" -out "/opt/certs/front-proxy-ca.crt" -subj "/C=xx/ST=x/L=x/O=x/OU=x/CN=ca/emailAddress=x/"
echo '{"signing":{"default":{"expiry":{{ printf `"%s"` .Values.certs.auto.expiry }},"usages":["signing","key encipherment","client auth","server auth"]}}}' > "/opt/certs/server-ca-config.json"
echo '{"CN":"system:admin","hosts":{{ tpl (toJson .Values.certs.auto.hosts) . }},"names":[{"O":"system:masters"}],"key":{"algo":"rsa","size":{{ .Values.certs.auto.rsaSize }}}}' | cfssl gencert -ca=/opt/certs/server-ca.crt -ca-key=/opt/certs/server-ca.key -config=/opt/certs/server-ca-config.json - | cfssljson -bare /opt/certs/karmada
echo '{"signing":{"default":{"expiry":{{ printf `"%s"` .Values.certs.auto.expiry }},"usages":["signing","key encipherment","client auth","server auth"]}}}' > "/opt/certs/front-proxy-ca-config.json"
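The hunk above replaces the hard-coded 365 days in both `openssl req -x509` invocations with the new `certs.auto.rootCAExpiryDays` value, so the root CA lifetime becomes configurable. The chart itself shells out to openssl; purely as a reference for where that day count lands, here is a minimal Go sketch of the same idea using `crypto/x509`, where the configured days determine the certificate's `NotAfter`:

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"fmt"
	"math/big"
	"time"
)

func main() {
	// Mirrors the chart default plumbed into `openssl -days`.
	const rootCAExpiryDays = 3650

	key, err := rsa.GenerateKey(rand.Reader, 3072)
	if err != nil {
		panic(err)
	}
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "ca"},
		NotBefore:    time.Now(),
		// The expiry-days knob ends up here.
		NotAfter:              time.Now().AddDate(0, 0, rootCAExpiryDays),
		IsCA:                  true,
		KeyUsage:              x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature,
		BasicConstraintsValid: true,
	}
	// Self-signed: template and parent are the same certificate.
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: der})))
}
```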
@@ -446,56 +498,5 @@ spec:
- name: configs
emptyDir: {}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ $name }}-hook-job
namespace: {{ $namespace }}
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook-weight": "1"
{{- if "karmada.preInstallJob.labels" }}
labels:
{{- include "karmada.preInstallJob.labels" . | nindent 4 }}
{{- end }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ $name }}-hook-job
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook-weight": "1"
{{- if "karmada.preInstallJob.labels" }}
labels:
{{- include "karmada.preInstallJob.labels" . | nindent 4 }}
{{- end }}
rules:
- apiGroups: ['*']
resources: ['*']
verbs: ["get", "watch", "list", "create", "update", "patch", "delete"]
- nonResourceURLs: ['*']
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ $name }}-hook-job
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook-weight": "1"
{{- if "karmada.preInstallJob.labels" }}
labels:
{{- include "karmada.preInstallJob.labels" . | nindent 4 }}
{{- end }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ $name }}-hook-job
subjects:
- kind: ServiceAccount
name: {{ $name }}-hook-job
namespace: {{ $namespace }}
---
{{- end }}
{{- end }}
@@ -0,0 +1,98 @@
{{- $name := include "karmada.name" . -}}
{{- $namespace := include "karmada.namespace" . -}}
{{- if eq .Values.installMode "host" }}
{{- if eq .Values.certs.mode "auto" }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ $name }}-static-resources
namespace: {{ $namespace }}
annotations:
"helm.sh/hook": pre-upgrade
"helm.sh/hook-weight": "2"
data:
{{- print "webhook-configuration.yaml: " | nindent 6 }} |-
{{- include "karmada.webhook.configuration" . | nindent 8 }}
{{- print "system-namespace.yaml: " | nindent 6 }} |-
{{- include "karmada.systemNamespace" . | nindent 8 }}
{{- print "karmada-aggregated-apiserver-apiservice.yaml: " | nindent 6 }} |-
{{- include "karmada.apiservice" . | nindent 8 }}
{{- print "cluster-proxy-admin-rbac.yaml: " | nindent 6 }} |-
{{- include "karmada.proxyRbac" . | nindent 8 }}
{{- print "bootstrap-token-configuration.yaml: " | nindent 6 }} |-
{{- include "karmada.bootstrapToken.configuration" . | nindent 8 }}
{{- print "clusterrole.yaml: " | nindent 6 }} |-
{{- include "karmada.clusterrole" . | nindent 8 }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ $name }}-crds-patches
namespace: {{ $namespace }}
annotations:
"helm.sh/hook": pre-upgrade
"helm.sh/hook-weight": "2"
data:
{{- print "webhook_in_clusterresourcebindings.yaml: " | nindent 6 }} |-
{{- include "karmada.crd.patch.webhook.clusterresourcebinding" . | nindent 8 }}
{{- print "webhook_in_resourcebindings.yaml: " | nindent 6 }} |-
{{- include "karmada.crd.patch.webhook.resourcebinding" . | nindent 8 }}
---
apiVersion: batch/v1
kind: Job
metadata:
name: "{{ $name }}-pre-upgrade"
namespace: {{ $namespace }}
annotations:
"helm.sh/hook": pre-upgrade
"helm.sh/hook-weight": "3"
"helm.sh/hook-delete-policy": {{ .Values.preUpdateJob.hookDeletePolicy }}
{{- if "karmada.preUpdateJob.labels" }}
labels:
{{- include "karmada.preUpdateJob.labels" . | nindent 4 }}
{{- end }}
spec:
parallelism: 1
completions: 1
template:
metadata:
name: {{ $name }}-pre-upgrade
labels:
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
app.kubernetes.io/instance: {{ $name | quote }}
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
spec:
{{- include "karmada.imagePullSecrets" . | nindent 6 }}
{{- with .Values.preUpdateJob.tolerations}}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.preUpdateJob.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ $name }}-hook-job
restartPolicy: Never
containers:
- name: pre-upgrade
image: {{ template "karmada.kubectl.image" . }}
imagePullPolicy: {{ .Values.kubectl.image.pullPolicy }}
command:
- /bin/sh
- -c
- |
set -ex
# Fetch certs from existing secret
karmada_ca=$(kubectl get secret {{ $name }}-cert -n {{ $namespace }} -o jsonpath='{.data.server-ca\.crt}')
kubectl get configmap {{ $name }}-static-resources -n {{ $namespace }} -o yaml | sed -e "s/{{ print "{{ ca_crt }}" }}/${karmada_ca}/g" | kubectl apply -f -
kubectl get configmap {{ $name }}-crds-patches -n {{ $namespace }} -o yaml | sed -e "s/{{ print "{{ ca_crt }}" }}/${karmada_ca}/g" | kubectl apply -f -
resources:
requests:
cpu: 50m
memory: 64Mi
limits:
cpu: 100m
memory: 128Mi
{{- end }}
{{- end }}
@@ -98,6 +98,12 @@ preInstallJob:
## Define policies that determine when to delete corresponding hook resources: before-hook-creation,hook-succeeded,hook-failed
hookDeletePolicy: "hook-succeeded"

preUpdateJob:
tolerations: []
nodeSelector: {}
## Define policies that determine when to delete corresponding hook resources: before-hook-creation,hook-succeeded,hook-failed
hookDeletePolicy: "hook-succeeded"

## static-resource job config
staticResourceJob:
tolerations: []

@@ -128,7 +134,10 @@ certs:
mode: auto
auto:
## @param certs.auto.expiry expiry of the certificate
## Note: The expiry value should not exceed the rootCA expiry time (rootCAExpiryDays * 24h)
expiry: 43800h
## @param certs.auto.rootCAExpiryDays expiry of the root CA certificate in days, defaults to 3650 days (10 years)
rootCAExpiryDays: 3650
## @param certs.auto.hosts hosts of the certificate
hosts: [
"kubernetes.default.svc",
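The new note couples two values: `certs.auto.expiry` (a Go-style duration string for leaf certificates) must stay within `rootCAExpiryDays * 24h`, otherwise leaves would nominally outlive their issuing CA. A small sketch of that check, assuming the defaults shown above:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Values assumed to mirror certs.auto in values.yaml.
	const rootCAExpiryDays = 3650
	leafExpiry, err := time.ParseDuration("43800h") // certs.auto.expiry
	if err != nil {
		panic(err)
	}
	rootCAExpiry := time.Duration(rootCAExpiryDays) * 24 * time.Hour
	if leafExpiry > rootCAExpiry {
		fmt.Println("invalid: certs.auto.expiry exceeds the root CA lifetime")
		return
	}
	// With the defaults: 43800h (5 years) <= 87600h (10 years).
	fmt.Printf("ok: leaf %v <= root CA %v\n", leafExpiry, rootCAExpiry)
}
```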
@@ -232,6 +241,12 @@ scheduler:
maxSurge: 50%
## @param apiServer.podDisruptionBudget
podDisruptionBudget: *podDisruptionBudget
## @param scheduler.priorityClassName the priority class name for the karmada-scheduler
priorityClassName: "system-node-critical"
## @param scheduler.enableSchedulerEstimator enable scheduler estimator
enableSchedulerEstimator: false
## @param scheduler.featureGates A set of key=value pairs that describe feature gates for karmada-scheduler
featureGates: {}

## webhook config
webhook:

@@ -286,8 +301,12 @@ webhook:
rollingUpdate:
maxUnavailable: 0
maxSurge: 50%
## @param apiServer.podDisruptionBudget
## @param webhook.podDisruptionBudget
podDisruptionBudget: *podDisruptionBudget
## @param webhook.priorityClassName the priority class name for the karmada-webhook
priorityClassName: "system-node-critical"
## @param webhook.featureGates A set of key=value pairs that describe feature gates for karmada-webhook
featureGates: {}

## controller manager config
controllerManager:

@@ -352,6 +371,8 @@ controllerManager:
extraCommandArgs: {}
## @param apiServer.podDisruptionBudget
podDisruptionBudget: *podDisruptionBudget
## @param controllerManager.priorityClassName the priority class name for the karmada-controller-manager
priorityClassName: "system-node-critical"

## karmada apiserver config
apiServer:

@@ -435,6 +456,19 @@ apiServer:
maxSurge: 1
## @param apiServer.podDisruptionBudget
podDisruptionBudget: *podDisruptionBudget
## @param apiServer.priorityClassName the priority class name for the karmada-apiserver
priorityClassName: "system-node-critical"
oidc:
caFile: ""
clientId: ""
groupsClaim: ""
groupsPrefix: ""
issuerUrl: ""
# @param apiServer.oidc.requiredClaim comma separated 'key=value' pairs that describe required claims in the ID token
requiredClaim: ""
signingAlgs: ""
usernameClaim: ""
usernamePrefix: ""

## karmada aggregated apiserver config
aggregatedApiServer:

@@ -493,6 +527,8 @@ aggregatedApiServer:
maxSurge: 50%
## @param apiServer.podDisruptionBudget
podDisruptionBudget: *podDisruptionBudget
## @param aggregatedApiServer.priorityClassName the priority class name for the karmada-aggregated-apiserver.
priorityClassName: "system-node-critical"

## karmada metrics adapter config
metricsAdapter:

@@ -551,6 +587,8 @@ metricsAdapter:
maxSurge: 50%
## @param apiServer.podDisruptionBudget
podDisruptionBudget: *podDisruptionBudget
## @param metricsAdapter.priorityClassName the priority class name for the karmada-metrics-adapter
priorityClassName: "system-node-critical"

## kubernetes controller manager config
kubeControllerManager:

@@ -612,6 +650,8 @@ kubeControllerManager:
podDisruptionBudget: *podDisruptionBudget
serviceClusterIPRange: "10.96.0.0/12"
clusterCIDR: "10.244.0.0/16"
## @param kubeControllerManager.priorityClassName the priority class name for the kube-controller-manager
priorityClassName: "system-node-critical"

## etcd config
etcd:

@@ -696,6 +736,8 @@ etcd:
# requests:
# cpu: 100m
# memory: 128Mi
## @param etcd.internal.priorityClassName the priority class name for the etcd
priorityClassName: "system-node-critical"

## agent client config
agent:

@@ -775,6 +817,8 @@ agent:
maxSurge: 50%
## @param apiServer.podDisruptionBudget
podDisruptionBudget: *podDisruptionBudget
## @param agent.priorityClassName the priority class name for the karmada-agent
priorityClassName: "system-node-critical"

## karmada scheduler estimator
schedulerEstimator:

@@ -857,6 +901,8 @@ schedulerEstimator:
## @param featureGate to schedulerEstimator
# FooPluginName: true
featureGates: {}
## @param schedulerEstimator.priorityClassName the priority class name for the scheduler-estimator
priorityClassName: "system-node-critical"

## descheduler config
descheduler:

@@ -915,6 +961,8 @@ descheduler:
kubeconfig: karmada-kubeconfig
## @param apiServer.podDisruptionBudget
podDisruptionBudget: *podDisruptionBudget
## @param descheduler.priorityClassName the priority class name for the descheduler
priorityClassName: "system-node-critical"

## karmada-search config
search:

@@ -975,3 +1023,5 @@ search:
kubeconfig: karmada-kubeconfig
## @param apiServer.podDisruptionBudget
podDisruptionBudget: *podDisruptionBudget
## @param search.priorityClassName the priority class name for the karmada-search
priorityClassName: "system-node-critical"
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

FROM alpine:3.20.3
FROM alpine:3.22.1

ARG BINARY

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

FROM alpine:3.20.3
FROM alpine:3.22.1

ARG BINARY
ARG TARGETPLATFORM
@@ -28,14 +28,18 @@ import (
"k8s.io/client-go/informers"
kubeclientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/util/flowcontrol"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/component-base/logs"
logsv1 "k8s.io/component-base/logs/api/v1"
"k8s.io/component-base/term"
"k8s.io/klog/v2"
"k8s.io/utils/ptr"
controllerruntime "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/config"
"sigs.k8s.io/controller-runtime/pkg/healthz"
crtlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"

"github.com/karmada-io/karmada/cmd/agent/app/options"

@@ -61,6 +65,7 @@ import (
"github.com/karmada-io/karmada/pkg/util/fedinformer/typedmanager"
"github.com/karmada-io/karmada/pkg/util/gclient"
"github.com/karmada-io/karmada/pkg/util/helper"
"github.com/karmada-io/karmada/pkg/util/indexregistry"
"github.com/karmada-io/karmada/pkg/util/names"
"github.com/karmada-io/karmada/pkg/util/objectwatcher"
"github.com/karmada-io/karmada/pkg/util/restmapper"
@@ -70,13 +75,32 @@

// NewAgentCommand creates a *cobra.Command object with default parameters
func NewAgentCommand(ctx context.Context) *cobra.Command {
logConfig := logsv1.NewLoggingConfiguration()
fss := cliflag.NamedFlagSets{}

// Set klog flags
logsFlagSet := fss.FlagSet("logs")
logs.AddFlags(logsFlagSet, logs.SkipLoggingConfigurationFlags())
logsv1.AddFlags(logConfig, logsFlagSet)
klogflag.Add(logsFlagSet)

genericFlagSet := fss.FlagSet("generic")
genericFlagSet.AddGoFlagSet(flag.CommandLine)
opts := options.NewOptions()
opts.AddFlags(genericFlagSet, controllers.ControllerNames())

cmd := &cobra.Command{
Use: "karmada-agent",
Use: names.KarmadaAgentComponentName,
Long: `The karmada-agent is the agent of member clusters. It can register a specific cluster to the Karmada control
plane and sync manifests from the Karmada control plane to the member cluster. In addition, it also syncs the status of member
cluster and manifests to the Karmada control plane.`,
PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
if err := logsv1.ValidateAndApply(logConfig, features.FeatureGate); err != nil {
return err
}
logs.InitLogs()
return nil
},
RunE: func(_ *cobra.Command, _ []string) error {
// validate options
if errs := opts.Validate(); len(errs) != 0 {

@@ -97,17 +121,7 @@ cluster and manifests to the Karmada control plane.`,
},
}

fss := cliflag.NamedFlagSets{}

genericFlagSet := fss.FlagSet("generic")
genericFlagSet.AddGoFlagSet(flag.CommandLine)
opts.AddFlags(genericFlagSet, controllers.ControllerNames())

// Set klog flags
logsFlagSet := fss.FlagSet("logs")
klogflag.Add(logsFlagSet)

cmd.AddCommand(sharedcommand.NewCmdVersion("karmada-agent"))
cmd.AddCommand(sharedcommand.NewCmdVersion(names.KarmadaAgentComponentName))
cmd.Flags().AddFlagSet(genericFlagSet)
cmd.Flags().AddFlagSet(logsFlagSet)
@@ -140,8 +154,7 @@ func run(ctx context.Context, opts *options.Options) error {
if err != nil {
return fmt.Errorf("error building kubeconfig of karmada control plane: %w", err)
}
controlPlaneRestConfig.QPS, controlPlaneRestConfig.Burst = opts.KubeAPIQPS, opts.KubeAPIBurst

controlPlaneRestConfig.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(opts.KubeAPIQPS, opts.KubeAPIBurst)
clusterConfig, err := controllerruntime.GetConfig()
if err != nil {
return fmt.Errorf("error building kubeconfig of member cluster: %w", err)
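The switch from assigning `QPS`/`Burst` to installing `flowcontrol.NewTokenBucketRateLimiter` is largely equivalent, with one subtlety worth noting: `rest.Config` documents that an explicitly set `RateLimiter` takes precedence over the `QPS`/`Burst` fields, so setting the limiter once covers every client derived from the config. A minimal sketch:

```go
package main

import (
	"k8s.io/client-go/rest"
	"k8s.io/client-go/util/flowcontrol"
)

// configureRateLimits shows the two ways of bounding client-side request
// rates on a rest.Config. When RateLimiter is set it takes precedence, so
// also assigning QPS/Burst would have no effect.
func configureRateLimits(cfg *rest.Config, qps float32, burst int) {
	// Old style: let client-go build a token bucket from these fields.
	// cfg.QPS, cfg.Burst = qps, burst

	// New style used in this diff: install the token bucket explicitly.
	cfg.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(qps, burst)
}

func main() {
	cfg := &rest.Config{Host: "https://example.invalid"} // placeholder host
	configureRateLimits(cfg, 40, 60)
}
```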
@@ -164,28 +177,25 @@ func run(ctx context.Context, opts *options.Options) error {
ClusterConfig: clusterConfig,
}

id, err := util.ObtainClusterID(clusterKubeClient)
registerOption.ClusterID, err = util.ObtainClusterID(clusterKubeClient)
if err != nil {
return err
}

ok, name, err := util.IsClusterIdentifyUnique(karmadaClient, id)
if err != nil {
if err = registerOption.Validate(karmadaClient, true); err != nil {
return err
}

if !ok && opts.ClusterName != name {
return fmt.Errorf("the same cluster has been registered with name %s", name)
}

registerOption.ClusterID = id

clusterSecret, impersonatorSecret, err := util.ObtainCredentialsFromMemberCluster(clusterKubeClient, registerOption)
if err != nil {
return err
}
registerOption.Secret = *clusterSecret
registerOption.ImpersonatorSecret = *impersonatorSecret
if clusterSecret != nil {
registerOption.Secret = *clusterSecret
}
if impersonatorSecret != nil {
registerOption.ImpersonatorSecret = *impersonatorSecret
}
err = util.RegisterClusterInControllerPlane(registerOption, controlPlaneKubeClient, generateClusterInControllerPlane)
if err != nil {
return fmt.Errorf("failed to register with karmada control plane: %w", err)

@@ -216,6 +226,7 @@ func run(ctx context.Context, opts *options.Options) error {
clusterv1alpha1.SchemeGroupVersion.WithKind("Cluster").GroupKind().String(): opts.ConcurrentClusterSyncs,
},
CacheSyncTimeout: opts.ClusterCacheSyncTimeout.Duration,
UsePriorityQueue: ptr.To(features.FeatureGate.Enabled(features.ControllerPriorityQueue)),
},
NewCache: func(config *rest.Config, opts cache.Options) (cache.Cache, error) {
opts.DefaultTransform = fedinformer.StripUnusedFields

@@ -231,11 +242,12 @@ func run(ctx context.Context, opts *options.Options) error {
return err
}

crtlmetrics.Registry.MustRegister(metrics.ClusterCollectors()...)
crtlmetrics.Registry.MustRegister(metrics.ResourceCollectorsForAgent()...)
crtlmetrics.Registry.MustRegister(metrics.PoolCollectors()...)
ctrlmetrics.Registry.MustRegister(metrics.ClusterCollectors()...)
ctrlmetrics.Registry.MustRegister(metrics.ResourceCollectorsForAgent()...)
ctrlmetrics.Registry.MustRegister(metrics.PoolCollectors()...)
ctrlmetrics.Registry.MustRegister(metrics.NewBuildInfoCollector())

if err = setupControllers(controllerManager, opts, ctx.Done()); err != nil {
if err = setupControllers(ctx, controllerManager, opts); err != nil {
return err
}
@@ -247,25 +259,27 @@ func run(ctx context.Context, opts *options.Options) error {
return nil
}

func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stopChan <-chan struct{}) error {
func setupControllers(ctx context.Context, mgr controllerruntime.Manager, opts *options.Options) error {
restConfig := mgr.GetConfig()
dynamicClientSet := dynamic.NewForConfigOrDie(restConfig)
controlPlaneInformerManager := genericmanager.NewSingleClusterInformerManager(dynamicClientSet, 0, stopChan)
controlPlaneInformerManager := genericmanager.NewSingleClusterInformerManager(ctx, dynamicClientSet, 0)
controlPlaneKubeClientSet := kubeclientset.NewForConfigOrDie(restConfig)

// We need a service lister to build a resource interpreter with `ClusterIPServiceResolver`
// which allows connection to the customized interpreter webhook without a cluster DNS service.
sharedFactory := informers.NewSharedInformerFactory(controlPlaneKubeClientSet, 0)
serviceLister := sharedFactory.Core().V1().Services().Lister()
sharedFactory.Start(stopChan)
sharedFactory.WaitForCacheSync(stopChan)
sharedFactory.Start(ctx.Done())
sharedFactory.WaitForCacheSync(ctx.Done())

resourceInterpreter := resourceinterpreter.NewResourceInterpreter(controlPlaneInformerManager, serviceLister)
if err := mgr.Add(resourceInterpreter); err != nil {
return fmt.Errorf("failed to setup custom resource interpreter: %w", err)
if err := resourceInterpreter.Start(ctx); err != nil {
return fmt.Errorf("failed to start resource interpreter: %w", err)
}
rateLimiterGetter := util.GetClusterRateLimiterGetter().SetDefaultLimits(opts.ClusterAPIQPS, opts.ClusterAPIBurst)
clusterClientOption := &util.ClientOption{RateLimiterGetter: rateLimiterGetter.GetRateLimiter}

objectWatcher := objectwatcher.NewObjectWatcher(mgr.GetClient(), mgr.GetRESTMapper(), util.NewClusterDynamicClientSetForAgent, clusterClientOption, resourceInterpreter)
objectWatcher := objectwatcher.NewObjectWatcher(mgr.GetClient(), mgr.GetRESTMapper(), util.NewClusterDynamicClientSetForAgent, resourceInterpreter)
controllerContext := controllerscontext.Context{
Mgr: mgr,
ObjectWatcher: objectWatcher,
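Throughout this refactor, `setupControllers` takes a `context.Context` instead of a raw stop channel. Since `ctx.Done()` is itself a `<-chan struct{}`, legacy stop-channel APIs such as `SharedInformerFactory.Start` can be fed directly from the context, which is exactly what the diff does. A toy illustration of the two styles coexisting:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// runUntilStopped stands in for a legacy component that still takes a stop
// channel rather than a context.
func runUntilStopped(stopCh <-chan struct{}) {
	<-stopCh
	fmt.Println("stopped")
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	// Context-driven shutdown feeding a stop-channel API.
	go runUntilStopped(ctx.Done())
	time.Sleep(10 * time.Millisecond)
	cancel()
	time.Sleep(10 * time.Millisecond)
}
```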
@@ -278,8 +292,6 @@ func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stop
ClusterSuccessThreshold: opts.ClusterSuccessThreshold,
ClusterFailureThreshold: opts.ClusterFailureThreshold,
ClusterCacheSyncTimeout: opts.ClusterCacheSyncTimeout,
ClusterAPIQPS: opts.ClusterAPIQPS,
ClusterAPIBurst: opts.ClusterAPIBurst,
ConcurrentWorkSyncs: opts.ConcurrentWorkSyncs,
RateLimiterOptions: opts.RateLimiterOpts,
EnableClusterResourceModeling: opts.EnableClusterResourceModeling,

@@ -287,8 +299,9 @@ func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stop
CertRotationRemainingTimeThreshold: opts.CertRotationRemainingTimeThreshold,
KarmadaKubeconfigNamespace: opts.KarmadaKubeconfigNamespace,
},
StopChan: stopChan,
Context: ctx,
ResourceInterpreter: resourceInterpreter,
ClusterClientOption: clusterClientOption,
}

if err := controllers.StartControllers(controllerContext, controllersDisabledByDefault); err != nil {

@@ -297,7 +310,7 @@ func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stop

// Ensure the InformerManager stops when the stop channel closes
go func() {
<-stopChan
<-ctx.Done()
genericmanager.StopInstance()
}()

@@ -312,10 +325,9 @@ func startClusterStatusController(ctx controllerscontext.Context) (bool, error)
PredicateFunc: helper.NewClusterPredicateOnAgent(ctx.Opts.ClusterName),
TypedInformerManager: typedmanager.GetInstance(),
GenericInformerManager: genericmanager.GetInstance(),
StopChan: ctx.StopChan,
ClusterClientSetFunc: util.NewClusterClientSetForAgent,
ClusterDynamicClientSetFunc: util.NewClusterDynamicClientSetForAgent,
ClusterClientOption: &util.ClientOption{QPS: ctx.Opts.ClusterAPIQPS, Burst: ctx.Opts.ClusterAPIBurst},
ClusterClientOption: ctx.ClusterClientOption,
ClusterStatusUpdateFrequency: ctx.Opts.ClusterStatusUpdateFrequency,
ClusterLeaseDuration: ctx.Opts.ClusterLeaseDuration,
ClusterLeaseRenewIntervalFraction: ctx.Opts.ClusterLeaseRenewIntervalFraction,

@@ -337,9 +349,8 @@ func startExecutionController(ctx controllerscontext.Context) (bool, error) {
EventRecorder: ctx.Mgr.GetEventRecorderFor(execution.ControllerName),
RESTMapper: ctx.Mgr.GetRESTMapper(),
ObjectWatcher: ctx.ObjectWatcher,
PredicateFunc: helper.NewExecutionPredicateOnAgent(),
InformerManager: genericmanager.GetInstance(),
RatelimiterOptions: ctx.Opts.RateLimiterOptions,
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
}
if err := executionController.SetupWithManager(ctx.Mgr); err != nil {
return false, err

@@ -353,9 +364,8 @@ func startWorkStatusController(ctx controllerscontext.Context) (bool, error) {
EventRecorder: ctx.Mgr.GetEventRecorderFor(status.WorkStatusControllerName),
RESTMapper: ctx.Mgr.GetRESTMapper(),
InformerManager: genericmanager.GetInstance(),
StopChan: ctx.StopChan,
Context: ctx.Context,
ObjectWatcher: ctx.ObjectWatcher,
PredicateFunc: helper.NewExecutionPredicateOnAgent(),
ClusterDynamicClientSetFunc: util.NewClusterDynamicClientSetForAgent,
ClusterCacheSyncTimeout: ctx.Opts.ClusterCacheSyncTimeout,
ConcurrentWorkStatusSyncs: ctx.Opts.ConcurrentWorkSyncs,

@@ -375,11 +385,15 @@ func startServiceExportController(ctx controllerscontext.Context) (bool, error)
EventRecorder: ctx.Mgr.GetEventRecorderFor(mcs.ServiceExportControllerName),
RESTMapper: ctx.Mgr.GetRESTMapper(),
InformerManager: genericmanager.GetInstance(),
StopChan: ctx.StopChan,
Context: ctx.Context,
WorkerNumber: 3,
PredicateFunc: helper.NewPredicateForServiceExportControllerOnAgent(ctx.Opts.ClusterName),
ClusterDynamicClientSetFunc: util.NewClusterDynamicClientSetForAgent,
ClusterCacheSyncTimeout: ctx.Opts.ClusterCacheSyncTimeout,
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
}
if err := indexregistry.RegisterWorkIndexByFieldSuspendDispatching(ctx.Context, ctx.Mgr); err != nil {
return false, err
}
serviceExportController.RunWorkQueue()
if err := serviceExportController.SetupWithManager(ctx.Mgr); err != nil {

@@ -397,11 +411,12 @@ func startEndpointSliceCollectController(ctx controllerscontext.Context) (enable
Client: ctx.Mgr.GetClient(),
RESTMapper: ctx.Mgr.GetRESTMapper(),
InformerManager: genericmanager.GetInstance(),
StopChan: ctx.StopChan,
Context: ctx.Context,
WorkerNumber: 3,
PredicateFunc: helper.NewPredicateForEndpointSliceCollectControllerOnAgent(opts.ClusterName),
ClusterDynamicClientSetFunc: util.NewClusterDynamicClientSet,
ClusterCacheSyncTimeout: opts.ClusterCacheSyncTimeout,
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
}
endpointSliceCollectController.RunWorkQueue()
if err := endpointSliceCollectController.SetupWithManager(ctx.Mgr); err != nil {
@@ -30,6 +30,7 @@ import (
"github.com/karmada-io/karmada/pkg/sharedcli/profileflag"
"github.com/karmada-io/karmada/pkg/sharedcli/ratelimiterflag"
"github.com/karmada-io/karmada/pkg/util"
"github.com/karmada-io/karmada/pkg/util/names"
)

const (

@@ -147,7 +148,7 @@ func NewOptions() *Options {
LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
LeaderElect: true,
ResourceLock: resourcelock.LeasesResourceLock,
ResourceNamespace: util.NamespaceKarmadaSystem,
ResourceNamespace: names.NamespaceKarmadaSystem,
},
}
}

@@ -163,7 +164,7 @@ func (o *Options) AddFlags(fs *pflag.FlagSet, allControllers []string) {
strings.Join(allControllers, ", "),
))
fs.BoolVar(&o.LeaderElection.LeaderElect, "leader-elect", true, "Start a leader election client and gain leadership before executing the main loop. Enable this when running replicated components for high availability.")
fs.StringVar(&o.LeaderElection.ResourceNamespace, "leader-elect-resource-namespace", util.NamespaceKarmadaSystem, "The namespace of resource object that is used for locking during leader election.")
fs.StringVar(&o.LeaderElection.ResourceNamespace, "leader-elect-resource-namespace", names.NamespaceKarmadaSystem, "The namespace of resource object that is used for locking during leader election.")
fs.DurationVar(&o.LeaderElection.LeaseDuration.Duration, "leader-elect-lease-duration", defaultElectionLeaseDuration.Duration, ""+
"The duration that non-leader candidates will wait after observing a leadership "+
"renewal until attempting to acquire leadership of a led but unrenewed leader "+
@@ -20,10 +20,10 @@ import (
"os"

"k8s.io/component-base/cli"
"k8s.io/component-base/logs"
_ "k8s.io/component-base/logs/json/register" // for JSON log format registration
"k8s.io/klog/v2"
controllerruntime "sigs.k8s.io/controller-runtime"
_ "sigs.k8s.io/controller-runtime/pkg/metrics"

"github.com/karmada-io/karmada/cmd/agent/app"
)

@@ -38,5 +38,7 @@ func main() {
controllerruntime.SetLogger(klog.Background())
cmd := app.NewAgentCommand(ctx)
code := cli.Run(cmd)
// Ensure any buffered log entries are flushed
logs.FlushLogs()
os.Exit(code)
}
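The explicit `logs.FlushLogs()` before `os.Exit` matters because `os.Exit` skips deferred calls and klog may buffer writes, so flushing inline in `main` is the only reliable place. A tiny sketch of the pitfall:

```go
package main

import (
	"os"

	"k8s.io/component-base/logs"
	"k8s.io/klog/v2"
)

func main() {
	defer logs.FlushLogs() // never runs: os.Exit below skips deferred calls
	klog.Info("starting")

	// Flush explicitly before exiting, exactly as the diff does after cli.Run.
	logs.FlushLogs()
	os.Exit(0)
}
```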
@@ -21,23 +21,45 @@ import (

"github.com/spf13/cobra"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/component-base/logs"
logsv1 "k8s.io/component-base/logs/api/v1"
"k8s.io/component-base/term"

"github.com/karmada-io/karmada/cmd/aggregated-apiserver/app/options"
"github.com/karmada-io/karmada/pkg/features"
"github.com/karmada-io/karmada/pkg/sharedcli"
"github.com/karmada-io/karmada/pkg/sharedcli/klogflag"
"github.com/karmada-io/karmada/pkg/util/names"
"github.com/karmada-io/karmada/pkg/version/sharedcommand"
)

// NewAggregatedApiserverCommand creates a *cobra.Command object with default parameters
func NewAggregatedApiserverCommand(ctx context.Context) *cobra.Command {
opts := options.NewOptions()
logConfig := logsv1.NewLoggingConfiguration()
fss := cliflag.NamedFlagSets{}

// Set klog flags
logsFlagSet := fss.FlagSet("logs")
logs.AddFlags(logsFlagSet, logs.SkipLoggingConfigurationFlags())
logsv1.AddFlags(logConfig, logsFlagSet)
klogflag.Add(logsFlagSet)

genericFlagSet := fss.FlagSet("generic")
opts.AddFlags(genericFlagSet)

cmd := &cobra.Command{
Use: "karmada-aggregated-apiserver",
Use: names.KarmadaAggregatedAPIServerComponentName,
Long: `The karmada-aggregated-apiserver starts an aggregated server.
It is responsible for registering the Cluster API and provides the ability to aggregate APIs,
allowing users to access member clusters from the control plane directly.`,
PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
if err := logsv1.ValidateAndApply(logConfig, features.FeatureGate); err != nil {
return err
}
logs.InitLogs()
return nil
},
RunE: func(_ *cobra.Command, _ []string) error {
if err := opts.Complete(); err != nil {
return err

@@ -52,16 +74,7 @@ allowing users to access member clusters from the control plane directly.`,
},
}

fss := cliflag.NamedFlagSets{}

genericFlagSet := fss.FlagSet("generic")
opts.AddFlags(genericFlagSet)

// Set klog flags
logsFlagSet := fss.FlagSet("logs")
klogflag.Add(logsFlagSet)

cmd.AddCommand(sharedcommand.NewCmdVersion("karmada-aggregated-apiserver"))
cmd.AddCommand(sharedcommand.NewCmdVersion(names.KarmadaAggregatedAPIServerComponentName))
cmd.Flags().AddFlagSet(genericFlagSet)
cmd.Flags().AddFlagSet(logsFlagSet)
@@ -33,9 +33,10 @@ import (
genericfilters "k8s.io/apiserver/pkg/server/filters"
genericoptions "k8s.io/apiserver/pkg/server/options"
"k8s.io/apiserver/pkg/storage/storagebackend"
"k8s.io/apiserver/pkg/util/compatibility"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilversion "k8s.io/apiserver/pkg/util/version"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/klog/v2"
netutils "k8s.io/utils/net"

@@ -120,9 +121,9 @@ func (o *Options) Run(ctx context.Context) error {
}

restConfig := config.GenericConfig.ClientConfig
restConfig.QPS, restConfig.Burst = o.KubeAPIQPS, o.KubeAPIBurst
restConfig.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(o.KubeAPIQPS, o.KubeAPIBurst)
secretLister := config.GenericConfig.SharedInformerFactory.Core().V1().Secrets().Lister()
config.GenericConfig.EffectiveVersion = utilversion.NewEffectiveVersion("1.0")
config.GenericConfig.EffectiveVersion = compatibility.DefaultBuildEffectiveVersion()

server, err := config.Complete().New(restConfig, secretLister)
if err != nil {
@@ -20,6 +20,7 @@ import (
"os"

"k8s.io/component-base/cli"
"k8s.io/component-base/logs"
_ "k8s.io/component-base/logs/json/register" // for JSON log format registration
controllerruntime "sigs.k8s.io/controller-runtime"

@@ -29,6 +30,7 @@ import (
func main() {
ctx := controllerruntime.SetupSignalHandler()
cmd := app.NewAggregatedApiserverCommand(ctx)
code := cli.Run(cmd)
os.Exit(code)
exitCode := cli.Run(cmd)
logs.FlushLogs()
os.Exit(exitCode)
}
@@ -29,18 +29,22 @@ import (
"k8s.io/client-go/informers"
kubeclientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/util/flowcontrol"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/component-base/logs"
logsv1 "k8s.io/component-base/logs/api/v1"
"k8s.io/component-base/term"
"k8s.io/klog/v2"
resourceclient "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1"
"k8s.io/metrics/pkg/client/custom_metrics"
"k8s.io/metrics/pkg/client/external_metrics"
"k8s.io/utils/ptr"
controllerruntime "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/config"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/healthz"
crtlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
"sigs.k8s.io/controller-runtime/pkg/predicate"

@@ -67,6 +71,7 @@ import (
"github.com/karmada-io/karmada/pkg/controllers/namespace"
"github.com/karmada-io/karmada/pkg/controllers/remediation"
"github.com/karmada-io/karmada/pkg/controllers/status"
"github.com/karmada-io/karmada/pkg/controllers/taint"
"github.com/karmada-io/karmada/pkg/controllers/unifiedauth"
"github.com/karmada-io/karmada/pkg/controllers/workloadrebalancer"
"github.com/karmada-io/karmada/pkg/dependenciesdistributor"

@@ -84,6 +89,8 @@ import (
"github.com/karmada-io/karmada/pkg/util/fedinformer/typedmanager"
"github.com/karmada-io/karmada/pkg/util/gclient"
"github.com/karmada-io/karmada/pkg/util/helper"
"github.com/karmada-io/karmada/pkg/util/indexregistry"
"github.com/karmada-io/karmada/pkg/util/names"
"github.com/karmada-io/karmada/pkg/util/objectwatcher"
"github.com/karmada-io/karmada/pkg/util/overridemanager"
"github.com/karmada-io/karmada/pkg/util/restmapper"
@@ -93,43 +100,59 @@ import (

// NewControllerManagerCommand creates a *cobra.Command object with default parameters
func NewControllerManagerCommand(ctx context.Context) *cobra.Command {
opts := options.NewOptions()

cmd := &cobra.Command{
Use: "karmada-controller-manager",
Long: `The karmada-controller-manager runs various controllers.
The controllers watch Karmada objects and then talk to the underlying clusters' API servers
to create regular Kubernetes resources.`,
RunE: func(_ *cobra.Command, _ []string) error {
// validate options
if errs := opts.Validate(); len(errs) != 0 {
return errs.ToAggregate()
}

return Run(ctx, opts)
},
}

logConfig := logsv1.NewLoggingConfiguration()
fss := cliflag.NamedFlagSets{}

logsFlagSet := fss.FlagSet("logs")
logs.AddFlags(logsFlagSet, logs.SkipLoggingConfigurationFlags())
logsv1.AddFlags(logConfig, logsFlagSet)
klogflag.Add(logsFlagSet)

genericFlagSet := fss.FlagSet("generic")
// Add the flag(--kubeconfig) that is added by controller-runtime
// Add the flag(--kubeconfig) that is added by controller-runtime.
// (https://github.com/kubernetes-sigs/controller-runtime/blob/v0.11.1/pkg/client/config/config.go#L39),
// and update the flag usage.
genericFlagSet.AddGoFlagSet(flag.CommandLine)
genericFlagSet.Lookup("kubeconfig").Usage = "Path to karmada control plane kubeconfig file."
opts := options.NewOptions()
opts.AddFlags(genericFlagSet, controllers.ControllerNames(), sets.List(controllersDisabledByDefault))

// Set klog flags
logsFlagSet := fss.FlagSet("logs")
klogflag.Add(logsFlagSet)
cmd := &cobra.Command{
Use: names.KarmadaControllerManagerComponentName,
Long: `The karmada-controller-manager runs various controllers.
The controllers watch Karmada objects and then talk to the underlying
clusters' API servers to create regular Kubernetes resources.`,

cmd.AddCommand(sharedcommand.NewCmdVersion("karmada-controller-manager"))
PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
if err := logsv1.ValidateAndApply(logConfig, features.FeatureGate); err != nil {
return err
}
logs.InitLogs()

// Starting from version 0.15.0, controller-runtime expects its consumers to set a logger through log.SetLogger.
// If SetLogger is not called within the first 30 seconds of a binary's lifetime, it will be
// set to a NullLogSink and report an error. To silence the "log.SetLogger(...) was never called; logs will not be displayed" error,
// we set a logger through log.SetLogger here.
// For more info, see: https://github.com/karmada-io/karmada/pull/4885.
controllerruntime.SetLogger(klog.Background())
return nil
},

RunE: func(_ *cobra.Command, _ []string) error {
if errs := opts.Validate(); len(errs) != 0 {
return errs.ToAggregate()
}
return Run(ctx, opts)
},
}

cmd.AddCommand(sharedcommand.NewCmdVersion(names.KarmadaControllerManagerComponentName))
cmd.Flags().AddFlagSet(genericFlagSet)
cmd.Flags().AddFlagSet(logsFlagSet)

cols, _, _ := term.TerminalSize(cmd.OutOrStdout())
sharedcli.SetUsageAndHelpFunc(cmd, fss, cols)

return cmd
}

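The reordering above moves all flag-set construction ahead of the `cobra.Command` literal so the command body can close over already-built sets. A stripped-down sketch of the `NamedFlagSets` pattern, with flag names that are illustrative rather than Karmada's:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
	cliflag "k8s.io/component-base/cli/flag"
)

func main() {
	// Group flags into named sets before the command is constructed,
	// mirroring the "logs" and "generic" sets in the diff.
	fss := cliflag.NamedFlagSets{}

	logsFlagSet := fss.FlagSet("logs")
	verbosity := logsFlagSet.Int("v", 0, "log verbosity") // illustrative flag

	genericFlagSet := fss.FlagSet("generic")
	kubeconfig := genericFlagSet.String("kubeconfig", "", "path to kubeconfig")

	cmd := &cobra.Command{
		Use: "demo",
		RunE: func(_ *cobra.Command, _ []string) error {
			fmt.Println("v:", *verbosity, "kubeconfig:", *kubeconfig)
			return nil
		},
	}
	// Attach the sets to the command in one place.
	cmd.Flags().AddFlagSet(genericFlagSet)
	cmd.Flags().AddFlagSet(logsFlagSet)

	if err := cmd.Execute(); err != nil {
		panic(err)
	}
}
```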
@@ -143,7 +166,7 @@ func Run(ctx context.Context, opts *options.Options) error {
if err != nil {
panic(err)
}
controlPlaneRestConfig.QPS, controlPlaneRestConfig.Burst = opts.KubeAPIQPS, opts.KubeAPIBurst
controlPlaneRestConfig.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(opts.KubeAPIQPS, opts.KubeAPIBurst)
controllerManager, err := controllerruntime.NewManager(controlPlaneRestConfig, controllerruntime.Options{
Logger: klog.Background(),
Scheme: gclient.NewSchema(),

@@ -171,6 +194,7 @@ func Run(ctx context.Context, opts *options.Options) error {
schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Namespace"}.GroupKind().String(): opts.ConcurrentNamespaceSyncs,
},
CacheSyncTimeout: opts.ClusterCacheSyncTimeout.Duration,
UsePriorityQueue: ptr.To(features.FeatureGate.Enabled(features.ControllerPriorityQueue)),
},
NewCache: func(config *rest.Config, opts cache.Options) (cache.Cache, error) {
opts.DefaultTransform = fedinformer.StripUnusedFields

@@ -187,15 +211,12 @@ func Run(ctx context.Context, opts *options.Options) error {
return err
}

crtlmetrics.Registry.MustRegister(metrics.ClusterCollectors()...)
crtlmetrics.Registry.MustRegister(metrics.ResourceCollectors()...)
crtlmetrics.Registry.MustRegister(metrics.PoolCollectors()...)
ctrlmetrics.Registry.MustRegister(metrics.ClusterCollectors()...)
ctrlmetrics.Registry.MustRegister(metrics.ResourceCollectors()...)
ctrlmetrics.Registry.MustRegister(metrics.PoolCollectors()...)
ctrlmetrics.Registry.MustRegister(metrics.NewBuildInfoCollector())

if err := helper.IndexWork(ctx, controllerManager); err != nil {
klog.Fatalf("Failed to index Work: %v", err)
}

setupControllers(controllerManager, opts, ctx.Done())
setupControllers(ctx, controllerManager, opts)

// blocks until the context is done.
if err := controllerManager.Start(ctx); err != nil {
@@ -226,6 +247,7 @@ func init() {
controllers["unifiedAuth"] = startUnifiedAuthController
controllers["federatedResourceQuotaSync"] = startFederatedResourceQuotaSyncController
controllers["federatedResourceQuotaStatus"] = startFederatedResourceQuotaStatusController
controllers["federatedResourceQuotaEnforcement"] = startFederatedResourceQuotaEnforcementController
controllers["gracefulEviction"] = startGracefulEvictionController
controllers["applicationFailover"] = startApplicationFailoverController
controllers["federatedHorizontalPodAutoscaler"] = startFederatedHorizontalPodAutoscalerController

@@ -238,40 +260,51 @@ func init() {
controllers["remedy"] = startRemedyController
controllers["workloadRebalancer"] = startWorkloadRebalancerController
controllers["agentcsrapproving"] = startAgentCSRApprovingController
controllers["clustertaintpolicy"] = startClusterTaintPolicyController
}

func startClusterController(ctx controllerscontext.Context) (enabled bool, err error) {
mgr := ctx.Mgr
opts := ctx.Opts

// Indexes are added to help the cluster-controller and TaintManager quickly locate ResourceBinding
// and ClusterResourceBinding resources associated with a given cluster when eviction is needed.
if err := indexregistry.RegisterResourceBindingIndexByFieldCluster(ctx.Context, mgr); err != nil {
return false, err
}
if err := indexregistry.RegisterClusterResourceBindingIndexByFieldCluster(ctx.Context, mgr); err != nil {
return false, err
}

clusterController := &cluster.Controller{
Client: mgr.GetClient(),
EventRecorder: mgr.GetEventRecorderFor(cluster.ControllerName),
ClusterMonitorPeriod: opts.ClusterMonitorPeriod.Duration,
ClusterMonitorGracePeriod: opts.ClusterMonitorGracePeriod.Duration,
ClusterStartupGracePeriod: opts.ClusterStartupGracePeriod.Duration,
FailoverEvictionTimeout: opts.FailoverEvictionTimeout.Duration,
EnableTaintManager: ctx.Opts.EnableTaintManager,
ClusterTaintEvictionRetryFrequency: 10 * time.Second,
ExecutionSpaceRetryFrequency: 10 * time.Second,
Client: mgr.GetClient(),
EventRecorder: mgr.GetEventRecorderFor(cluster.ControllerName),
ClusterMonitorPeriod: opts.ClusterMonitorPeriod.Duration,
ClusterMonitorGracePeriod: opts.ClusterMonitorGracePeriod.Duration,
ClusterStartupGracePeriod: opts.ClusterStartupGracePeriod.Duration,
CleanupCheckInterval: 10 * time.Second,
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
}
if err := clusterController.SetupWithManager(mgr); err != nil {
return false, err
}

if ctx.Opts.EnableTaintManager {
if err := cluster.IndexField(mgr); err != nil {
return false, err
}
// Taint-based eviction should only take effect if the Failover feature is enabled
if ctx.Opts.EnableTaintManager && features.FeatureGate.Enabled(features.Failover) {
taintManager := &cluster.NoExecuteTaintManager{
Client: mgr.GetClient(),
EventRecorder: mgr.GetEventRecorderFor(cluster.TaintManagerName),
ClusterTaintEvictionRetryFrequency: 10 * time.Second,
ConcurrentReconciles: 3,
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
EnableNoExecuteTaintEviction: ctx.Opts.FailoverConfiguration.EnableNoExecuteTaintEviction,
NoExecuteTaintEvictionPurgeMode: ctx.Opts.FailoverConfiguration.NoExecuteTaintEvictionPurgeMode,
}
if err := taintManager.SetupWithManager(mgr); err != nil {
return false, err
}
} else {
klog.Infof("Skipping registration of TaintManager, please check that TaintManager option and Failover feature-gate are enabled.")
}

return true, nil
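Gating the taint manager on both the `EnableTaintManager` option and the `Failover` feature gate follows the usual component-base pattern. A self-contained sketch with a made-up gate name, only to show the shape of the check:

```go
package main

import (
	"fmt"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/component-base/featuregate"
)

// Failover is a demo gate; the real gate lives in Karmada's features package.
const Failover featuregate.Feature = "Failover"

var gate = featuregate.NewFeatureGate()

func init() {
	utilruntime.Must(gate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		Failover: {Default: true, PreRelease: featuregate.Beta},
	}))
}

func main() {
	enableTaintManager := true
	// Register the controller only when both the option and the gate allow it,
	// mirroring the condition added in the diff.
	if enableTaintManager && gate.Enabled(Failover) {
		fmt.Println("registering taint manager")
	} else {
		fmt.Println("skipping taint manager")
	}
}
```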
@@ -280,7 +313,6 @@ func startClusterController(ctx controllerscontext.Context) (enabled bool, err e
func startClusterStatusController(ctx controllerscontext.Context) (enabled bool, err error) {
mgr := ctx.Mgr
opts := ctx.Opts
stopChan := ctx.StopChan
clusterPredicateFunc := predicate.Funcs{
CreateFunc: func(createEvent event.CreateEvent) bool {
obj := createEvent.Object.(*clusterv1alpha1.Cluster)

@@ -320,10 +352,9 @@ func startClusterStatusController(ctx controllerscontext.Context) (enabled bool,
PredicateFunc: clusterPredicateFunc,
TypedInformerManager: typedmanager.GetInstance(),
GenericInformerManager: genericmanager.GetInstance(),
StopChan: stopChan,
ClusterClientSetFunc: util.NewClusterClientSet,
ClusterDynamicClientSetFunc: util.NewClusterDynamicClientSet,
ClusterClientOption: &util.ClientOption{QPS: opts.ClusterAPIQPS, Burst: opts.ClusterAPIBurst},
ClusterClientOption: ctx.ClusterClientOption,
ClusterStatusUpdateFrequency: opts.ClusterStatusUpdateFrequency,
ClusterLeaseDuration: opts.ClusterLeaseDuration,
ClusterLeaseRenewIntervalFraction: opts.ClusterLeaseRenewIntervalFraction,
@@ -340,6 +371,12 @@ func startClusterStatusController(ctx controllerscontext.Context) (enabled bool,
}

func startBindingController(ctx controllerscontext.Context) (enabled bool, err error) {
// To efficiently clean up Work resources created by the bindingController when a cluster or a RB/CRB is deleted,
// we index the Work resources to reduce the overhead during each check.
if err = indexregistry.RegisterWorkIndexByLabelResourceBindingID(ctx.Context, ctx.Mgr); err != nil {
klog.Errorf("Failed to register index for Work based on ResourceBinding ID: %v", err)
return false, err
}
bindingController := &binding.ResourceBindingController{
Client: ctx.Mgr.GetClient(),
DynamicClient: ctx.DynamicClientSet,
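These index registrations are what make the cleanups cheap: instead of listing every Work and filtering in memory, the manager's cache answers List calls keyed by the indexed value. The Karmada helpers themselves are not shown in this diff; the sketch below is a generic controller-runtime field index on a Pod field, only to show the shape of the API that helpers like `RegisterWorkIndexByLabelResourceBindingID` presumably wrap:

```go
package indexdemo

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

// registerPodNodeIndex registers an index with the manager's cache so later
// List calls can filter on the indexed key instead of scanning every object.
func registerPodNodeIndex(ctx context.Context, mgr manager.Manager) error {
	return mgr.GetFieldIndexer().IndexField(ctx, &corev1.Pod{}, "spec.nodeName",
		func(obj client.Object) []string {
			pod := obj.(*corev1.Pod)
			return []string{pod.Spec.NodeName}
		})
}
```

After registration, a call such as `mgr.GetClient().List(ctx, &pods, client.MatchingFields{"spec.nodeName": "node-1"})` is served from the index.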
@ -354,6 +391,10 @@ func startBindingController(ctx controllerscontext.Context) (enabled bool, err e
|
|||
return false, err
|
||||
}
|
||||
|
||||
if err = indexregistry.RegisterWorkIndexByLabelClusterResourceBindingID(ctx.Context, ctx.Mgr); err != nil {
|
||||
klog.Errorf("Failed to register index for Work based on ClusterResourceBinding ID: %v", err)
|
||||
return false, err
|
||||
}
|
||||
clusterResourceBindingController := &binding.ClusterResourceBindingController{
|
||||
Client: ctx.Mgr.GetClient(),
|
||||
DynamicClient: ctx.DynamicClientSet,
|
||||
|
@ -371,6 +412,11 @@ func startBindingController(ctx controllerscontext.Context) (enabled bool, err e
|
|||
}
|
||||
|
||||
func startBindingStatusController(ctx controllerscontext.Context) (enabled bool, err error) {
|
||||
// Indexing Work resources allows efficient retrieval for aggregating status.
|
||||
if err = indexregistry.RegisterWorkIndexByLabelResourceBindingID(ctx.Context, ctx.Mgr); err != nil {
|
||||
klog.Errorf("Failed to register index for Work based on ResourceBinding ID: %v", err)
|
||||
return false, err
|
||||
}
|
||||
rbStatusController := &status.RBStatusController{
|
||||
Client: ctx.Mgr.GetClient(),
|
||||
DynamicClient: ctx.DynamicClientSet,
|
||||
|
@ -384,6 +430,10 @@ func startBindingStatusController(ctx controllerscontext.Context) (enabled bool,
|
|||
return false, err
|
||||
}
|
||||
|
||||
if err = indexregistry.RegisterWorkIndexByLabelClusterResourceBindingID(ctx.Context, ctx.Mgr); err != nil {
|
||||
klog.Errorf("Failed to register index for Work based on ClusterResourceBinding ID: %v", err)
|
||||
return false, err
|
||||
}
|
||||
crbStatusController := &status.CRBStatusController{
|
||||
Client: ctx.Mgr.GetClient(),
|
||||
DynamicClient: ctx.DynamicClientSet,
|
||||
|
@ -406,9 +456,9 @@ func startExecutionController(ctx controllerscontext.Context) (enabled bool, err
|
|||
EventRecorder: ctx.Mgr.GetEventRecorderFor(execution.ControllerName),
|
||||
RESTMapper: ctx.Mgr.GetRESTMapper(),
|
||||
ObjectWatcher: ctx.ObjectWatcher,
|
||||
PredicateFunc: helper.NewExecutionPredicate(ctx.Mgr),
|
||||
WorkPredicateFunc: helper.WorkWithinPushClusterPredicate(ctx.Mgr),
|
||||
InformerManager: genericmanager.GetInstance(),
|
||||
RatelimiterOptions: ctx.Opts.RateLimiterOptions,
|
||||
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
|
||||
}
|
||||
if err := executionController.SetupWithManager(ctx.Mgr); err != nil {
|
||||
return false, err
|
||||
|
@ -423,10 +473,11 @@ func startWorkStatusController(ctx controllerscontext.Context) (enabled bool, er
|
|||
EventRecorder: ctx.Mgr.GetEventRecorderFor(status.WorkStatusControllerName),
|
||||
RESTMapper: ctx.Mgr.GetRESTMapper(),
|
||||
InformerManager: genericmanager.GetInstance(),
|
||||
StopChan: ctx.StopChan,
|
||||
Context: ctx.Context,
|
||||
ObjectWatcher: ctx.ObjectWatcher,
|
||||
PredicateFunc: helper.NewExecutionPredicate(ctx.Mgr),
|
||||
WorkPredicateFunc: helper.WorkWithinPushClusterPredicate(ctx.Mgr),
|
||||
ClusterDynamicClientSetFunc: util.NewClusterDynamicClientSet,
|
||||
ClusterClientOption: ctx.ClusterClientOption,
|
||||
ClusterCacheSyncTimeout: opts.ClusterCacheSyncTimeout,
|
||||
ConcurrentWorkStatusSyncs: opts.ConcurrentWorkSyncs,
|
||||
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
|
||||
|
@ -446,6 +497,7 @@ func startNamespaceController(ctx controllerscontext.Context) (enabled bool, err
|
|||
EventRecorder: ctx.Mgr.GetEventRecorderFor(namespace.ControllerName),
|
||||
SkippedPropagatingNamespaces: ctx.Opts.SkippedPropagatingNamespaces,
|
||||
OverrideManager: ctx.OverrideManager,
|
||||
RateLimiterOptions: ctx.Opts.RateLimiterOptions,
|
||||
}
|
||||
if err := namespaceSyncController.SetupWithManager(ctx.Mgr); err != nil {
|
||||
return false, err
|
||||
|
@@ -460,11 +512,17 @@ func startServiceExportController(ctx controllerscontext.Context) (enabled bool,
 		EventRecorder:               ctx.Mgr.GetEventRecorderFor(mcs.ServiceExportControllerName),
 		RESTMapper:                  ctx.Mgr.GetRESTMapper(),
 		InformerManager:             genericmanager.GetInstance(),
-		StopChan:                    ctx.StopChan,
+		Context:                     ctx.Context,
 		WorkerNumber:                3,
 		PredicateFunc:               helper.NewPredicateForServiceExportController(ctx.Mgr),
 		ClusterDynamicClientSetFunc: util.NewClusterDynamicClientSet,
+		ClusterClientOption:         ctx.ClusterClientOption,
 		ClusterCacheSyncTimeout:     opts.ClusterCacheSyncTimeout,
+		RateLimiterOptions:          ctx.Opts.RateLimiterOptions,
 	}
+	// Add an index so ServiceExportController can quickly find and delete related Work resources.
+	if err = indexregistry.RegisterWorkIndexByFieldSuspendDispatching(ctx.Context, ctx.Mgr); err != nil {
+		return false, err
+	}
 	serviceExportController.RunWorkQueue()
 	if err := serviceExportController.SetupWithManager(ctx.Mgr); err != nil {
@@ -482,11 +540,13 @@ func startEndpointSliceCollectController(ctx controllerscontext.Context) (enable
 		Client:                      ctx.Mgr.GetClient(),
 		RESTMapper:                  ctx.Mgr.GetRESTMapper(),
 		InformerManager:             genericmanager.GetInstance(),
-		StopChan:                    ctx.StopChan,
+		Context:                     ctx.Context,
 		WorkerNumber:                3,
 		PredicateFunc:               helper.NewPredicateForEndpointSliceCollectController(ctx.Mgr),
 		ClusterDynamicClientSetFunc: util.NewClusterDynamicClientSet,
+		ClusterClientOption:         ctx.ClusterClientOption,
 		ClusterCacheSyncTimeout:     opts.ClusterCacheSyncTimeout,
+		RateLimiterOptions:          ctx.Opts.RateLimiterOptions,
 	}
 	endpointSliceCollectController.RunWorkQueue()
 	if err := endpointSliceCollectController.SetupWithManager(ctx.Mgr); err != nil {
@@ -500,10 +560,11 @@ func startEndpointSliceDispatchController(ctx controllerscontext.Context) (enabl
 		return false, nil
 	}
 	endpointSliceSyncController := &multiclusterservice.EndpointsliceDispatchController{
-		Client:          ctx.Mgr.GetClient(),
-		EventRecorder:   ctx.Mgr.GetEventRecorderFor(multiclusterservice.EndpointsliceDispatchControllerName),
-		RESTMapper:      ctx.Mgr.GetRESTMapper(),
-		InformerManager: genericmanager.GetInstance(),
+		Client:             ctx.Mgr.GetClient(),
+		EventRecorder:      ctx.Mgr.GetEventRecorderFor(multiclusterservice.EndpointsliceDispatchControllerName),
+		RESTMapper:         ctx.Mgr.GetRESTMapper(),
+		InformerManager:    genericmanager.GetInstance(),
+		RateLimiterOptions: ctx.Opts.RateLimiterOptions,
 	}
 	if err := endpointSliceSyncController.SetupWithManager(ctx.Mgr); err != nil {
 		return false, err
@@ -513,8 +574,9 @@ func startEndpointSliceDispatchController(ctx controllerscontext.Context) (enabl
 
 func startEndpointSliceController(ctx controllerscontext.Context) (enabled bool, err error) {
 	endpointSliceController := &mcs.EndpointSliceController{
-		Client:        ctx.Mgr.GetClient(),
-		EventRecorder: ctx.Mgr.GetEventRecorderFor(mcs.EndpointSliceControllerName),
+		Client:             ctx.Mgr.GetClient(),
+		EventRecorder:      ctx.Mgr.GetEventRecorderFor(mcs.EndpointSliceControllerName),
+		RateLimiterOptions: ctx.Opts.RateLimiterOptions,
 	}
 	if err := endpointSliceController.SetupWithManager(ctx.Mgr); err != nil {
 		return false, err
@@ -524,8 +586,9 @@ func startEndpointSliceController(ctx controllerscontext.Context) (enabled bool,
 
 func startServiceImportController(ctx controllerscontext.Context) (enabled bool, err error) {
 	serviceImportController := &mcs.ServiceImportController{
-		Client:        ctx.Mgr.GetClient(),
-		EventRecorder: ctx.Mgr.GetEventRecorderFor(mcs.ServiceImportControllerName),
+		Client:             ctx.Mgr.GetClient(),
+		EventRecorder:      ctx.Mgr.GetEventRecorderFor(mcs.ServiceImportControllerName),
+		RateLimiterOptions: ctx.Opts.RateLimiterOptions,
 	}
 	if err := serviceImportController.SetupWithManager(ctx.Mgr); err != nil {
 		return false, err
@@ -535,8 +598,9 @@ func startServiceImportController(ctx controllerscontext.Context) (enabled bool,
 
 func startUnifiedAuthController(ctx controllerscontext.Context) (enabled bool, err error) {
 	unifiedAuthController := &unifiedauth.Controller{
-		Client:        ctx.Mgr.GetClient(),
-		EventRecorder: ctx.Mgr.GetEventRecorderFor(unifiedauth.ControllerName),
+		Client:             ctx.Mgr.GetClient(),
+		EventRecorder:      ctx.Mgr.GetEventRecorderFor(unifiedauth.ControllerName),
+		RateLimiterOptions: ctx.Opts.RateLimiterOptions,
 	}
 	if err := unifiedAuthController.SetupWithManager(ctx.Mgr); err != nil {
 		return false, err
@@ -546,8 +610,9 @@ func startUnifiedAuthController(ctx controllerscontext.Context) (enabled bool, e
 
 func startFederatedResourceQuotaSyncController(ctx controllerscontext.Context) (enabled bool, err error) {
 	controller := federatedresourcequota.SyncController{
-		Client:        ctx.Mgr.GetClient(),
-		EventRecorder: ctx.Mgr.GetEventRecorderFor(federatedresourcequota.SyncControllerName),
+		Client:             ctx.Mgr.GetClient(),
+		EventRecorder:      ctx.Mgr.GetEventRecorderFor(federatedresourcequota.SyncControllerName),
+		RateLimiterOptions: ctx.Opts.RateLimiterOptions,
 	}
 	if err = controller.SetupWithManager(ctx.Mgr); err != nil {
 		return false, err
@@ -557,8 +622,26 @@ func startFederatedResourceQuotaSyncController(ctx controllerscontext.Context) (
 
 func startFederatedResourceQuotaStatusController(ctx controllerscontext.Context) (enabled bool, err error) {
 	controller := federatedresourcequota.StatusController{
 		Client:             ctx.Mgr.GetClient(),
 		EventRecorder:      ctx.Mgr.GetEventRecorderFor(federatedresourcequota.StatusControllerName),
+		RateLimiterOptions: ctx.Opts.RateLimiterOptions,
 	}
 	if err = controller.SetupWithManager(ctx.Mgr); err != nil {
 		return false, err
 	}
 	return true, nil
 }
 
+func startFederatedResourceQuotaEnforcementController(ctx controllerscontext.Context) (enabled bool, err error) {
+	if !features.FeatureGate.Enabled(features.FederatedQuotaEnforcement) {
+		return false, nil
+	}
+	controller := federatedresourcequota.QuotaEnforcementController{
+		Client:        ctx.Mgr.GetClient(),
-		EventRecorder: ctx.Mgr.GetEventRecorderFor(federatedresourcequota.StatusControllerName),
+		EventRecorder: ctx.Mgr.GetEventRecorderFor(federatedresourcequota.QuotaEnforcementControllerName),
+		Recalculation: federatedresourcequota.QuotaRecalculation{
+			ResyncPeriod: ctx.Opts.FederatedResourceQuotaOptions.ResourceQuotaSyncPeriod,
+		},
+	}
+	if err = controller.SetupWithManager(ctx.Mgr); err != nil {
+		return false, err
@@ -595,6 +678,7 @@ func startApplicationFailoverController(ctx controllerscontext.Context) (enabled
 		Client:              ctx.Mgr.GetClient(),
 		EventRecorder:       ctx.Mgr.GetEventRecorderFor(applicationfailover.RBApplicationFailoverControllerName),
 		ResourceInterpreter: ctx.ResourceInterpreter,
+		RateLimiterOptions:  ctx.Opts.RateLimiterOptions,
 	}
 	if err = rbApplicationFailoverController.SetupWithManager(ctx.Mgr); err != nil {
 		return false, err
@@ -604,6 +688,7 @@ func startApplicationFailoverController(ctx controllerscontext.Context) (enabled
 		Client:              ctx.Mgr.GetClient(),
 		EventRecorder:       ctx.Mgr.GetEventRecorderFor(applicationfailover.CRBApplicationFailoverControllerName),
 		ResourceInterpreter: ctx.ResourceInterpreter,
+		RateLimiterOptions:  ctx.Opts.RateLimiterOptions,
 	}
 	if err = crbApplicationFailoverController.SetupWithManager(ctx.Mgr); err != nil {
 		return false, err
@@ -616,7 +701,7 @@ func startFederatedHorizontalPodAutoscalerController(ctx controllerscontext.Cont
 	go custom_metrics.PeriodicallyInvalidate(
 		apiVersionsGetter,
 		ctx.Opts.HPAControllerConfiguration.HorizontalPodAutoscalerSyncPeriod.Duration,
-		ctx.StopChan)
+		ctx.Context.Done())
 	metricsClient := metricsclient.NewRESTMetricsClient(
 		resourceclient.NewForConfigOrDie(ctx.Mgr.GetConfig()),
 		custom_metrics.NewForConfig(ctx.Mgr.GetConfig(), ctx.Mgr.GetRESTMapper(), apiVersionsGetter),
@@ -658,8 +743,9 @@ func startCronFederatedHorizontalPodAutoscalerController(ctx controllerscontext.
 
 func startHPAScaleTargetMarkerController(ctx controllerscontext.Context) (enabled bool, err error) {
 	hpaScaleTargetMarker := hpascaletargetmarker.HpaScaleTargetMarker{
-		DynamicClient: ctx.DynamicClientSet,
-		RESTMapper:    ctx.Mgr.GetRESTMapper(),
+		DynamicClient:      ctx.DynamicClientSet,
+		RESTMapper:         ctx.Mgr.GetRESTMapper(),
+		RateLimiterOptions: ctx.Opts.RateLimiterOptions,
 	}
 	err = hpaScaleTargetMarker.SetupWithManager(ctx.Mgr)
 	if err != nil {
@@ -671,7 +757,8 @@ func startHPAScaleTargetMarkerController(ctx controllerscontext.Context) (enable
 
 func startDeploymentReplicasSyncerController(ctx controllerscontext.Context) (enabled bool, err error) {
 	deploymentReplicasSyncer := deploymentreplicassyncer.DeploymentReplicasSyncer{
-		Client: ctx.Mgr.GetClient(),
+		Client:             ctx.Mgr.GetClient(),
+		RateLimiterOptions: ctx.Opts.RateLimiterOptions,
 	}
 	err = deploymentReplicasSyncer.SetupWithManager(ctx.Mgr)
 	if err != nil {
@@ -709,7 +796,8 @@ func startRemedyController(ctx controllerscontext.Context) (enabled bool, err er
 
 func startWorkloadRebalancerController(ctx controllerscontext.Context) (enabled bool, err error) {
 	workloadRebalancer := workloadrebalancer.RebalancerController{
-		Client: ctx.Mgr.GetClient(),
+		Client:             ctx.Mgr.GetClient(),
+		RateLimiterOptions: ctx.Opts.RateLimiterOptions,
 	}
 	err = workloadRebalancer.SetupWithManager(ctx.Mgr)
 	if err != nil {
@@ -720,7 +808,10 @@ func startWorkloadRebalancerController(ctx controllerscontext.Context) (enabled
 }
 
 func startAgentCSRApprovingController(ctx controllerscontext.Context) (enabled bool, err error) {
-	agentCSRApprover := approver.AgentCSRApprovingController{Client: ctx.KubeClientSet}
+	agentCSRApprover := approver.AgentCSRApprovingController{
+		Client:             ctx.KubeClientSet,
+		RateLimiterOptions: ctx.Opts.RateLimiterOptions,
+	}
 	err = agentCSRApprover.SetupWithManager(ctx.Mgr)
 	if err != nil {
 		return false, err
@@ -728,8 +819,24 @@ func startAgentCSRApprovingController(ctx controllerscontext.Context) (enabled b
 	return true, nil
 }
 
+func startClusterTaintPolicyController(ctx controllerscontext.Context) (enabled bool, err error) {
+	if !features.FeatureGate.Enabled(features.Failover) {
+		return false, nil
+	}
+
+	clusterTaintPolicyController := taint.ClusterTaintPolicyController{
+		Client:             ctx.Mgr.GetClient(),
+		EventRecorder:      ctx.Mgr.GetEventRecorderFor(taint.ControllerName),
+		RateLimiterOptions: ctx.Opts.RateLimiterOptions,
+	}
+	if err := clusterTaintPolicyController.SetupWithManager(ctx.Mgr); err != nil {
+		return false, err
+	}
+	return true, nil
+}
+
 // setupControllers initializes controllers and sets them up one by one.
-func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stopChan <-chan struct{}) {
+func setupControllers(ctx context.Context, mgr controllerruntime.Manager, opts *options.Options) {
 	restConfig := mgr.GetConfig()
 	dynamicClientSet := dynamic.NewForConfigOrDie(restConfig)
 	discoverClientSet := discovery.NewDiscoveryClientForConfigOrDie(restConfig)
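startClusterTaintPolicyController above is new in this diff and only runs when the Failover feature gate is enabled. A generic, runnable sketch of that guard pattern, using k8s.io/component-base/featuregate directly rather than karmada's features wrapper:

package main

import (
	"fmt"

	"k8s.io/component-base/featuregate"
)

const Failover featuregate.Feature = "Failover"

func main() {
	// A mutable gate holding one feature, disabled by default.
	gate := featuregate.NewFeatureGate()
	if err := gate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		Failover: {Default: false, PreRelease: featuregate.Beta},
	}); err != nil {
		panic(err)
	}

	// Mirrors the controller start helper: report enabled=false when gated off
	// so the caller can skip setup without treating it as an error.
	startController := func() (enabled bool, err error) {
		if !gate.Enabled(Failover) {
			return false, nil
		}
		// ... controller setup would happen here ...
		return true, nil
	}

	enabled, err := startController()
	fmt.Println(enabled, err) // "false <nil>" until the gate is flipped on
}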
@@ -742,25 +849,27 @@ func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stop
 		return
 	}
 
-	controlPlaneInformerManager := genericmanager.NewSingleClusterInformerManager(dynamicClientSet, opts.ResyncPeriod.Duration, stopChan)
+	controlPlaneInformerManager := genericmanager.NewSingleClusterInformerManager(ctx, dynamicClientSet, opts.ResyncPeriod.Duration)
 	// We need a service lister to build a resource interpreter with `ClusterIPServiceResolver`
 	// which allows connection to the customized interpreter webhook without a cluster DNS service.
 	sharedFactory := informers.NewSharedInformerFactory(kubeClientSet, opts.ResyncPeriod.Duration)
 	serviceLister := sharedFactory.Core().V1().Services().Lister()
-	sharedFactory.Start(stopChan)
-	sharedFactory.WaitForCacheSync(stopChan)
+	sharedFactory.Start(ctx.Done())
+	sharedFactory.WaitForCacheSync(ctx.Done())
 
 	resourceInterpreter := resourceinterpreter.NewResourceInterpreter(controlPlaneInformerManager, serviceLister)
-	if err := mgr.Add(resourceInterpreter); err != nil {
-		klog.Fatalf("Failed to setup custom resource interpreter: %v", err)
+	if err := resourceInterpreter.Start(ctx); err != nil {
+		klog.Fatalf("Failed to start resource interpreter: %v", err)
 	}
 
-	objectWatcher := objectwatcher.NewObjectWatcher(mgr.GetClient(), mgr.GetRESTMapper(), util.NewClusterDynamicClientSet, resourceInterpreter)
+	rateLimiterGetter := util.GetClusterRateLimiterGetter().SetDefaultLimits(opts.ClusterAPIQPS, opts.ClusterAPIBurst)
+	clusterClientOption := &util.ClientOption{RateLimiterGetter: rateLimiterGetter.GetRateLimiter}
+	objectWatcher := objectwatcher.NewObjectWatcher(mgr.GetClient(), mgr.GetRESTMapper(), util.NewClusterDynamicClientSet, clusterClientOption, resourceInterpreter)
 
 	resourceDetector := &detector.ResourceDetector{
 		DiscoveryClientSet:     discoverClientSet,
 		Client:                 mgr.GetClient(),
 		InformerManager:        controlPlaneInformerManager,
 		ControllerRuntimeCache: mgr.GetCache(),
 		RESTMapper:             mgr.GetRESTMapper(),
 		DynamicClient:          dynamicClientSet,
 		SkippedResourceConfig:  skippedResourceConfig,
@@ -791,7 +900,7 @@ func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stop
 			klog.Fatalf("Failed to setup dependencies distributor: %v", err)
 		}
 	}
-	setupClusterAPIClusterDetector(mgr, opts, stopChan)
+	setupClusterAPIClusterDetector(ctx, mgr, opts)
 	controllerContext := controllerscontext.Context{
 		Mgr:           mgr,
 		ObjectWatcher: objectWatcher,
@@ -801,14 +910,11 @@ func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stop
 			ClusterMonitorGracePeriod:         opts.ClusterMonitorGracePeriod,
 			ClusterStartupGracePeriod:         opts.ClusterStartupGracePeriod,
 			ClusterStatusUpdateFrequency:      opts.ClusterStatusUpdateFrequency,
-			FailoverEvictionTimeout:           opts.FailoverEvictionTimeout,
 			ClusterLeaseDuration:              opts.ClusterLeaseDuration,
 			ClusterLeaseRenewIntervalFraction: opts.ClusterLeaseRenewIntervalFraction,
 			ClusterSuccessThreshold:           opts.ClusterSuccessThreshold,
 			ClusterFailureThreshold:           opts.ClusterFailureThreshold,
 			ClusterCacheSyncTimeout:           opts.ClusterCacheSyncTimeout,
-			ClusterAPIQPS:                     opts.ClusterAPIQPS,
-			ClusterAPIBurst:                   opts.ClusterAPIBurst,
 			SkippedPropagatingNamespaces:      opts.SkippedNamespacesRegexps(),
 			ConcurrentWorkSyncs:               opts.ConcurrentWorkSyncs,
 			EnableTaintManager:                opts.EnableTaintManager,
@@ -816,13 +922,16 @@ func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stop
 			GracefulEvictionTimeout:       opts.GracefulEvictionTimeout,
 			EnableClusterResourceModeling: opts.EnableClusterResourceModeling,
 			HPAControllerConfiguration:    opts.HPAControllerConfiguration,
+			FederatedResourceQuotaOptions: opts.FederatedResourceQuotaOptions,
+			FailoverConfiguration:         opts.FailoverOptions,
 		},
-		StopChan:                    stopChan,
+		Context:                     ctx,
 		DynamicClientSet:            dynamicClientSet,
 		KubeClientSet:               kubeClientSet,
 		OverrideManager:             overrideManager,
 		ControlPlaneInformerManager: controlPlaneInformerManager,
 		ResourceInterpreter:         resourceInterpreter,
+		ClusterClientOption:         clusterClientOption,
 	}
 
 	if err := controllers.StartControllers(controllerContext, controllersDisabledByDefault); err != nil {
@@ -831,13 +940,13 @@ func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stop
 
 	// Ensure the InformerManager stops when the stop channel closes
 	go func() {
-		<-stopChan
+		<-ctx.Done()
 		genericmanager.StopInstance()
 	}()
 }
 
 // setupClusterAPIClusterDetector initializes the Cluster detector with the cluster-api management cluster.
-func setupClusterAPIClusterDetector(mgr controllerruntime.Manager, opts *options.Options, stopChan <-chan struct{}) {
+func setupClusterAPIClusterDetector(ctx context.Context, mgr controllerruntime.Manager, opts *options.Options) {
 	if len(opts.ClusterAPIKubeconfig) == 0 {
 		return
 	}
@@ -858,7 +967,7 @@ func setupClusterAPIClusterDetector(mgr controllerruntime.Manager, opts *options
 		ControllerPlaneConfig: mgr.GetConfig(),
 		ClusterAPIConfig:      clusterAPIRestConfig,
 		ClusterAPIClient:      clusterAPIClient,
-		InformerManager:       genericmanager.NewSingleClusterInformerManager(dynamic.NewForConfigOrDie(clusterAPIRestConfig), 0, stopChan),
+		InformerManager:       genericmanager.NewSingleClusterInformerManager(ctx, dynamic.NewForConfigOrDie(clusterAPIRestConfig), 0),
 		ConcurrentReconciles:  3,
 	}
 	if err := mgr.Add(clusterAPIClusterDetector); err != nil {
@@ -0,0 +1,60 @@
+/*
+Copyright 2025 The Karmada Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package options
+
+import (
+	"github.com/spf13/pflag"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+)
+
+// FailoverOptions holds the Failover configurations.
+type FailoverOptions struct {
+	// EnableNoExecuteTaintEviction enables controller response to NoExecute taints on clusters,
+	// which triggers eviction of workloads without explicit tolerations.
+	EnableNoExecuteTaintEviction bool
+	// NoExecuteTaintEvictionPurgeMode controls resource cleanup behavior for NoExecute-triggered
+	// evictions (only active when --enable-no-execute-taint-eviction=true).
+	// Valid modes:
+	// - "Gracefully": first schedules workloads to new clusters and then cleans up original
+	//   workloads after successful startup elsewhere to ensure service continuity.
+	// - "Directly": directly evicts workloads first (risking temporary service interruption)
+	//   and then triggers rescheduling to other clusters.
+	// Default: "Gracefully".
+	NoExecuteTaintEvictionPurgeMode string
+}
+
+// AddFlags adds flags related to FailoverOptions for controller manager to the specified FlagSet.
+func (o *FailoverOptions) AddFlags(flags *pflag.FlagSet) {
+	if o == nil {
+		return
+	}
+
+	flags.BoolVar(&o.EnableNoExecuteTaintEviction, "enable-no-execute-taint-eviction", false, "Enables controller response to NoExecute taints on clusters, which triggers eviction of workloads without explicit tolerations. Given the impact of eviction caused by NoExecute Taint, this parameter is designed to remain disabled by default and requires careful evaluation by administrators before being enabled.\n")
+	flags.StringVar(&o.NoExecuteTaintEvictionPurgeMode, "no-execute-taint-eviction-purge-mode", "Gracefully", "Controls resource cleanup behavior for NoExecute-triggered evictions (only active when --enable-no-execute-taint-eviction=true). Supported values are \"Directly\", and \"Gracefully\". \"Directly\" mode directly evicts workloads first (risking temporary service interruption) and then triggers rescheduling to other clusters, while \"Gracefully\" mode first schedules workloads to new clusters and then cleans up original workloads after successful startup elsewhere to ensure service continuity.")
+}
+
+// Validate checks FailoverOptions and returns a slice of found errs.
+func (o *FailoverOptions) Validate() field.ErrorList {
+	errs := field.ErrorList{}
+	if o.EnableNoExecuteTaintEviction &&
+		o.NoExecuteTaintEvictionPurgeMode != "Gracefully" &&
+		o.NoExecuteTaintEvictionPurgeMode != "Directly" {
+		errs = append(errs, field.Invalid(field.NewPath("FailoverOptions").Child("NoExecuteTaintEvictionPurgeMode"),
+			o.NoExecuteTaintEvictionPurgeMode, "Invalid mode"))
+	}
+	return errs
+}
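To see the new failover flags end to end, here is a minimal sketch that registers FailoverOptions on a flag set and trips its validation with an unsupported purge mode. The import path is inferred from the diff's file location, and the main function is purely illustrative:

package main

import (
	"fmt"

	"github.com/spf13/pflag"

	// Assumed package path for the new options file shown in this diff.
	"github.com/karmada-io/karmada/cmd/controller-manager/app/options"
)

func main() {
	o := &options.FailoverOptions{}
	fs := pflag.NewFlagSet("demo", pflag.ExitOnError)
	o.AddFlags(fs)

	// Simulate an invalid combination: eviction enabled with an unknown purge mode.
	_ = fs.Parse([]string{
		"--enable-no-execute-taint-eviction=true",
		"--no-execute-taint-eviction-purge-mode=Immediately",
	})

	if errs := o.Validate(); len(errs) > 0 {
		fmt.Println(errs.ToAggregate()) // rejects anything but "Gracefully" or "Directly"
	}
}

Note that validation only rejects the purge mode when eviction is enabled, so leaving the feature disabled keeps any stale flag value harmless.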
@@ -0,0 +1,48 @@
+/*
+Copyright 2025 The Karmada Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package options
+
+import (
+	"time"
+
+	"github.com/spf13/pflag"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+)
+
+// FederatedResourceQuotaOptions holds the FederatedResourceQuota-related options.
+type FederatedResourceQuotaOptions struct {
+	// ResourceQuotaSyncPeriod is the period for syncing federated resource quota usage status
+	// in the system.
+	ResourceQuotaSyncPeriod metav1.Duration
+}
+
+// AddFlags adds flags related to FederatedResourceQuotaEnforcement for controller manager to the specified FlagSet.
+func (o *FederatedResourceQuotaOptions) AddFlags(fs *pflag.FlagSet) {
+	if o == nil {
+		return
+	}
+	fs.DurationVar(&o.ResourceQuotaSyncPeriod.Duration, "federated-resource-quota-sync-period", time.Minute*5, "The interval for periodic full resynchronization of FederatedResourceQuota resources. This ensures quota recalculations occur at regular intervals to correct potential inaccuracies, particularly when webhook validation produces side effects.")
+}
+
+// Validate checks FederatedResourceQuotaOptions and returns a slice of found errs.
+func (o *FederatedResourceQuotaOptions) Validate() field.ErrorList {
+	if o.ResourceQuotaSyncPeriod.Duration <= 0 {
+		return field.ErrorList{field.Invalid(field.NewPath("federatedResourceQuotaSyncPeriod"), o.ResourceQuotaSyncPeriod, "must be greater than 0")}
+	}
+	return nil
+}
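The flag above ultimately feeds QuotaRecalculation.ResyncPeriod in the controller-manager diff earlier. As a generic, runnable sketch of the pattern a resync period drives, with a stand-in recalculation function:

package main

import (
	"context"
	"fmt"
	"time"
)

// runPeriodicResync illustrates the pattern the --federated-resource-quota-sync-period
// flag configures: a full recalculation on every tick until the context is canceled.
func runPeriodicResync(ctx context.Context, period time.Duration, recalculate func()) {
	ticker := time.NewTicker(period)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			recalculate()
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 350*time.Millisecond)
	defer cancel()
	// A stand-in for recalculating FederatedResourceQuota usage status.
	runPeriodicResync(ctx, 100*time.Millisecond, func() { fmt.Println("recalculating quota usage") })
}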
@@ -32,6 +32,7 @@ import (
 	"github.com/karmada-io/karmada/pkg/sharedcli/profileflag"
 	"github.com/karmada-io/karmada/pkg/sharedcli/ratelimiterflag"
 	"github.com/karmada-io/karmada/pkg/util"
+	"github.com/karmada-io/karmada/pkg/util/names"
 )
 
 var (
@@ -53,8 +54,6 @@ type Options struct {
 	// ClusterStatusUpdateFrequency is the frequency that controller computes and reports cluster status.
 	// It must work with ClusterMonitorGracePeriod(--cluster-monitor-grace-period) in karmada-controller-manager.
 	ClusterStatusUpdateFrequency metav1.Duration
-	// FailoverEvictionTimeout is the grace period for deleting scheduling result on failed clusters.
-	FailoverEvictionTimeout metav1.Duration
 	// ClusterLeaseDuration is a duration that candidates for a lease need to wait to force acquire it.
 	// This is measured against the time of the last observed lease RenewTime.
 	ClusterLeaseDuration metav1.Duration
@@ -145,6 +144,10 @@ type Options struct {
 	// in scenario of dynamic replica assignment based on cluster free resources.
 	// Disable if it does not fit your cases for better performance.
 	EnableClusterResourceModeling bool
+	// FederatedResourceQuotaOptions holds configurations for FederatedResourceQuota reconciliation.
+	FederatedResourceQuotaOptions FederatedResourceQuotaOptions
+	// FailoverOptions holds the Failover configurations.
+	FailoverOptions FailoverOptions
 }
 
 // NewOptions builds an empty options.
@@ -153,8 +156,8 @@ func NewOptions() *Options {
 		LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
 			LeaderElect:       true,
 			ResourceLock:      resourcelock.LeasesResourceLock,
-			ResourceNamespace: util.NamespaceKarmadaSystem,
-			ResourceName:      "karmada-controller-manager",
+			ResourceNamespace: names.NamespaceKarmadaSystem,
+			ResourceName:      names.KarmadaControllerManagerComponentName,
 		},
 	}
 }
@@ -168,7 +171,7 @@ func (o *Options) AddFlags(flags *pflag.FlagSet, allControllers, disabledByDefau
 	flags.DurationVar(&o.ClusterStatusUpdateFrequency.Duration, "cluster-status-update-frequency", 10*time.Second,
 		"Specifies how often karmada-controller-manager posts cluster status to karmada-apiserver.")
 	flags.BoolVar(&o.LeaderElection.LeaderElect, "leader-elect", true, "Start a leader election client and gain leadership before executing the main loop. Enable this when running replicated components for high availability.")
-	flags.StringVar(&o.LeaderElection.ResourceNamespace, "leader-elect-resource-namespace", util.NamespaceKarmadaSystem, "The namespace of resource object that is used for locking during leader election.")
+	flags.StringVar(&o.LeaderElection.ResourceNamespace, "leader-elect-resource-namespace", names.NamespaceKarmadaSystem, "The namespace of resource object that is used for locking during leader election.")
 	flags.DurationVar(&o.LeaderElection.LeaseDuration.Duration, "leader-elect-lease-duration", defaultElectionLeaseDuration.Duration, ""+
 		"The duration that non-leader candidates will wait after observing a leadership "+
 		"renewal until attempting to acquire leadership of a led but unrenewed leader "+
@@ -194,8 +197,6 @@ func (o *Options) AddFlags(flags *pflag.FlagSet, allControllers, disabledByDefau
 		"Specifies the grace period of allowing a running cluster to be unresponsive before marking it unhealthy.")
 	flags.DurationVar(&o.ClusterStartupGracePeriod.Duration, "cluster-startup-grace-period", 60*time.Second,
 		"Specifies the grace period of allowing a cluster to be unresponsive during startup before marking it unhealthy.")
-	flags.DurationVar(&o.FailoverEvictionTimeout.Duration, "failover-eviction-timeout", 5*time.Minute,
-		"Specifies the grace period for deleting scheduling result on failed clusters.")
 	flags.StringVar(&o.SkippedPropagatingAPIs, "skipped-propagating-apis", "", "Semicolon separated resources that should be skipped from propagating in addition to the default skip list(cluster.karmada.io;policy.karmada.io;work.karmada.io). Supported formats are:\n"+
 		"<group> for skip resources with a specific API group(e.g. networking.k8s.io),\n"+
 		"<group>/<version> for skip resources with a specific API version(e.g. networking.k8s.io/v1beta1),\n"+
@@ -231,6 +232,8 @@ func (o *Options) AddFlags(flags *pflag.FlagSet, allControllers, disabledByDefau
 	o.RateLimiterOpts.AddFlags(flags)
 	o.ProfileOpts.AddFlags(flags)
 	o.HPAControllerConfiguration.AddFlags(flags)
+	o.FederatedResourceQuotaOptions.AddFlags(flags)
+	o.FailoverOptions.AddFlags(flags)
 	features.FeatureGate.AddFlag(flags)
 }
@@ -54,5 +54,9 @@ func (o *Options) Validate() field.ErrorList {
 			errs = append(errs, field.Invalid(newPath.Child("SkippedPropagatingNamespaces").Index(index), ns, "Invalid namespace regular expression"))
 		}
 	}
+
+	errs = append(errs, o.FederatedResourceQuotaOptions.Validate()...)
+	errs = append(errs, o.FailoverOptions.Validate()...)
+
 	return errs
 }
@@ -36,6 +36,11 @@ func New(modifyOptions ModifyOptions) Options {
 		ClusterMonitorPeriod:      metav1.Duration{Duration: 10 * time.Second},
 		ClusterMonitorGracePeriod: metav1.Duration{Duration: 10 * time.Second},
 		ClusterStartupGracePeriod: metav1.Duration{Duration: 10 * time.Second},
+		FederatedResourceQuotaOptions: FederatedResourceQuotaOptions{
+			ResourceQuotaSyncPeriod: metav1.Duration{
+				Duration: 10 * time.Second,
+			},
+		},
 	}
 
 	if modifyOptions != nil {
@@ -96,6 +101,15 @@ func TestValidateControllerManagerConfiguration(t *testing.T) {
 			}),
 			expectedErrs: field.ErrorList{field.Invalid(newPath.Child("ClusterStartupGracePeriod"), metav1.Duration{Duration: 0 * time.Second}, "must be greater than 0")},
 		},
+		"invalid FailoverOptions": {
+			opt: New(func(options *Options) {
+				options.FailoverOptions.EnableNoExecuteTaintEviction = true
+				options.FailoverOptions.NoExecuteTaintEvictionPurgeMode = ""
+			}),
+			expectedErrs: field.ErrorList{
+				field.Invalid(field.NewPath("FailoverOptions").Child("NoExecuteTaintEvictionPurgeMode"), "", "Invalid mode"),
+			},
+		},
 	}
 
 	for _, testCase := range testCases {
@@ -20,23 +20,18 @@ import (
 	"os"
 
 	"k8s.io/component-base/cli"
-	_ "k8s.io/component-base/logs/json/register" // for JSON log format registration
-	"k8s.io/klog/v2"
+	"k8s.io/component-base/logs"
+	_ "k8s.io/component-base/logs/json/register" // To enable JSON log format support
 	controllerruntime "sigs.k8s.io/controller-runtime"
 	_ "sigs.k8s.io/controller-runtime/pkg/metrics"
 
 	"github.com/karmada-io/karmada/cmd/controller-manager/app"
 )
 
 func main() {
 	ctx := controllerruntime.SetupSignalHandler()
-	// Starting from version 0.15.0, controller-runtime expects its consumers to set a logger through log.SetLogger.
-	// If SetLogger is not called within the first 30 seconds of a binaries lifetime, it will get
-	// set to a NullLogSink and report an error. Here's to silence the "log.SetLogger(...) was never called; logs will not be displayed" error
-	// by setting a logger through log.SetLogger.
-	// More info refer to: https://github.com/karmada-io/karmada/pull/4885.
-	controllerruntime.SetLogger(klog.Background())
 	cmd := app.NewControllerManagerCommand(ctx)
-	code := cli.Run(cmd)
-	os.Exit(code)
+	exitCode := cli.Run(cmd)
+	// Ensure any buffered log entries are flushed
+	logs.FlushLogs()
+	os.Exit(exitCode)
 }
@@ -31,17 +31,23 @@ import (
 	"k8s.io/client-go/tools/clientcmd"
 	"k8s.io/client-go/tools/leaderelection"
 	"k8s.io/client-go/tools/leaderelection/resourcelock"
+	"k8s.io/client-go/util/flowcontrol"
 	cliflag "k8s.io/component-base/cli/flag"
+	"k8s.io/component-base/logs"
+	logsv1 "k8s.io/component-base/logs/api/v1"
 	"k8s.io/component-base/term"
 	"k8s.io/klog/v2"
+	ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
 
 	"github.com/karmada-io/karmada/cmd/descheduler/app/options"
 	"github.com/karmada-io/karmada/pkg/descheduler"
+	"github.com/karmada-io/karmada/pkg/features"
 	karmadaclientset "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
+	versionmetrics "github.com/karmada-io/karmada/pkg/metrics"
 	"github.com/karmada-io/karmada/pkg/sharedcli"
 	"github.com/karmada-io/karmada/pkg/sharedcli/klogflag"
 	"github.com/karmada-io/karmada/pkg/sharedcli/profileflag"
+	"github.com/karmada-io/karmada/pkg/util/names"
 	"github.com/karmada-io/karmada/pkg/version"
 	"github.com/karmada-io/karmada/pkg/version/sharedcommand"
 )
@@ -76,20 +82,30 @@ const (
 )
 
 // NewDeschedulerCommand creates a *cobra.Command object with default parameters
-func NewDeschedulerCommand(stopChan <-chan struct{}) *cobra.Command {
+func NewDeschedulerCommand(ctx context.Context) *cobra.Command {
+	logConfig := logsv1.NewLoggingConfiguration()
+	fss := cliflag.NamedFlagSets{}
+
+	logsFlagSet := fss.FlagSet("logs")
+	logs.AddFlags(logsFlagSet, logs.SkipLoggingConfigurationFlags())
+	logsv1.AddFlags(logConfig, logsFlagSet)
+	klogflag.Add(logsFlagSet)
+
+	genericFlagSet := fss.FlagSet("generic")
 	opts := options.NewOptions()
+	opts.AddFlags(genericFlagSet)
 
 	cmd := &cobra.Command{
-		Use: "karmada-descheduler",
+		Use: names.KarmadaDeschedulerComponentName,
 		Long: `The karmada-descheduler evicts replicas from member clusters
-if they are failed to be scheduled for a period of time. It relies on
+if they fail to be scheduled for a period of time. It relies on
 karmada-scheduler-estimator to get replica status.`,
 		RunE: func(_ *cobra.Command, _ []string) error {
 			// validate options
 			if errs := opts.Validate(); len(errs) != 0 {
 				return errs.ToAggregate()
 			}
-			if err := run(opts, stopChan); err != nil {
+			if err := run(ctx, opts); err != nil {
 				return err
 			}
 			return nil
@@ -102,18 +118,16 @@ karmada-scheduler-estimator to get replica status.`,
 			}
 			return nil
 		},
+		PersistentPreRunE: func(_ *cobra.Command, _ []string) error {
+			if err := logsv1.ValidateAndApply(logConfig, features.FeatureGate); err != nil {
+				return err
+			}
+			logs.InitLogs()
+			return nil
+		},
 	}
 
-	fss := cliflag.NamedFlagSets{}
-
-	genericFlagSet := fss.FlagSet("generic")
-	opts.AddFlags(genericFlagSet)
-
-	// Set klog flags
-	logsFlagSet := fss.FlagSet("logs")
-	klogflag.Add(logsFlagSet)
-
-	cmd.AddCommand(sharedcommand.NewCmdVersion("karmada-descheduler"))
+	cmd.AddCommand(sharedcommand.NewCmdVersion(names.KarmadaDeschedulerComponentName))
 	cmd.Flags().AddFlagSet(genericFlagSet)
 	cmd.Flags().AddFlagSet(logsFlagSet)
@@ -122,9 +136,12 @@ karmada-scheduler-estimator to get replica status.`,
 	return cmd
 }
 
-func run(opts *options.Options, stopChan <-chan struct{}) error {
+func run(ctx context.Context, opts *options.Options) error {
 	klog.Infof("karmada-descheduler version: %s", version.Get())
 	klog.Infof("Please make sure the karmada-scheduler-estimator of all member clusters has been deployed")
+
+	ctrlmetrics.Registry.MustRegister(versionmetrics.NewBuildInfoCollector())
+
 	serveHealthzAndMetrics(opts.HealthProbeBindAddress, opts.MetricsBindAddress)
 
 	profileflag.ListenAndServe(opts.ProfileOpts)
@@ -133,17 +150,11 @@ func run(opts *options.Options, stopChan <-chan struct{}) error {
 	if err != nil {
 		return fmt.Errorf("error building kubeconfig: %s", err.Error())
 	}
-	restConfig.QPS, restConfig.Burst = opts.KubeAPIQPS, opts.KubeAPIBurst
+	restConfig.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(opts.KubeAPIQPS, opts.KubeAPIBurst)
 
 	karmadaClient := karmadaclientset.NewForConfigOrDie(restConfig)
 	kubeClient := kubernetes.NewForConfigOrDie(restConfig)
 
-	ctx, cancel := context.WithCancel(context.Background())
-	go func() {
-		<-stopChan
-		cancel()
-	}()
-
 	desched := descheduler.NewDescheduler(karmadaClient, kubeClient, opts)
 	if !opts.LeaderElection.LeaderElect {
 		desched.Run(ctx)
@@ -17,6 +17,8 @@ limitations under the License.
 package app
 
 import (
+	"context"
+	"fmt"
 	"net/http"
 	"testing"
 	"time"
@@ -25,14 +27,16 @@ import (
 	"github.com/stretchr/testify/require"
 
 	"github.com/karmada-io/karmada/cmd/descheduler/app/options"
+	"github.com/karmada-io/karmada/pkg/util/names"
+	testingutil "github.com/karmada-io/karmada/pkg/util/testing"
 )
 
 func TestNewDeschedulerCommand(t *testing.T) {
-	stopCh := make(chan struct{})
-	cmd := NewDeschedulerCommand(stopCh)
+	ctx := context.Background()
+	cmd := NewDeschedulerCommand(ctx)
 
 	assert.NotNil(t, cmd)
-	assert.Equal(t, "karmada-descheduler", cmd.Use)
+	assert.Equal(t, names.KarmadaDeschedulerComponentName, cmd.Use)
 	assert.NotEmpty(t, cmd.Long)
 }
@@ -50,8 +54,8 @@ func TestDeschedulerCommandFlagParsing(t *testing.T) {
 	}
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
-			stopCh := make(chan struct{})
-			cmd := NewDeschedulerCommand(stopCh)
+			ctx := context.Background()
+			cmd := NewDeschedulerCommand(ctx)
 			cmd.SetArgs(tc.args)
 			err := cmd.ParseFlags(tc.args)
 			if tc.expectError {
@@ -64,8 +68,10 @@ func TestDeschedulerCommandFlagParsing(t *testing.T) {
 }
 
 func TestServeHealthzAndMetrics(t *testing.T) {
-	healthAddress := "127.0.0.1:8082"
-	metricsAddress := "127.0.0.1:8083"
+	ports, err := testingutil.GetFreePorts("127.0.0.1", 2)
+	require.NoError(t, err)
+	healthAddress := fmt.Sprintf("127.0.0.1:%d", ports[0])
+	metricsAddress := fmt.Sprintf("127.0.0.1:%d", ports[1])
 
 	go serveHealthzAndMetrics(healthAddress, metricsAddress)
 
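The rewritten TestServeHealthzAndMetrics picks ports dynamically via testingutil.GetFreePorts instead of hard-coding them, avoiding collisions on busy CI hosts. A minimal sketch of what such a helper can look like (an assumption for illustration, not karmada's actual implementation):

package testingutil

import (
	"fmt"
	"net"
)

// GetFreePorts reserves n free TCP ports on the given host by binding to
// port 0 (letting the OS choose) and releasing the listeners on return.
// The small window between release and the caller's own bind is an accepted
// race for test code.
func GetFreePorts(host string, n int) ([]int, error) {
	ports := make([]int, 0, n)
	listeners := make([]net.Listener, 0, n)
	defer func() {
		for _, l := range listeners {
			l.Close()
		}
	}()
	for i := 0; i < n; i++ {
		l, err := net.Listen("tcp", fmt.Sprintf("%s:0", host))
		if err != nil {
			return nil, err
		}
		listeners = append(listeners, l)
		ports = append(ports, l.Addr().(*net.TCPAddr).Port)
	}
	return ports, nil
}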
@@ -25,7 +25,7 @@ import (
 	componentbaseconfig "k8s.io/component-base/config"
 
 	"github.com/karmada-io/karmada/pkg/sharedcli/profileflag"
-	"github.com/karmada-io/karmada/pkg/util"
+	"github.com/karmada-io/karmada/pkg/util/names"
 )
 
 const (
@@ -89,8 +89,8 @@ func NewOptions() *Options {
 		LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
 			LeaderElect:       true,
 			ResourceLock:      resourcelock.LeasesResourceLock,
-			ResourceNamespace: util.NamespaceKarmadaSystem,
-			ResourceName:      "karmada-descheduler",
+			ResourceNamespace: names.NamespaceKarmadaSystem,
+			ResourceName:      names.KarmadaDeschedulerComponentName,
 			LeaseDuration:     defaultElectionLeaseDuration,
 			RenewDeadline:     defaultElectionRenewDeadline,
 			RetryPeriod:       defaultElectionRetryPeriod,
@@ -104,7 +104,7 @@ func (o *Options) AddFlags(fs *pflag.FlagSet) {
 		return
 	}
 	fs.BoolVar(&o.LeaderElection.LeaderElect, "leader-elect", true, "Enable leader election, which must be true when running multi instances.")
-	fs.StringVar(&o.LeaderElection.ResourceNamespace, "leader-elect-resource-namespace", util.NamespaceKarmadaSystem, "The namespace of resource object that is used for locking during leader election.")
+	fs.StringVar(&o.LeaderElection.ResourceNamespace, "leader-elect-resource-namespace", names.NamespaceKarmadaSystem, "The namespace of resource object that is used for locking during leader election.")
 	fs.DurationVar(&o.LeaderElection.LeaseDuration.Duration, "leader-elect-lease-duration", defaultElectionLeaseDuration.Duration, ""+
 		"The duration that non-leader candidates will wait after observing a leadership "+
 		"renewal until attempting to acquire leadership of a led but unrenewed leader "+
|
@ -128,8 +128,8 @@ func (o *Options) AddFlags(fs *pflag.FlagSet) {
|
|||
fs.StringVar(&o.SchedulerEstimatorKeyFile, "scheduler-estimator-key-file", "", "SSL key file used to secure scheduler estimator communication.")
|
||||
fs.StringVar(&o.SchedulerEstimatorCaFile, "scheduler-estimator-ca-file", "", "SSL Certificate Authority file used to secure scheduler estimator communication.")
|
||||
fs.BoolVar(&o.InsecureSkipEstimatorVerify, "insecure-skip-estimator-verify", false, "Controls whether verifies the scheduler estimator's certificate chain and host name.")
|
||||
fs.StringVar(&o.SchedulerEstimatorServiceNamespace, "scheduler-estimator-service-namespace", util.NamespaceKarmadaSystem, "The namespace to be used for discovering scheduler estimator services.")
|
||||
fs.StringVar(&o.SchedulerEstimatorServicePrefix, "scheduler-estimator-service-prefix", "karmada-scheduler-estimator", "The prefix of scheduler estimator service name")
|
||||
fs.StringVar(&o.SchedulerEstimatorServiceNamespace, "scheduler-estimator-service-namespace", names.NamespaceKarmadaSystem, "The namespace to be used for discovering scheduler estimator services.")
|
||||
fs.StringVar(&o.SchedulerEstimatorServicePrefix, "scheduler-estimator-service-prefix", names.KarmadaSchedulerEstimatorComponentName, "The prefix of scheduler estimator service name")
|
||||
fs.DurationVar(&o.DeschedulingInterval.Duration, "descheduling-interval", defaultDeschedulingInterval, "Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.")
|
||||
fs.DurationVar(&o.UnschedulableThreshold.Duration, "unschedulable-threshold", defaultUnschedulableThreshold, "The period of pod unschedulable condition. This value is considered as a classification standard of unschedulable replicas.")
|
||||
fs.StringVar(&o.MetricsBindAddress, "metrics-bind-address", ":8080", "The TCP address that the server should bind to for serving prometheus metrics(e.g. 127.0.0.1:8080, :8080). It can be set to \"0\" to disable the metrics serving. Defaults to 0.0.0.0:8080.")
|
||||
|
|
Some files were not shown because too many files have changed in this diff.