Compare commits
801 Commits
cluster-au
...
master
Author | SHA1 | Date |
---|---|---|
|
c7a51426e2 | |
|
c1352dad7c | |
|
36804f199c | |
|
172a22c195 | |
|
4bd2e67a1d | |
|
72bf359268 | |
|
26d6b38699 | |
|
ff6e93bfc3 | |
|
1938d3971a | |
|
3d748040d9 | |
|
2eb5adda2c | |
|
0d36de3aa2 | |
|
1d5f0471bc | |
|
1fbc7a9d48 | |
|
e8941e59a0 | |
|
9a256e5c83 | |
|
f9b93ec395 | |
|
0d14eca879 | |
|
aae2a010f1 | |
|
c187e7f147 | |
|
9bc422016f | |
|
8482fd7ac1 | |
|
0fb7d53506 | |
|
637d9ad908 | |
|
0ed3da32c9 | |
|
df9718c409 | |
|
6f0f000a20 | |
|
2945e9562d | |
|
1d421cbe93 | |
|
338987711f | |
|
bd2ff7b070 | |
|
8d76026c21 | |
|
115c8168be | |
|
12e6e2e182 | |
|
8f9a24aafe | |
|
2abd557186 | |
|
cc01c8756f | |
|
88a3b42883 | |
|
055aa33780 | |
|
40b429081f | |
|
4086830636 | |
|
8e47b51d39 | |
|
f3c58dae9c | |
|
14ce6111ba | |
|
3f9526837e | |
|
b1780e6401 | |
|
65c4d6f702 | |
|
563f074dd1 | |
|
815da21233 | |
|
82178880ba | |
|
008a3b916e | |
|
77cb4c8bf8 | |
|
9424deef46 | |
|
c93df03bca | |
|
b44c40d4ec | |
|
ae181a24d0 | |
|
2ae7495c3f | |
|
7912e2d0f6 | |
|
ce81a6a43c | |
|
792fba7ed1 | |
|
ffe6219f6d | |
|
5b25b5642c | |
|
4f177c9c8b | |
|
7a1e49ac1f | |
|
5149494fc9 | |
|
4560f69eaf | |
|
353b44637b | |
|
897989f231 | |
|
d5c1e15385 | |
|
2814dcafaf | |
|
77e3f571bf | |
|
8e0d47c61e | |
|
af75d6e901 | |
|
20a59a9f41 | |
|
c942ff37ad | |
|
ecb4297c9d | |
|
771b9ee591 | |
|
31caf5b0bf | |
|
c509bb2ef7 | |
|
b167235b94 | |
|
dd40212a46 | |
|
1332499614 | |
|
fb1dddb713 | |
|
a3fb18b195 | |
|
ffd18e39f5 | |
|
19b62950ed | |
|
ee360d45bf | |
|
8c51ff6f82 | |
|
08b281df4c | |
|
4a5b307039 | |
|
73badca7af | |
|
2bdd964632 | |
|
187f023315 | |
|
88bedd4137 | |
|
0ff374134f | |
|
31eb0a137d | |
|
6c12c60942 | |
|
8014ae253d | |
|
efdd034d91 | |
|
8e7d62b26e | |
|
5bc430e9a8 | |
|
b07e1e4c70 | |
|
61328095ae | |
|
0002157b3a | |
|
134d636520 | |
|
504a985ec2 | |
|
a880a2bca3 | |
|
4a989c0268 | |
|
5ae532f340 | |
|
98f86a71e6 | |
|
189e1397e9 | |
|
76f2bf8ae3 | |
|
39487100ce | |
|
67da65c813 | |
|
a187bc14d6 | |
|
79114057db | |
|
9e38ce69aa | |
|
9987e76e91 | |
|
f03a67ed81 | |
|
0912f9b4fb | |
|
eb48666180 | |
|
35029fed7a | |
|
8a975ee130 | |
|
18a9339925 | |
|
836f0a078c | |
|
919665f93d | |
|
18b96f2bc5 | |
|
3fdf196403 | |
|
9220470d9f | |
|
606aef22cb | |
|
2f6c19a171 | |
|
46bf4e4ace | |
|
782af09511 | |
|
8b9624d7f1 | |
|
af73650623 | |
|
70f79316f6 | |
|
bcf3866fb8 | |
|
daa15703a6 | |
|
110cb78422 | |
|
2a9103ea4c | |
|
53f7463ee6 | |
|
347db2d102 | |
|
fc8599b820 | |
|
8ae1ad7343 | |
|
4c154a7da8 | |
|
e1cb498992 | |
|
abe3e86e90 | |
|
e79b5b8635 | |
|
c0443a7e7c | |
|
8d90da9ac5 | |
|
8fd9e1f04d | |
|
2511e4485c | |
|
f038712a13 | |
|
e69b1a90d0 | |
|
886516c2cf | |
|
d49fb8672d | |
|
a361d25ce6 | |
|
d69fa69882 | |
|
c54764998b | |
|
741b24e8e9 | |
|
3a1842b83b | |
|
9c501ed6b9 | |
|
6250a5169c | |
|
aed9602edf | |
|
81a348d0e3 | |
|
69f24464fa | |
|
6063b5fa17 | |
|
05008b2488 | |
|
8341862f85 | |
|
c579b37f9e | |
|
3a2933a24c | |
|
9cf529cbc4 | |
|
db2f4684c5 | |
|
432cb11830 | |
|
96b13193e3 | |
|
4ec085aec3 | |
|
e4bac533d5 | |
|
e239358fa9 | |
|
bb8fe52ddc | |
|
fd76cb274b | |
|
4e8bd0ada5 | |
|
ea1c308130 | |
|
3b22e163c2 | |
|
cec8d3ead8 | |
|
fabcbe5b38 | |
|
adf59d447b | |
|
1c7958698e | |
|
2c7d8dc378 | |
|
168a26ef42 | |
|
0d8f587118 | |
|
ca0628ac82 | |
|
e3ca0a4a98 | |
|
adc7e12e1e | |
|
d1d65772e3 | |
|
086fd4426f | |
|
83b370ec5a | |
|
d0a297372a | |
|
442ad2d5b2 | |
|
708f44213b | |
|
0ee2d8a2a7 | |
|
b2a081db94 | |
|
7da6fb2961 | |
|
bf74f7afd3 | |
|
b1ed5ce4bc | |
|
6a2b950b7b | |
|
a9c37f71dc | |
|
f3023467d2 | |
|
26f59b5f21 | |
|
3374be70d0 | |
|
92f087b386 | |
|
c7f7cb5de8 | |
|
2abc138872 | |
|
c85f22f7dd | |
|
24ef1f4319 | |
|
e03f7068d5 | |
|
6b55dc9009 | |
|
8d45fcd183 | |
|
8e8979c91a | |
|
baa6c52c47 | |
|
e42d50d191 | |
|
6076fb36f1 | |
|
a1ab8bc55f | |
|
2b33c4c790 | |
|
2ca75135fb | |
|
2937e9c3da | |
|
3fd510bb5a | |
|
3039f3cc92 | |
|
66b4c962d6 | |
|
2a3764d007 | |
|
ff5595519e | |
|
a65fdb4031 | |
|
7bbb443a0f | |
|
b7fa3cd01d | |
|
4f18830d51 | |
|
087e946e1a | |
|
8a9a4b8c96 | |
|
8806d180c2 | |
|
036a482bc9 | |
|
94d55a5f7b | |
|
11e7560180 | |
|
c5eecc6c4c | |
|
9eac8fc5c5 | |
|
15883dce79 | |
|
d6376c48f6 | |
|
7df0c2fcbc | |
|
6ebeb83f1d | |
|
2af23c885b | |
|
b37a3eb264 | |
|
eb153611ff | |
|
6f86a9852f | |
|
770b76797f | |
|
b98a5ffc16 | |
|
f32d6cd542 | |
|
34115f8aa0 | |
|
d8050d79bc | |
|
53b0f037c7 | |
|
eea2dcd400 | |
|
d110d05b4b | |
|
28581fc17e | |
|
5d8f61a690 | |
|
79a1375afe | |
|
deec4b7fbd | |
|
6ad982c932 | |
|
454e70ce15 | |
|
6ab7e2eb78 | |
|
99121ebf42 | |
|
8bc75e5b58 | |
|
9f0ec719a4 | |
|
bf23616cf0 | |
|
0a9e1630a3 | |
|
9cdcc284ea | |
|
906a28f6a9 | |
|
88d4d5b836 | |
|
e57ff80356 | |
|
13c1dd8730 | |
|
46cb6059af | |
|
c6ce144445 | |
|
41630404f3 | |
|
ea764b4ef7 | |
|
24494f3c06 | |
|
397324d76e | |
|
2291b74a2d | |
|
6a6a912b41 | |
|
51a38bdc1a | |
|
1c79d5db69 | |
|
8c720f0c0d | |
|
9211cd4d41 | |
|
f8ac5394ce | |
|
27e1d0f17a | |
|
cd8b7dc4e2 | |
|
5d95cfde3b | |
|
130af548b5 | |
|
c897c97623 | |
|
420df58223 | |
|
3eba409c62 | |
|
20ebe030c3 | |
|
d4f4169873 | |
|
a9921418f6 | |
|
700f1fbfca | |
|
50763a264d | |
|
27a9a16b80 | |
|
bc5657f108 | |
|
e64e081bc5 | |
|
260c306ab4 | |
|
46fb73a1dc | |
|
986d7c875a | |
|
69a9fe0b9b | |
|
6771ca4848 | |
|
46acf7e536 | |
|
71ddfb7be0 | |
|
55ce673590 | |
|
ac1c7b5463 | |
|
3a1973872f | |
|
e51dcfb60b | |
|
6eebb82f0d | |
|
5819634304 | |
|
66feee1483 | |
|
8ad920634c | |
|
1b92813df4 | |
|
99584890b4 | |
|
01cd259f54 | |
|
6cbf801235 | |
|
18f10c1e00 | |
|
da002f58d8 | |
|
75f90f2dc7 | |
|
c450973f2b | |
|
9cc45e2a24 | |
|
25ad4c2c26 | |
|
22dc4e06f6 | |
|
43d6fbd747 | |
|
25af21c515 | |
|
cc3a9f5d10 | |
|
87a67e3aa0 | |
|
dd125d4ef1 | |
|
b766a060cf | |
|
afc3eafae5 | |
|
f5df60f4c2 | |
|
f1a44d89cf | |
|
4bc861d097 | |
|
7c28f52f93 | |
|
46e19bfe4e | |
|
cf68c8f9c7 | |
|
6a99f2e925 | |
|
1ead15ee80 | |
|
15cb8d163d | |
|
3713acbb33 | |
|
256bb7c142 | |
|
bf1d3832aa | |
|
c604166e31 | |
|
9691d2b264 | |
|
981ec32278 | |
|
ef9d3ac0be | |
|
6651816743 | |
|
3ac3fcdb4e | |
|
93e21d05e2 | |
|
3e92831089 | |
|
1de2160986 | |
|
358e3157ce | |
|
05b97efa12 | |
|
4578a1a211 | |
|
29888c3ce3 | |
|
8657345226 | |
|
71ef17fd6c | |
|
6c6db404af | |
|
00f627fbb9 | |
|
3e43170446 | |
|
4a1b362ca5 | |
|
9148a69e87 | |
|
204ac56883 | |
|
a8c5030035 | |
|
ea396b5f2b | |
|
dc91330f6a | |
|
c107f2bba5 | |
|
ecb572a945 | |
|
6c7c0c1ffd | |
|
3d138309b9 | |
|
13dba1751c | |
|
d97c4b22df | |
|
460797ba4a | |
|
a6a54e8806 | |
|
365c3d1d0c | |
|
63c7d13622 | |
|
fd0f93a94d | |
|
49b271f75a | |
|
19cb11766d | |
|
8b31ea0140 | |
|
5d4f6f1b80 | |
|
7d475d181c | |
|
10f98da57e | |
|
8a954bc021 | |
|
005a42b9af | |
|
9bcecb96c8 | |
|
52334dec72 | |
|
21422e5419 | |
|
81d42aacef | |
|
27d00f486c | |
|
db597b1acd | |
|
55eb65255e | |
|
c9ee74c39b | |
|
7b6996469b | |
|
e713b51bd6 | |
|
2ca5b44652 | |
|
5e1fc195a3 | |
|
8251159ba3 | |
|
0c522556c5 | |
|
94ae175e94 | |
|
63309979ba | |
|
e95e35c94e | |
|
1687d46e68 | |
|
4a233bf7df | |
|
aa1d413ea3 | |
|
8892f21919 | |
|
52cd68a498 | |
|
72c2f93c7c | |
|
f90590b90f | |
|
696af986ed | |
|
a226478f53 | |
|
15295c7b1b | |
|
8da9a7b4af | |
|
370c8eb78e | |
|
2bbe859154 | |
|
990ab04d85 | |
|
455d29039b | |
|
10bb546f9e | |
|
5268053d1e | |
|
dc57f7c089 | |
|
7b5e10156e | |
|
4aa465764c | |
|
1f65569b0d | |
|
71d3595cb7 | |
|
105429c31e | |
|
9a5e3d9f3d | |
|
9937f8f308 | |
|
e34783615d | |
|
9043687eb7 | |
|
003e6cd67c | |
|
f04fd5b231 | |
|
29d9088a99 | |
|
4400ed9b90 | |
|
0f5fe4254e | |
|
de4a5b7090 | |
|
ae3a367a0e | |
|
0a9528b202 | |
|
0d35a21caf | |
|
214215f320 | |
|
bac35046fb | |
|
bcbc466e4d | |
|
780e68f6d2 | |
|
8f19fae91b | |
|
96ec0cf67c | |
|
a3242f4f05 | |
|
bd363cdeac | |
|
33871fa816 | |
|
bef1f89a76 | |
|
6f4577ec8c | |
|
4d294562e5 | |
|
f1d00648d4 | |
|
e1e1c32e32 | |
|
241ad7af1e | |
|
10c2a3514e | |
|
738d7dd16d | |
|
57519980c4 | |
|
3e9d11b732 | |
|
173a4bde19 | |
|
24f68f98e2 | |
|
5e7a559aa8 | |
|
233d5c6e4d | |
|
4307e441d5 | |
|
004418d101 | |
|
0bd0b12c67 | |
|
5433dfaab4 | |
|
0d53a9b741 | |
|
7951dbce46 | |
|
3c7689f68c | |
|
481f8db116 | |
|
18ed036502 | |
|
b806889485 | |
|
b6452f20bc | |
|
3fa4bab088 | |
|
685a12bd11 | |
|
f96ed67821 | |
|
90eabc6a4d | |
|
9cac6a49d1 | |
|
7115527077 | |
|
2aba67154c | |
|
02e3d19449 | |
|
46ace331ba | |
|
830864830a | |
|
a58d346c09 | |
|
06dc9355d1 | |
|
a652aef200 | |
|
c599dc631f | |
|
83a2b64e9d | |
|
a946d3d7c9 | |
|
e89352182e | |
|
215f1054ba | |
|
0fd973a45e | |
|
ba945068fc | |
|
e97c112cb2 | |
|
0b16bbdee1 | |
|
2e50af6d12 | |
|
4e03407238 | |
|
21316a6843 | |
|
d3d76aa11d | |
|
c5789ae3d5 | |
|
ee21995455 | |
|
07358b982d | |
|
ef38fcc917 | |
|
cf063d39b5 | |
|
ec8b605db6 | |
|
ee6f436ef4 | |
|
4b98746f36 | |
|
91d20d533e | |
|
c76a771248 | |
|
158e2811c7 | |
|
0ebb2403f3 | |
|
bd34c39dc4 | |
|
b2fd8a6d8a | |
|
81d27d488a | |
|
2f003e3524 | |
|
4af3163322 | |
|
cc430980d2 | |
|
5735b8ae19 | |
|
9c0357a6f2 | |
|
428802560a | |
|
0a34bf5d3a | |
|
ed621282b5 | |
|
72665b3d1c | |
|
a4d2d6c6e8 | |
|
047e6ad5c8 | |
|
6214833a80 | |
|
4ea61cbc7a | |
|
1969f47f62 | |
|
c34f019bc0 | |
|
b62defb98b | |
|
cc4ac5fa01 | |
|
54fe60ed4d | |
|
d37a07b908 | |
|
b3e4e51513 | |
|
2ad8eb72f2 | |
|
c5f1d25912 | |
|
793c538728 | |
|
4f58055eeb | |
|
68c7d1a84e | |
|
7a98be012e | |
|
55376555a6 | |
|
2837c59d95 | |
|
aa479c92d7 | |
|
1ffc13fc0d | |
|
61b5645bc0 | |
|
6b6e02d3d0 | |
|
073c50f7f8 | |
|
edfda70d9e | |
|
b50491c7b0 | |
|
edcb1f2eaf | |
|
3792af29c0 | |
|
cac225e7a3 | |
|
b9c373435f | |
|
0bee380482 | |
|
f5a6533afa | |
|
43da9b67a7 | |
|
373318b716 | |
|
1aa6f39ad4 | |
|
ac7c5abc24 | |
|
d55fdb24bf | |
|
0b4aae5197 | |
|
f042779d6e | |
|
aae2e8f2a7 | |
|
f4ee7f6423 | |
|
6f7415e481 | |
|
ea72adf17e | |
|
2a7d53fd22 | |
|
6cacce1331 | |
|
8d8e29e3a1 | |
|
71aafda3d3 | |
|
05bd1fb003 | |
|
4f98ba196d | |
|
b8db30c2fb | |
|
c47cb7083c | |
|
4f13cabcb4 | |
|
ad6d6c9871 | |
|
cf115af954 | |
|
3fbacf0d0f | |
|
b66b44621e | |
|
bab77cd338 | |
|
055affac85 | |
|
66cf05c014 | |
|
6ef6f0f9a8 | |
|
4ceccf68cf | |
|
ce71abf7ed | |
|
1c3006af5e | |
|
123420974c | |
|
a769eb1547 | |
|
ca812b36a3 | |
|
65c14d5526 | |
|
b7b30d484a | |
|
3314cf932d | |
|
47402af89e | |
|
2191145b61 | |
|
08e72509a3 | |
|
e359469da4 | |
|
9fba59bbf7 | |
|
10e2339cf2 | |
|
c87e68c01d | |
|
39a5851054 | |
|
fe761bffa6 | |
|
dc77991907 | |
|
78c8173b97 | |
|
95acfef302 | |
|
0f3afebc6a | |
|
1f8f2adf63 | |
|
4d947c7b66 | |
|
14e634746c | |
|
03c290b188 | |
|
3cdfdcc049 | |
|
1e962173a4 | |
|
d7dadcbdcb | |
|
e94bd58774 | |
|
3ba0a7b6e0 | |
|
53849fa60c | |
|
6324acbf6e | |
|
9d70c9095b | |
|
e2bba56680 | |
|
dffe7ac57b | |
|
694cce8ba9 | |
|
1899678640 | |
|
3c1d3f043e | |
|
b1265b279c | |
|
3068ddf5c7 | |
|
47f07e529d | |
|
e02272a4f1 | |
|
a142467cde | |
|
dcaac6df9c | |
|
8e980c074e | |
|
a6a1e1c787 | |
|
6645e94627 | |
|
3392d4f312 | |
|
86eb5513d1 | |
|
f4f67f5994 | |
|
bd15f93bdb | |
|
d8f60fe19e | |
|
86ee2b723a | |
|
133fc60ad9 | |
|
29b611df84 | |
|
e27059ea48 | |
|
d676e072e3 | |
|
ec8b0dac2e | |
|
108abb0c4e | |
|
2d5481da70 | |
|
3291baee04 | |
|
c50040a07c | |
|
26c8bbdeee | |
|
83ce5dd13c | |
|
3c187264cd | |
|
131e95433c | |
|
1d42eb55ea | |
|
cdb90ec4d5 | |
|
49a0c57c79 | |
|
7851e31b43 | |
|
bbcb221106 | |
|
ff3bdd10cb | |
|
f6064ee8e3 | |
|
0b3c289d62 | |
|
9e9f3659f9 | |
|
64ca097c1e | |
|
03b8aeb69f | |
|
9559204f61 | |
|
03e6b2797d | |
|
7e8c41d175 | |
|
c24634507b | |
|
1c26181efd | |
|
1acafd838e | |
|
5b08a02227 | |
|
a3bcef81bf | |
|
abf3e443bf | |
|
5245a5b934 | |
|
3d94088504 | |
|
9d450f8568 | |
|
f8a68efe63 | |
|
eb7a849ea1 | |
|
041e9d4028 | |
|
955d0cab50 | |
|
793bf445ec | |
|
8117b14dca | |
|
c8e87769f4 | |
|
e179f01240 | |
|
c5706a6535 | |
|
ca860369b8 | |
|
8ebead10c3 | |
|
5fb97bf03b | |
|
9cb48ea0c7 | |
|
b8719d3c87 | |
|
082e230b92 | |
|
027795a97c | |
|
9f296c031b | |
|
31f561693d | |
|
97dd5fe4ee | |
|
44711f669a | |
|
ea52310b69 | |
|
37de8cafda | |
|
0ddb3d32f6 | |
|
391de4f160 | |
|
adda3d40bb | |
|
b36e3879a2 | |
|
57580b9a70 | |
|
668ef5ae26 | |
|
0fc79f75f5 | |
|
825f356797 | |
|
06504c2a4c | |
|
4b236492c0 | |
|
d7c325abf7 | |
|
3156829a20 | |
|
e04536d580 | |
|
5504fc5e1d | |
|
a6437956ce | |
|
64a921ca46 | |
|
8e26139039 | |
|
e7811b86fa | |
|
5cd491a5a1 | |
|
221148d668 | |
|
83b693cb64 | |
|
72206c17aa | |
|
415a60f846 | |
|
3d9f1df73c | |
|
66d5e4a5f3 | |
|
65177a2f17 | |
|
8c1671f0e9 | |
|
b676bb91ef | |
|
a587c552af | |
|
87b234f794 | |
|
823bc9d59b | |
|
8530d99795 | |
|
998e45a5d3 | |
|
c5de5c91a3 | |
|
93d1e33d1e | |
|
57312afca1 | |
|
100be41fdb | |
|
5651d9d92d | |
|
cd48b2f8f4 | |
|
40f0c9b312 | |
|
b8851df26f | |
|
0d46bfa691 | |
|
b9bd71c1ab | |
|
e8a3df63b8 | |
|
39882551f7 | |
|
dfb13fb81b | |
|
3e4adddc3a | |
|
989067050a | |
|
03e2795c9f | |
|
b6015193ef | |
|
da7f5817e5 | |
|
db889a1a80 | |
|
e097cb47ef | |
|
3fc72cfbbb | |
|
9ba9113ec9 | |
|
391174612b | |
|
b48163a6fb | |
|
6e8a5483c6 | |
|
dc3491b15e | |
|
39890d7729 | |
|
47fad26c3e | |
|
b45735d63f | |
|
ce01f0210f | |
|
38facfc3dd | |
|
697254c508 | |
|
451df430df | |
|
eeb2ccb61b | |
|
2e770b4a57 | |
|
2db8e36f28 | |
|
b7be1dd3e5 | |
|
883fa4f6a7 | |
|
fd02293651 | |
|
68f75a7299 | |
|
833af67cbd | |
|
b4d8de06d9 | |
|
7943e68bc8 | |
|
2edd0261b5 | |
|
4c98e459f1 | |
|
11740d1398 | |
|
f335e9db73 | |
|
c191b6dcad | |
|
d919930546 | |
|
bd74115003 | |
|
dd6f11b10e | |
|
9d44562d0e | |
|
f544d94fe7 | |
|
024b8d2345 | |
|
2b62a7d6df | |
|
470ced91f5 | |
|
e76466f3b9 | |
|
6bf126af78 | |
|
a5b765b9f9 | |
|
f1a1bab379 | |
|
198211d072 | |
|
a249ca9290 | |
|
d4820d773c | |
|
da04e81a6d | |
|
e26ef60838 | |
|
7f6a0b1460 | |
|
9337c39b36 | |
|
bc83411e2e |
|
@ -7,6 +7,7 @@ updates:
|
|||
open-pull-requests-limit: 0 # setting this to 0 means only allowing security updates, see https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#open-pull-requests-limit
|
||||
labels:
|
||||
- "area/vertical-pod-autoscaler"
|
||||
- "release-note-none"
|
||||
- package-ecosystem: docker
|
||||
directory: "/vertical-pod-autoscaler/pkg/recommender"
|
||||
schedule:
|
||||
|
@ -17,6 +18,7 @@ updates:
|
|||
open-pull-requests-limit: 3
|
||||
labels:
|
||||
- "area/vertical-pod-autoscaler"
|
||||
- "release-note-none"
|
||||
- package-ecosystem: docker
|
||||
directory: "/vertical-pod-autoscaler/pkg/updater"
|
||||
schedule:
|
||||
|
@ -27,6 +29,7 @@ updates:
|
|||
open-pull-requests-limit: 3
|
||||
labels:
|
||||
- "area/vertical-pod-autoscaler"
|
||||
- "release-note-none"
|
||||
- package-ecosystem: docker
|
||||
directory: "/vertical-pod-autoscaler/pkg/admission-controller"
|
||||
schedule:
|
||||
|
@ -37,9 +40,20 @@ updates:
|
|||
open-pull-requests-limit: 3
|
||||
labels:
|
||||
- "area/vertical-pod-autoscaler"
|
||||
- "release-note-none"
|
||||
- package-ecosystem: gomod
|
||||
directory: "/addon-resizer"
|
||||
schedule:
|
||||
interval: daily
|
||||
target-branch: "addon-resizer-release-1.8"
|
||||
open-pull-requests-limit: 3
|
||||
labels:
|
||||
- "release-note-none"
|
||||
- package-ecosystem: "github-actions"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
open-pull-requests-limit: 3
|
||||
labels:
|
||||
- "area/dependency"
|
||||
- "release-note-none"
|
||||
|
|
|
@ -9,20 +9,25 @@ env:
|
|||
|
||||
permissions:
|
||||
contents: read
|
||||
checks: write
|
||||
|
||||
jobs:
|
||||
test-and-verify:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: '1.22.2'
|
||||
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v4.2.2
|
||||
with:
|
||||
path: ${{ env.GOPATH }}/src/k8s.io/autoscaler
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5.5.0
|
||||
with:
|
||||
go-version: '1.24.0'
|
||||
cache-dependency-path: |
|
||||
${{ env.GOPATH}}/src/k8s.io/autoscaler/cluster-autoscaler/go.sum
|
||||
${{ env.GOPATH}}/src/k8s.io/autoscaler/vertical-pod-autoscaler/go.sum
|
||||
${{ env.GOPATH}}/src/k8s.io/autoscaler/vertical-pod-autoscaler/e2e/go.sum
|
||||
|
||||
- name: Apt-get
|
||||
run: sudo apt-get install libseccomp-dev -qq
|
||||
|
||||
|
@ -38,6 +43,12 @@ jobs:
|
|||
env:
|
||||
GO111MODULE: auto
|
||||
|
||||
- name: golangci-lint - vertical-pod-autoscaler
|
||||
uses: golangci/golangci-lint-action@v8
|
||||
with:
|
||||
args: --timeout=30m
|
||||
working-directory: ${{ env.GOPATH }}/src/k8s.io/autoscaler/vertical-pod-autoscaler
|
||||
|
||||
- name: Test
|
||||
working-directory: ${{ env.GOPATH }}/src/k8s.io/autoscaler
|
||||
run: hack/for-go-proj.sh test
|
||||
|
|
|
@ -11,9 +11,9 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4.2.2
|
||||
- id: filter
|
||||
uses: dorny/paths-filter@v2.2.0
|
||||
uses: dorny/paths-filter@v2.11.1
|
||||
with:
|
||||
filters: |
|
||||
charts:
|
||||
|
@ -28,11 +28,11 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4.2.2
|
||||
- name: Fetch history
|
||||
run: git fetch --prune --unshallow
|
||||
- name: Set up chart-testing
|
||||
uses: helm/chart-testing-action@v2.0.1
|
||||
uses: helm/chart-testing-action@v2.6.1
|
||||
- name: Run chart-testing (lint)
|
||||
run: ct lint
|
||||
# Only build a kind cluster if there are chart changes to test.
|
||||
|
@ -45,7 +45,7 @@ jobs:
|
|||
fi
|
||||
- if: steps.list-changed.outputs.changed == 'true'
|
||||
name: Create kind cluster
|
||||
uses: helm/kind-action@v1.1.0
|
||||
uses: helm/kind-action@v1.12.0
|
||||
- if: steps.list-changed.outputs.changed == 'true'
|
||||
name: Run chart-testing (install)
|
||||
run: ct install
|
||||
|
@ -57,7 +57,7 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4.2.2
|
||||
- name: Run helm-docs
|
||||
uses: docker://jnorwood/helm-docs:v1.3.0
|
||||
- name: Check for changes
|
||||
|
|
|
@ -8,7 +8,7 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4.2.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
|
@ -18,7 +18,7 @@ jobs:
|
|||
git config user.email "${GITHUB_ACTOR}@users.noreply.github.com"
|
||||
|
||||
- name: Install Helm
|
||||
uses: azure/setup-helm@v1
|
||||
uses: azure/setup-helm@v4.2.0
|
||||
with:
|
||||
version: v3.4.0
|
||||
|
||||
|
@ -26,7 +26,7 @@ jobs:
|
|||
CR_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
CR_RELEASE_NAME_TEMPLATE: "cluster-autoscaler-chart-{{ .Version }}"
|
||||
name: Run chart-releaser
|
||||
uses: helm/chart-releaser-action@v1.1.0
|
||||
uses: helm/chart-releaser-action@v1.6.0
|
||||
name: Release Charts
|
||||
on:
|
||||
push:
|
||||
|
|
|
@ -13,3 +13,10 @@ repos:
|
|||
files: (README\.md\.gotmpl|(Chart|requirements|values)\.yaml)$
|
||||
repo: https://github.com/norwoodj/helm-docs
|
||||
rev: v1.3.0
|
||||
- hooks:
|
||||
- id : update-flags
|
||||
name: Update Cluster-Autoscaler Flags Table
|
||||
entry: bash cluster-autoscaler/hack/update-faq-flags.sh
|
||||
language: system
|
||||
files: cluster-autoscaler/main\.go
|
||||
repo: local
|
||||
|
|
|
@ -23,7 +23,7 @@ We'd love to accept your patches! Before we can take them, we have to jump a cou
|
|||
|
||||
All changes must be code reviewed. Coding conventions and standards are explained in the official
|
||||
[developer docs](https://github.com/kubernetes/community/tree/master/contributors/devel). Expect
|
||||
reviewers to request that you avoid common [go style mistakes](https://github.com/golang/go/wiki/CodeReviewComments)
|
||||
reviewers to request that you avoid common [go style mistakes](https://go.dev/wiki/CodeReviewComments)
|
||||
in your PRs.
|
||||
|
||||
### Merge Approval
|
||||
|
|
10
OWNERS
10
OWNERS
|
@ -1,10 +1,8 @@
|
|||
approvers:
|
||||
- mwielgus
|
||||
- maciekpytel
|
||||
- gjtempleton
|
||||
- sig-autoscaling-leads
|
||||
reviewers:
|
||||
- mwielgus
|
||||
- maciekpytel
|
||||
- gjtempleton
|
||||
- sig-autoscaling-leads
|
||||
emeritus_approvers:
|
||||
- bskiba # 2022-09-30
|
||||
- mwielgus
|
||||
- maciekpytel
|
||||
|
|
|
@ -0,0 +1,20 @@
|
|||
aliases:
|
||||
sig-autoscaling-leads:
|
||||
- gjtempleton
|
||||
- jackfrancis
|
||||
- raywainman
|
||||
- towca
|
||||
sig-autoscaling-vpa-approvers:
|
||||
- kwiesmueller
|
||||
- jbartosik
|
||||
- voelzmo
|
||||
- raywainman
|
||||
- adrianmoisey
|
||||
- omerap12
|
||||
sig-autoscaling-vpa-reviewers:
|
||||
- kwiesmueller
|
||||
- jbartosik
|
||||
- voelzmo
|
||||
- raywainman
|
||||
- adrianmoisey
|
||||
- omerap12
|
|
@ -12,7 +12,7 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM golang:1.23.2
|
||||
FROM golang:1.24.0
|
||||
|
||||
ENV GOPATH /gopath/
|
||||
ENV PATH $GOPATH/bin:$PATH
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
approvers:
|
||||
- gjtempleton
|
||||
- jackfrancis
|
||||
reviewers:
|
||||
- gjtempleton
|
||||
- Shubham82
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
apiVersion: v2
|
||||
appVersion: 1.31.0
|
||||
appVersion: 1.33.0
|
||||
description: Scales Kubernetes worker nodes within autoscaling groups.
|
||||
engine: gotpl
|
||||
home: https://github.com/kubernetes/autoscaler
|
||||
|
@ -11,4 +11,4 @@ name: cluster-autoscaler
|
|||
sources:
|
||||
- https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler
|
||||
type: application
|
||||
version: 9.43.2
|
||||
version: 9.48.0
|
||||
|
|
|
@ -75,10 +75,11 @@ To create a valid configuration, follow instructions for your cloud provider:
|
|||
- [Cluster API](#cluster-api)
|
||||
- [Exoscale](#exoscale)
|
||||
- [Hetzner Cloud](#hetzner-cloud)
|
||||
- [Civo](#civo)
|
||||
|
||||
### Templating the autoDiscovery.clusterName
|
||||
|
||||
The cluster name can be templated in the `autoDiscovery.clusterName` variable. This is useful when the cluster name is dynamically generated based on other values coming from external systems like Argo CD or Flux. This also allows you to use global Helm values to set the cluster name, e.g., `autoDiscovery.clusterName=\{\{ .Values.global.clusterName }}`, so that you don't need to set it in more than 1 location in the values file.
|
||||
The cluster name can be templated in the `autoDiscovery.clusterName` variable. This is useful when the cluster name is dynamically generated based on other values coming from external systems like Argo CD or Flux. This also allows you to use global Helm values to set the cluster name, e.g., `autoDiscovery.clusterName={{ .Values.global.clusterName }}`, so that you don't need to set it in more than 1 location in the values file.
|
||||
|
||||
### AWS - Using auto-discovery of tagged instance groups
|
||||
|
||||
|
@ -182,11 +183,13 @@ $ helm install my-release autoscaler/cluster-autoscaler \
|
|||
|
||||
Note that `your-ig-prefix` should be a _prefix_ matching one or more MIGs, and _not_ the full name of the MIG. For example, to match multiple instance groups - `k8s-node-group-a-standard`, `k8s-node-group-b-gpu`, you would use a prefix of `k8s-node-group-`.
|
||||
|
||||
Prefixes will be rendered using `tpl` function so you can use any value of your choice if that's a valid prefix. For instance (ignore escaping characters): `gke-{{ .Values.autoDiscovery.clusterName }}`
|
||||
|
||||
In the event you want to explicitly specify MIGs instead of using auto-discovery, set members of the `autoscalingGroups` array directly - e.g.
|
||||
|
||||
```
|
||||
# where 'n' is the index, starting at 0
|
||||
--set autoscalingGroups[n].name=https://content.googleapis.com/compute/v1/projects/$PROJECTID/zones/$ZONENAME/instanceGroupManagers/$FULL-MIG-NAME,autoscalingGroups[n].maxSize=$MAXSIZE,autoscalingGroups[n].minSize=$MINSIZE
|
||||
--set autoscalingGroups[n].name=https://content.googleapis.com/compute/v1/projects/$PROJECTID/zones/$ZONENAME/instanceGroups/$FULL-MIG-NAME,autoscalingGroups[n].maxSize=$MAXSIZE,autoscalingGroups[n].minSize=$MINSIZE
|
||||
```
|
||||
|
||||
### Azure
|
||||
|
@ -282,6 +285,23 @@ Each autoscaling group requires an additional `instanceType` and `region` key to
|
|||
|
||||
Read [cluster-autoscaler/cloudprovider/hetzner/README.md](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/hetzner/README.md) for further information on the setup without helm.
|
||||
|
||||
### Civo
|
||||
|
||||
The following parameters are required:
|
||||
|
||||
- `cloudProvider=civo`
|
||||
- `autoscalingGroups=...`
|
||||
|
||||
When installing the helm chart to the namespace `kube-system`, you can set `secretKeyRefNameOverride` to `civo-api-access`.
|
||||
Otherwise specify the following parameters:
|
||||
|
||||
- `civoApiUrl=https://api.civo.com`
|
||||
- `civoApiKey=...`
|
||||
- `civoClusterID=...`
|
||||
- `civoRegion=...`
|
||||
|
||||
Read [cluster-autoscaler/cloudprovider/civo/README.md](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/civo/README.md) for further information on the setup without helm.
|
||||
|
||||
## Uninstalling the Chart
|
||||
|
||||
To uninstall `my-release`:
|
||||
|
@ -308,7 +328,14 @@ For Kubernetes clusters that use Amazon EKS, the service account can be configur
|
|||
|
||||
In order to accomplish this, you will first need to create a new IAM role with the above mentions policies. Take care in [configuring the trust relationship](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts-technical-overview.html#iam-role-configuration) to restrict access just to the service account used by cluster autoscaler.
|
||||
|
||||
Once you have the IAM role configured, you would then need to `--set rbac.serviceAccount.annotations."eks\.amazonaws\.com/role-arn"=arn:aws:iam::123456789012:role/MyRoleName` when installing.
|
||||
Once you have the IAM role configured, you would then need to `--set rbac.serviceAccount.annotations."eks\.amazonaws\.com/role-arn"=arn:aws:iam::123456789012:role/MyRoleName` when installing. Alternatively, you can embed templates in values (ignore escaping characters):
|
||||
|
||||
```yaml
|
||||
rbac:
|
||||
serviceAccount:
|
||||
annotations:
|
||||
eks.amazonaws.com/role-arn: "{{ .Values.aws.myroleARN }}"
|
||||
```
|
||||
|
||||
### Azure - Using azure workload identity
|
||||
|
||||
|
@ -340,6 +367,14 @@ extraVolumeMounts:
|
|||
readOnly: true
|
||||
```
|
||||
|
||||
### Custom arguments
|
||||
|
||||
You can use the `customArgs` value to give any argument to cluster autoscaler command.
|
||||
|
||||
Typical use case is to give an environment variable as an argument which will be interpolated at execution time.
|
||||
|
||||
This is helpful when you need to inject values from configmap or secret.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
The chart will succeed even if the container arguments are incorrect. A few minutes after starting `kubectl logs -l "app=aws-cluster-autoscaler" --tail=50` should loop through something like
|
||||
|
@ -402,7 +437,7 @@ vpa:
|
|||
| autoscalingGroups | list | `[]` | For AWS, Azure AKS, Exoscale or Magnum. At least one element is required if not using `autoDiscovery`. For example: <pre> - name: asg1<br /> maxSize: 2<br /> minSize: 1 </pre> For Hetzner Cloud, the `instanceType` and `region` keys are also required. <pre> - name: mypool<br /> maxSize: 2<br /> minSize: 1<br /> instanceType: CPX21<br /> region: FSN1 </pre> |
|
||||
| autoscalingGroupsnamePrefix | list | `[]` | For GCE. At least one element is required if not using `autoDiscovery`. For example: <pre> - name: ig01<br /> maxSize: 10<br /> minSize: 0 </pre> |
|
||||
| awsAccessKeyID | string | `""` | AWS access key ID ([if AWS user keys used](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#using-aws-credentials)) |
|
||||
| awsRegion | string | `"us-east-1"` | AWS region (required if `cloudProvider=aws`) |
|
||||
| awsRegion | string | `""` | AWS region (required if `cloudProvider=aws`) |
|
||||
| awsSecretAccessKey | string | `""` | AWS access secret key ([if AWS user keys used](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#using-aws-credentials)) |
|
||||
| azureClientID | string | `""` | Service Principal ClientID with contributor permission to Cluster and Node ResourceGroup. Required if `cloudProvider=azure` |
|
||||
| azureClientSecret | string | `""` | Service Principal ClientSecret with contributor permission to Cluster and Node ResourceGroup. Required if `cloudProvider=azure` |
|
||||
|
@ -412,15 +447,21 @@ vpa:
|
|||
| azureTenantID | string | `""` | Azure tenant where the resources are located. Required if `cloudProvider=azure` |
|
||||
| azureUseManagedIdentityExtension | bool | `false` | Whether to use Azure's managed identity extension for credentials. If using MSI, ensure subscription ID, resource group, and azure AKS cluster name are set. You can only use one authentication method at a time, either azureUseWorkloadIdentityExtension or azureUseManagedIdentityExtension should be set. |
|
||||
| azureUseWorkloadIdentityExtension | bool | `false` | Whether to use Azure's workload identity extension for credentials. See the project here: https://github.com/Azure/azure-workload-identity for more details. You can only use one authentication method at a time, either azureUseWorkloadIdentityExtension or azureUseManagedIdentityExtension should be set. |
|
||||
| azureUserAssignedIdentityID | string | `""` | When vmss has multiple user assigned identity assigned, azureUserAssignedIdentityID specifies which identity to be used |
|
||||
| azureVMType | string | `"vmss"` | Azure VM type. |
|
||||
| civoApiKey | string | `""` | API key for the Civo API. Required if `cloudProvider=civo` |
|
||||
| civoApiUrl | string | `"https://api.civo.com"` | URL for the Civo API. Required if `cloudProvider=civo` |
|
||||
| civoClusterID | string | `""` | Cluster ID for the Civo cluster. Required if `cloudProvider=civo` |
|
||||
| civoRegion | string | `""` | Region for the Civo cluster. Required if `cloudProvider=civo` |
|
||||
| cloudConfigPath | string | `""` | Configuration file for cloud provider. |
|
||||
| cloudProvider | string | `"aws"` | The cloud provider where the autoscaler runs. Currently only `gce`, `aws`, `azure`, `magnum` and `clusterapi` are supported. `aws` supported for AWS. `gce` for GCE. `azure` for Azure AKS. `magnum` for OpenStack Magnum, `clusterapi` for Cluster API. |
|
||||
| cloudProvider | string | `"aws"` | The cloud provider where the autoscaler runs. Currently only `gce`, `aws`, `azure`, `magnum`, `clusterapi` and `civo` are supported. `aws` supported for AWS. `gce` for GCE. `azure` for Azure AKS. `magnum` for OpenStack Magnum, `clusterapi` for Cluster API. `civo` for Civo Cloud. |
|
||||
| clusterAPICloudConfigPath | string | `"/etc/kubernetes/mgmt-kubeconfig"` | Path to kubeconfig for connecting to Cluster API Management Cluster, only used if `clusterAPIMode=kubeconfig-kubeconfig or incluster-kubeconfig` |
|
||||
| clusterAPIConfigMapsNamespace | string | `""` | Namespace on the workload cluster to store Leader election and status configmaps |
|
||||
| clusterAPIKubeconfigSecret | string | `""` | Secret containing kubeconfig for connecting to Cluster API managed workloadcluster Required if `cloudProvider=clusterapi` and `clusterAPIMode=kubeconfig-kubeconfig,kubeconfig-incluster or incluster-kubeconfig` |
|
||||
| clusterAPIMode | string | `"incluster-incluster"` | Cluster API mode, see https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/clusterapi/README.md#connecting-cluster-autoscaler-to-cluster-api-management-and-workload-clusters Syntax: workloadClusterMode-ManagementClusterMode for `kubeconfig-kubeconfig`, `incluster-kubeconfig` and `single-kubeconfig` you always must mount the external kubeconfig using either `extraVolumeSecrets` or `extraMounts` and `extraVolumes` if you dont set `clusterAPIKubeconfigSecret`and thus use an in-cluster config or want to use a non capi generated kubeconfig you must do so for the workload kubeconfig as well |
|
||||
| clusterAPIWorkloadKubeconfigPath | string | `"/etc/kubernetes/value"` | Path to kubeconfig for connecting to Cluster API managed workloadcluster, only used if `clusterAPIMode=kubeconfig-kubeconfig or kubeconfig-incluster` |
|
||||
| containerSecurityContext | object | `{}` | [Security context for container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) |
|
||||
| customArgs | list | `[]` | Additional custom container arguments. Refer to https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#what-are-the-parameters-to-ca for the full list of cluster autoscaler parameters and their default values. List of arguments as strings. |
|
||||
| deployment.annotations | object | `{}` | Annotations to add to the Deployment object. |
|
||||
| dnsPolicy | string | `"ClusterFirst"` | Defaults to `ClusterFirst`. Valid values are: `ClusterFirstWithHostNet`, `ClusterFirst`, `Default` or `None`. If autoscaler does not depend on cluster DNS, recommended to set this to `Default`. |
|
||||
| envFromConfigMap | string | `""` | ConfigMap name to use as envFrom. |
|
||||
|
@ -439,7 +480,7 @@ vpa:
|
|||
| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy |
|
||||
| image.pullSecrets | list | `[]` | Image pull secrets |
|
||||
| image.repository | string | `"registry.k8s.io/autoscaling/cluster-autoscaler"` | Image repository |
|
||||
| image.tag | string | `"v1.31.0"` | Image tag |
|
||||
| image.tag | string | `"v1.33.0"` | Image tag |
|
||||
| initContainers | list | `[]` | Any additional init containers. |
|
||||
| kubeTargetVersionOverride | string | `""` | Allow overriding the `.Capabilities.KubeVersion.GitVersion` check. Useful for `helm template` commands. |
|
||||
| kwokConfigMapName | string | `"kwok-provider-config"` | configmap for configuring kwok provider |
|
||||
|
@ -457,6 +498,7 @@ vpa:
|
|||
| prometheusRule.interval | string | `nil` | How often rules in the group are evaluated (falls back to `global.evaluation_interval` if not set). |
|
||||
| prometheusRule.namespace | string | `"monitoring"` | Namespace which Prometheus is running in. |
|
||||
| prometheusRule.rules | list | `[]` | Rules spec template (see https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#rule). |
|
||||
| rbac.additionalRules | list | `[]` | Additional rules for role/clusterrole |
|
||||
| rbac.clusterScoped | bool | `true` | if set to false will only provision RBAC to alter resources in the current namespace. Most useful for Cluster-API |
|
||||
| rbac.create | bool | `true` | If `true`, create and use RBAC resources. |
|
||||
| rbac.pspEnabled | bool | `false` | If `true`, creates and uses RBAC resources required in the cluster with [Pod Security Policies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) enabled. Must be used with `rbac.create` set to `true`. |
|
||||
|
@ -467,7 +509,7 @@ vpa:
|
|||
| replicaCount | int | `1` | Desired number of pods |
|
||||
| resources | object | `{}` | Pod resource requests and limits. |
|
||||
| revisionHistoryLimit | int | `10` | The number of revisions to keep. |
|
||||
| secretKeyRefNameOverride | string | `""` | Overrides the name of the Secret to use when loading the secretKeyRef for AWS and Azure env variables |
|
||||
| secretKeyRefNameOverride | string | `""` | Overrides the name of the Secret to use when loading the secretKeyRef for AWS, Azure and Civo env variables |
|
||||
| securityContext | object | `{}` | [Security context for pod](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) |
|
||||
| service.annotations | object | `{}` | Annotations to add to service |
|
||||
| service.clusterIP | string | `""` | IP address to assign to service |
|
||||
|
|
|
@ -75,10 +75,11 @@ To create a valid configuration, follow instructions for your cloud provider:
|
|||
- [Cluster API](#cluster-api)
|
||||
- [Exoscale](#exoscale)
|
||||
- [Hetzner Cloud](#hetzner-cloud)
|
||||
- [Civo](#civo)
|
||||
|
||||
### Templating the autoDiscovery.clusterName
|
||||
|
||||
The cluster name can be templated in the `autoDiscovery.clusterName` variable. This is useful when the cluster name is dynamically generated based on other values coming from external systems like Argo CD or Flux. This also allows you to use global Helm values to set the cluster name, e.g., `autoDiscovery.clusterName=\{\{ .Values.global.clusterName }}`, so that you don't need to set it in more than 1 location in the values file.
|
||||
The cluster name can be templated in the `autoDiscovery.clusterName` variable. This is useful when the cluster name is dynamically generated based on other values coming from external systems like Argo CD or Flux. This also allows you to use global Helm values to set the cluster name, e.g., `autoDiscovery.clusterName={{`{{ .Values.global.clusterName }}`}}`, so that you don't need to set it in more than 1 location in the values file.
|
||||
|
||||
### AWS - Using auto-discovery of tagged instance groups
|
||||
|
||||
|
@ -182,11 +183,13 @@ $ helm install my-release autoscaler/cluster-autoscaler \
|
|||
|
||||
Note that `your-ig-prefix` should be a _prefix_ matching one or more MIGs, and _not_ the full name of the MIG. For example, to match multiple instance groups - `k8s-node-group-a-standard`, `k8s-node-group-b-gpu`, you would use a prefix of `k8s-node-group-`.
|
||||
|
||||
Prefixes will be rendered using `tpl` function so you can use any value of your choice if that's a valid prefix. For instance (ignore escaping characters): `gke-{{`{{ .Values.autoDiscovery.clusterName }}`}}`
|
||||
|
||||
In the event you want to explicitly specify MIGs instead of using auto-discovery, set members of the `autoscalingGroups` array directly - e.g.
|
||||
|
||||
```
|
||||
# where 'n' is the index, starting at 0
|
||||
--set autoscalingGroups[n].name=https://content.googleapis.com/compute/v1/projects/$PROJECTID/zones/$ZONENAME/instanceGroupManagers/$FULL-MIG-NAME,autoscalingGroups[n].maxSize=$MAXSIZE,autoscalingGroups[n].minSize=$MINSIZE
|
||||
--set autoscalingGroups[n].name=https://content.googleapis.com/compute/v1/projects/$PROJECTID/zones/$ZONENAME/instanceGroups/$FULL-MIG-NAME,autoscalingGroups[n].maxSize=$MAXSIZE,autoscalingGroups[n].minSize=$MINSIZE
|
||||
```
|
||||
|
||||
### Azure
|
||||
|
@ -282,6 +285,23 @@ Each autoscaling group requires an additional `instanceType` and `region` key to
|
|||
|
||||
Read [cluster-autoscaler/cloudprovider/hetzner/README.md](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/hetzner/README.md) for further information on the setup without helm.
|
||||
|
||||
### Civo
|
||||
|
||||
The following parameters are required:
|
||||
|
||||
- `cloudProvider=civo`
|
||||
- `autoscalingGroups=...`
|
||||
|
||||
When installing the helm chart to the namespace `kube-system`, you can set `secretKeyRefNameOverride` to `civo-api-access`.
|
||||
Otherwise specify the following parameters:
|
||||
|
||||
- `civoApiUrl=https://api.civo.com`
|
||||
- `civoApiKey=...`
|
||||
- `civoClusterID=...`
|
||||
- `civoRegion=...`
|
||||
|
||||
Read [cluster-autoscaler/cloudprovider/civo/README.md](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/civo/README.md) for further information on the setup without helm.
|
||||
|
||||
## Uninstalling the Chart
|
||||
|
||||
To uninstall `my-release`:
|
||||
|
@ -308,7 +328,14 @@ For Kubernetes clusters that use Amazon EKS, the service account can be configur
|
|||
|
||||
In order to accomplish this, you will first need to create a new IAM role with the above mentions policies. Take care in [configuring the trust relationship](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts-technical-overview.html#iam-role-configuration) to restrict access just to the service account used by cluster autoscaler.
|
||||
|
||||
Once you have the IAM role configured, you would then need to `--set rbac.serviceAccount.annotations."eks\.amazonaws\.com/role-arn"=arn:aws:iam::123456789012:role/MyRoleName` when installing.
|
||||
Once you have the IAM role configured, you would then need to `--set rbac.serviceAccount.annotations."eks\.amazonaws\.com/role-arn"=arn:aws:iam::123456789012:role/MyRoleName` when installing. Alternatively, you can embed templates in values (ignore escaping characters):
|
||||
|
||||
```yaml
|
||||
rbac:
|
||||
serviceAccount:
|
||||
annotations:
|
||||
eks.amazonaws.com/role-arn: "{{`{{ .Values.aws.myroleARN `}}}}"
|
||||
```
|
||||
|
||||
### Azure - Using azure workload identity
|
||||
|
||||
|
@ -340,6 +367,14 @@ extraVolumeMounts:
|
|||
readOnly: true
|
||||
```
|
||||
|
||||
### Custom arguments
|
||||
|
||||
You can use the `customArgs` value to give any argument to cluster autoscaler command.
|
||||
|
||||
Typical use case is to give an environment variable as an argument which will be interpolated at execution time.
|
||||
|
||||
This is helpful when you need to inject values from configmap or secret.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
The chart will succeed even if the container arguments are incorrect. A few minutes after starting `kubectl logs -l "app=aws-cluster-autoscaler" --tail=50` should loop through something like
|
||||
|
|
|
@ -111,6 +111,7 @@ rules:
|
|||
- csinodes
|
||||
- csidrivers
|
||||
- csistoragecapacities
|
||||
- volumeattachments
|
||||
verbs:
|
||||
- watch
|
||||
- list
|
||||
|
@ -172,4 +173,7 @@ rules:
|
|||
- patch
|
||||
- update
|
||||
{{- end }}
|
||||
{{- if .Values.rbac.additionalRules }}
|
||||
{{ toYaml .Values.rbac.additionalRules | indent 2 }}
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
|
|
|
@ -86,7 +86,7 @@ spec:
|
|||
{{- else if eq .Values.cloudProvider "gce" }}
|
||||
{{- if .Values.autoscalingGroupsnamePrefix }}
|
||||
{{- range .Values.autoscalingGroupsnamePrefix }}
|
||||
- --node-group-auto-discovery=mig:namePrefix={{ .name }},min={{ .minSize }},max={{ .maxSize }}
|
||||
- --node-group-auto-discovery=mig:namePrefix={{ tpl .name $ }},min={{ .minSize }},max={{ .maxSize }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if eq .Values.cloudProvider "oci" }}
|
||||
|
@ -103,7 +103,7 @@ spec:
|
|||
- --cluster-name={{ tpl (.Values.magnumClusterName) . }}
|
||||
{{- end }}
|
||||
{{- else if eq .Values.cloudProvider "clusterapi" }}
|
||||
{{- if or .Values.autoDiscovery.clusterName .Values.autoDiscovery.labels .Values.autoDiscovery.namepace }}
|
||||
{{- if or .Values.autoDiscovery.clusterName .Values.autoDiscovery.labels .Values.autoDiscovery.namespace }}
|
||||
- --node-group-auto-discovery=clusterapi:{{ template "cluster-autoscaler.capiAutodiscoveryConfig" . }}
|
||||
{{- end }}
|
||||
{{- if eq .Values.clusterAPIMode "incluster-kubeconfig"}}
|
||||
|
@ -132,6 +132,9 @@ spec:
|
|||
- --{{ $key | mustRegexFind "^[^_]+" }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- range .Values.customArgs }}
|
||||
- {{ . }}
|
||||
{{- end }}
|
||||
env:
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
|
@ -141,9 +144,9 @@ spec:
|
|||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.serviceAccountName
|
||||
{{- if and (eq .Values.cloudProvider "aws") (ne .Values.awsRegion "") }}
|
||||
{{- if and (eq .Values.cloudProvider "aws") (ne (tpl .Values.awsRegion $) "") }}
|
||||
- name: AWS_REGION
|
||||
value: "{{ .Values.awsRegion }}"
|
||||
value: "{{ tpl .Values.awsRegion $ }}"
|
||||
{{- if .Values.awsAccessKeyID }}
|
||||
- name: AWS_ACCESS_KEY_ID
|
||||
valueFrom:
|
||||
|
@ -182,6 +185,11 @@ spec:
|
|||
{{- else if .Values.azureUseManagedIdentityExtension }}
|
||||
- name: ARM_USE_MANAGED_IDENTITY_EXTENSION
|
||||
value: "true"
|
||||
- name: ARM_USER_ASSIGNED_IDENTITY_ID
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: UserAssignedIdentityID
|
||||
name: {{ template "cluster-autoscaler.fullname" . }}
|
||||
{{- else }}
|
||||
- name: ARM_TENANT_ID
|
||||
valueFrom:
|
||||
|
@ -218,6 +226,27 @@ spec:
|
|||
{{- else if eq .Values.cloudProvider "kwok" }}
|
||||
- name: KWOK_PROVIDER_CONFIGMAP
|
||||
value: "{{.Values.kwokConfigMapName | default "kwok-provider-config"}}"
|
||||
{{- else if eq .Values.cloudProvider "civo" }}
|
||||
- name: CIVO_API_URL
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: api-url
|
||||
name: {{ default (include "cluster-autoscaler.fullname" .) .Values.secretKeyRefNameOverride }}
|
||||
- name: CIVO_API_KEY
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: api-key
|
||||
name: {{ default (include "cluster-autoscaler.fullname" .) .Values.secretKeyRefNameOverride }}
|
||||
- name: CIVO_CLUSTER_ID
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: cluster-id
|
||||
name: {{ default (include "cluster-autoscaler.fullname" .) .Values.secretKeyRefNameOverride }}
|
||||
- name: CIVO_REGION
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: region
|
||||
name: {{ default (include "cluster-autoscaler.fullname" .) .Values.secretKeyRefNameOverride }}
|
||||
{{- end }}
|
||||
{{- range $key, $value := .Values.extraEnv }}
|
||||
- name: {{ $key }}
|
||||
|
|
|
@ -1,5 +1,7 @@
|
|||
{{- if .Values.podDisruptionBudget -}}
|
||||
apiVersion: {{ template "podDisruptionBudget.apiVersion" . }}
|
||||
{{- if and .Values.podDisruptionBudget.minAvailable .Values.podDisruptionBudget.maxUnavailable }}
|
||||
{{- fail "Only one of podDisruptionBudget.minAvailable or podDisruptionBudget.maxUnavailable should be set." }}
|
||||
{{- end }}apiVersion: {{ template "podDisruptionBudget.apiVersion" . }}
|
||||
kind: PodDisruptionBudget
|
||||
metadata:
|
||||
labels:
|
||||
|
@ -10,7 +12,10 @@ spec:
|
|||
selector:
|
||||
matchLabels:
|
||||
{{ include "cluster-autoscaler.instance-name" . | indent 6 }}
|
||||
{{- if .Values.podDisruptionBudget }}
|
||||
{{ toYaml .Values.podDisruptionBudget | nindent 2 }}
|
||||
{{- end }}
|
||||
{{- if and .Values.podDisruptionBudget.minAvailable (not .Values.podDisruptionBudget.maxUnavailable) }}
|
||||
minAvailable: {{ .Values.podDisruptionBudget.minAvailable }}
|
||||
{{- end }}
|
||||
{{- if and .Values.podDisruptionBudget.maxUnavailable (not .Values.podDisruptionBudget.minAvailable) }}
|
||||
maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }}
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
|
|
|
@ -83,5 +83,8 @@ rules:
|
|||
verbs:
|
||||
- get
|
||||
- update
|
||||
{{- if .Values.rbac.additionalRules }}
|
||||
{{ toYaml .Values.rbac.additionalRules | indent 2}}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
|
|
|
@ -2,8 +2,9 @@
|
|||
{{- $isAzure := eq .Values.cloudProvider "azure" }}
|
||||
{{- $isAws := eq .Values.cloudProvider "aws" }}
|
||||
{{- $awsCredentialsProvided := and .Values.awsAccessKeyID .Values.awsSecretAccessKey }}
|
||||
{{- $isCivo := eq .Values.cloudProvider "civo" }}
|
||||
|
||||
{{- if or $isAzure (and $isAws $awsCredentialsProvided) }}
|
||||
{{- if or $isAzure (and $isAws $awsCredentialsProvided) $isCivo }}
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
|
@ -17,9 +18,15 @@ data:
|
|||
SubscriptionID: "{{ .Values.azureSubscriptionID | b64enc }}"
|
||||
TenantID: "{{ .Values.azureTenantID | b64enc }}"
|
||||
VMType: "{{ .Values.azureVMType | b64enc }}"
|
||||
UserAssignedIdentityID: "{{ .Values.azureUserAssignedIdentityID | b64enc }}"
|
||||
{{- else if $isAws }}
|
||||
AwsAccessKeyId: "{{ .Values.awsAccessKeyID | b64enc }}"
|
||||
AwsSecretAccessKey: "{{ .Values.awsSecretAccessKey | b64enc }}"
|
||||
{{- else if $isCivo }}
|
||||
api-url: "{{ .Values.civoApiUrl | b64enc }}"
|
||||
api-key: "{{ .Values.civoApiKey | b64enc }}"
|
||||
cluster-id: "{{ .Values.civoClusterID | b64enc }}"
|
||||
region: "{{ .Values.civoRegion | b64enc }}"
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
|
|
@ -6,8 +6,12 @@ metadata:
|
|||
{{ include "cluster-autoscaler.labels" . | indent 4 }}
|
||||
name: {{ template "cluster-autoscaler.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- if .Values.rbac.serviceAccount.annotations }}
|
||||
annotations: {{ toYaml .Values.rbac.serviceAccount.annotations | nindent 4 }}
|
||||
|
||||
{{- with .Values.rbac.serviceAccount.annotations }}
|
||||
annotations:
|
||||
{{- range $k, $v := . }}
|
||||
{{- printf "%s: %s" (tpl $k $) (tpl $v $) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
automountServiceAccountToken: {{ .Values.rbac.serviceAccount.automountServiceAccountToken }}
|
||||
{{- end }}
|
||||
|
|
|
@ -73,7 +73,7 @@ autoscalingGroupsnamePrefix: []
|
|||
awsAccessKeyID: ""
|
||||
|
||||
# awsRegion -- AWS region (required if `cloudProvider=aws`)
|
||||
awsRegion: us-east-1
|
||||
awsRegion: ""
|
||||
|
||||
# awsSecretAccessKey -- AWS access secret key ([if AWS user keys used](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#using-aws-credentials))
|
||||
awsSecretAccessKey: ""
|
||||
|
@ -101,6 +101,9 @@ azureTenantID: ""
|
|||
# azureUseManagedIdentityExtension -- Whether to use Azure's managed identity extension for credentials. If using MSI, ensure subscription ID, resource group, and azure AKS cluster name are set. You can only use one authentication method at a time, either azureUseWorkloadIdentityExtension or azureUseManagedIdentityExtension should be set.
|
||||
azureUseManagedIdentityExtension: false
|
||||
|
||||
# azureUserAssignedIdentityID -- When vmss has multiple user assigned identity assigned, azureUserAssignedIdentityID specifies which identity to be used
|
||||
azureUserAssignedIdentityID: ""
|
||||
|
||||
# azureUseWorkloadIdentityExtension -- Whether to use Azure's workload identity extension for credentials. See the project here: https://github.com/Azure/azure-workload-identity for more details. You can only use one authentication method at a time, either azureUseWorkloadIdentityExtension or azureUseManagedIdentityExtension should be set.
|
||||
azureUseWorkloadIdentityExtension: false
|
||||
|
||||
|
@ -110,13 +113,30 @@ azureVMType: "vmss"
|
|||
# azureEnableForceDelete -- Whether to force delete VMs or VMSS instances when scaling down.
|
||||
azureEnableForceDelete: false
|
||||
|
||||
# civoApiUrl -- URL for the Civo API.
|
||||
# Required if `cloudProvider=civo`
|
||||
civoApiUrl: "https://api.civo.com"
|
||||
|
||||
# civoApiKey -- API key for the Civo API.
|
||||
# Required if `cloudProvider=civo`
|
||||
civoApiKey: ""
|
||||
|
||||
# civoClusterID -- Cluster ID for the Civo cluster.
|
||||
# Required if `cloudProvider=civo`
|
||||
civoClusterID: ""
|
||||
|
||||
# civoRegion -- Region for the Civo cluster.
|
||||
# Required if `cloudProvider=civo`
|
||||
civoRegion: ""
|
||||
|
||||
# cloudConfigPath -- Configuration file for cloud provider.
|
||||
cloudConfigPath: ""
|
||||
|
||||
# cloudProvider -- The cloud provider where the autoscaler runs.
|
||||
# Currently only `gce`, `aws`, `azure`, `magnum` and `clusterapi` are supported.
|
||||
# Currently only `gce`, `aws`, `azure`, `magnum`, `clusterapi` and `civo` are supported.
|
||||
# `aws` supported for AWS. `gce` for GCE. `azure` for Azure AKS.
|
||||
# `magnum` for OpenStack Magnum, `clusterapi` for Cluster API.
|
||||
# `civo` for Civo Cloud.
|
||||
cloudProvider: aws
|
||||
|
||||
# clusterAPICloudConfigPath -- Path to kubeconfig for connecting to Cluster API Management Cluster, only used if `clusterAPIMode=kubeconfig-kubeconfig or incluster-kubeconfig`
|
||||
|
@ -192,10 +212,18 @@ extraArgs:
|
|||
# scale-down-delay-after-delete: 0s
|
||||
# scale-down-delay-after-failure: 3m
|
||||
# scale-down-unneeded-time: 10m
|
||||
# node-deletion-delay-timeout: 2m
|
||||
# node-deletion-batcher-interval: 0s
|
||||
# skip-nodes-with-system-pods: true
|
||||
# balancing-ignore-label_1: first-label-to-ignore
|
||||
# balancing-ignore-label_2: second-label-to-ignore
|
||||
|
||||
# customArgs -- Additional custom container arguments.
|
||||
# Refer to https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#what-are-the-parameters-to-ca for the full list of cluster autoscaler
|
||||
# parameters and their default values.
|
||||
# List of arguments as strings.
|
||||
customArgs: []
|
||||
|
||||
# extraEnv -- Additional container environment variables.
|
||||
extraEnv: {}
|
||||
|
||||
|
@ -257,7 +285,7 @@ image:
|
|||
# image.repository -- Image repository
|
||||
repository: registry.k8s.io/autoscaling/cluster-autoscaler
|
||||
# image.tag -- Image tag
|
||||
tag: v1.31.0
|
||||
tag: v1.33.0
|
||||
# image.pullPolicy -- Image pull policy
|
||||
pullPolicy: IfNotPresent
|
||||
## Optionally specify an array of imagePullSecrets.
|
||||
|
@ -338,6 +366,17 @@ rbac:
|
|||
name: ""
|
||||
# rbac.serviceAccount.automountServiceAccountToken -- Automount API credentials for a Service Account.
|
||||
automountServiceAccountToken: true
|
||||
# rbac.additionalRules -- Additional rules for role/clusterrole
|
||||
additionalRules: []
|
||||
# - apiGroups:
|
||||
# - infrastructure.cluster.x-k8s.io
|
||||
# resources:
|
||||
# - kubemarkmachinetemplates
|
||||
# verbs:
|
||||
# - get
|
||||
# - list
|
||||
# - watch
|
||||
|
||||
|
||||
# replicaCount -- Desired number of pods
|
||||
replicaCount: 1
|
||||
|
@ -438,5 +477,5 @@ vpa:
|
|||
# vpa.containerPolicy -- [ContainerResourcePolicy](https://github.com/kubernetes/autoscaler/blob/vertical-pod-autoscaler/v0.13.0/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1/types.go#L159). The containerName is always et to the deployment's container name. This value is required if VPA is enabled.
|
||||
containerPolicy: {}
|
||||
|
||||
# secretKeyRefNameOverride -- Overrides the name of the Secret to use when loading the secretKeyRef for AWS and Azure env variables
|
||||
# secretKeyRefNameOverride -- Overrides the name of the Secret to use when loading the secretKeyRef for AWS, Azure and Civo env variables
|
||||
secretKeyRefNameOverride: ""
|
||||
|
|
|
@ -11,9 +11,20 @@
|
|||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
ARG BASEIMAGE=gcr.io/distroless/static:nonroot-arm64
|
||||
FROM $BASEIMAGE
|
||||
FROM --platform=$BUILDPLATFORM golang:1.24 as builder
|
||||
|
||||
WORKDIR /workspace
|
||||
|
||||
COPY . .
|
||||
|
||||
ARG GOARCH
|
||||
ARG LDFLAGS_FLAG
|
||||
ARG TAGS_FLAG
|
||||
|
||||
RUN CGO_ENABLED=0 GOOS=linux go build -o cluster-autoscaler-$GOARCH $LDFLAGS_FLAG $TAGS_FLAG
|
||||
FROM gcr.io/distroless/static:nonroot
|
||||
ARG GOARCH
|
||||
COPY --from=builder /workspace/cluster-autoscaler-$GOARCH /cluster-autoscaler
|
||||
|
||||
COPY cluster-autoscaler-arm64 /cluster-autoscaler
|
||||
WORKDIR /
|
||||
CMD ["/cluster-autoscaler"]
|
|
@ -629,6 +629,19 @@ When using this class, Cluster Autoscaler performs following actions:
|
|||
Adds a Provisioned=True condition to the ProvReq if capacity is available.
|
||||
Adds a BookingExpired=True condition when the 10-minute reservation period expires.
|
||||
|
||||
Since Cluster Autoscaler version 1.33, it is possible to configure the autoscaler
|
||||
to process only subset of check capacity ProvisioningRequests and ignore the rest.
|
||||
It should be done with caution by specifying `--check-capacity-processor-instance=<name>` flag.
|
||||
Then, ProvReq Parameters map should contain a key "processorInstance" with a value equal to the configured instance name.
|
||||
|
||||
This allows to run two Cluster Autoscalers in the cluster, but the second instance (likely this with configured instance name)
|
||||
**should only** handle check capacity ProvisioningRequests and not overlap node groups with the main instance.
|
||||
It is responsibility of the user to ensure the capacity checks are not overlapping.
|
||||
Best-effort atomic ProvisioningRequests processing is disabled in the instance that has this flag set.
|
||||
|
||||
For backwards compatibility, it is possible to differentiate the ProvReqs by prefixing provisioningClassName with the instance name,
|
||||
but it is **not recommended** and will be removed in CA 1.35.
|
||||
|
||||
* `best-effort-atomic-scale-up.autoscaling.x-k8s.io` (supported from Cluster Autoscaler version 1.30.2 or later).
|
||||
When using this class, Cluster Autoscaler performs following actions:
|
||||
|
||||
|
@ -735,12 +748,12 @@ setting the following flag in your Cluster Autoscaler configuration:
|
|||
3. **Batch Size**: Set the maximum number of CheckCapacity ProvisioningRequests
|
||||
to process in a single iteration by setting the following flag in your Cluster
|
||||
Autoscaler configuration:
|
||||
`--max-batch-size=<batch-size>`. The default value is 10.
|
||||
`--check-capacity-provisioning-request-max-batch-size=<batch-size>`. The default value is 10.
|
||||
|
||||
4. **Batch Timebox**: Set the maximum time in seconds that Cluster Autoscaler will
|
||||
spend processing CheckCapacity ProvisioningRequests in a single iteration by
|
||||
setting the following flag in your Cluster Autoscaler configuration:
|
||||
`--batch-timebox=<timebox>`. The default value is 10s.
|
||||
`--check-capacity-provisioning-request-batch-timebox=<timebox>`. The default value is 10s.
|
||||
|
||||
****************
|
||||
|
||||
|
@ -924,7 +937,7 @@ Expanders can be selected by passing the name to the `--expander` flag, i.e.
|
|||
|
||||
Currently Cluster Autoscaler has 5 expanders:
|
||||
|
||||
* `random` - this is the default expander, and should be used when you don't have a particular
|
||||
* `random` - should be used when you don't have a particular
|
||||
need for the node groups to scale differently.
|
||||
|
||||
* `most-pods` - selects the node group that would be able to schedule the most pods when scaling
|
||||
|
@ -932,7 +945,7 @@ up. This is useful when you are using nodeSelector to make sure certain pods lan
|
|||
Note that this won't cause the autoscaler to select bigger nodes vs. smaller, as it can add multiple
|
||||
smaller nodes at once.
|
||||
|
||||
* `least-waste` - selects the node group that will have the least idle CPU (if tied, unused memory)
|
||||
* `least-waste` - this is the default expander, selects the node group that will have the least idle CPU (if tied, unused memory)
|
||||
after scale-up. This is useful when you have different classes of nodes, for example, high CPU or high memory nodes, and only want to expand those when there are pending pods that need a lot of those resources.
|
||||
|
||||
* `least-nodes` - selects the node group that will use the least number of nodes after scale-up. This is useful when you want to minimize the number of nodes in the cluster and instead opt for fewer larger nodes. Useful when chained with the `most-pods` expander before it to ensure that the node group selected can fit the most pods on the fewest nodes.
|
||||
|
@ -962,72 +975,152 @@ The following startup parameters are supported for cluster autoscaler:
|
|||
|
||||
| Parameter | Description | Default |
|
||||
| --- | --- | --- |
|
||||
| `cluster-name` | Autoscaled cluster name, if available | ""
|
||||
| `address` | The address to expose prometheus metrics | :8085
|
||||
| `kubernetes` | Kubernetes API Server location. Leave blank for default | ""
|
||||
| `kubeconfig` | Path to kubeconfig file with authorization and API Server location information | ""
|
||||
| `cloud-config` | The path to the cloud provider configuration file. Empty string for no configuration file | ""
|
||||
| `namespace` | Namespace in which cluster-autoscaler run | "kube-system"
|
||||
| `enforce-node-group-min-size` | Should CA scale up the node group to the configured min size if needed | false
|
||||
| `scale-down-enabled` | Should CA scale down the cluster | true
|
||||
| `scale-down-delay-after-add` | How long after scale up that scale down evaluation resumes | 10 minutes
|
||||
| `scale-down-delay-after-delete` | How long after node deletion that scale down evaluation resumes, defaults to scan-interval | scan-interval
|
||||
| `scale-down-delay-after-failure` | How long after scale down failure that scale down evaluation resumes | 3 minutes
|
||||
| `scale-down-unneeded-time` | How long a node should be unneeded before it is eligible for scale down | 10 minutes
|
||||
| `scale-down-unready-time` | How long an unready node should be unneeded before it is eligible for scale down | 20 minutes
|
||||
| `scale-down-utilization-threshold` | The ratio of requested/allocatable resources below which a node can be scale down. Ratio is calculated from the maximum of sum of cpu requests and sum of memory requests of all pods running on the node, divided by node's corresponding allocatable resource. For GPU see scale-down-gpu-utilization-threshold (nodes labeled via GPULabel, see cloudprovider/*/README.md). This value is a floating point number that can range between zero and one. | 0.5
|
||||
| `scale-down-gpu-utilization-threshold` | Sum of gpu requests of all pods running on the node divided by node's allocatable resource, below which a node can be considered for scale down. Utilization calculation only cares about gpu resource for accelerator node, cpu and memory utilization will be ignored. | 0.5
|
||||
| `scale-down-non-empty-candidates-count` | Maximum number of non empty nodes considered in one iteration as candidates for scale down with drain<br>Lower value means better CA responsiveness but possible slower scale down latency<br>Higher value can affect CA performance with big clusters (hundreds of nodes)<br>Set to non positive value to turn this heuristic off - CA will not limit the number of nodes it considers." | 30
|
||||
| `scale-down-candidates-pool-ratio` | A ratio of nodes that are considered as additional non empty candidates for<br>scale down when some candidates from previous iteration are no longer valid<br>Lower value means better CA responsiveness but possible slower scale down latency<br>Higher value can affect CA performance with big clusters (hundreds of nodes)<br>Set to 1.0 to turn this heuristics off - CA will take all nodes as additional candidates. | 0.1
|
||||
| `scale-down-candidates-pool-min-count` | Minimum number of nodes that are considered as additional non empty candidates<br>for scale down when some candidates from previous iteration are no longer valid.<br>When calculating the pool size for additional candidates we take<br>`max(#nodes * scale-down-candidates-pool-ratio, scale-down-candidates-pool-min-count)` | 50
|
||||
| `scan-interval` | How often cluster is reevaluated for scale up or down | 10 seconds
|
||||
| `max-nodes-total` | Maximum number of nodes in all node groups. Cluster autoscaler will not grow the cluster beyond this number. | 0
|
||||
| `cores-total` | Minimum and maximum number of cores in cluster, in the format \<min>:\<max>. Cluster autoscaler will not scale the cluster beyond these numbers. | 320000
|
||||
| `memory-total` | Minimum and maximum number of gigabytes of memory in cluster, in the format \<min>:\<max>. Cluster autoscaler will not scale the cluster beyond these numbers. | 6400000
|
||||
| `gpu-total` | Minimum and maximum number of different GPUs in cluster, in the format <gpu_type>:\<min>:\<max>. Cluster autoscaler will not scale the cluster beyond these numbers. Can be passed multiple times. CURRENTLY THIS FLAG ONLY WORKS ON GKE. | ""
|
||||
| `cloud-provider` | Cloud provider type. | gce
|
||||
| `max-empty-bulk-delete` | Maximum number of empty nodes that can be deleted at the same time. | 10
|
||||
| `max-graceful-termination-sec` | Maximum number of seconds CA waits for pod termination when trying to scale down a node. | 600
|
||||
| `max-total-unready-percentage` | Maximum percentage of unready nodes in the cluster. After this is exceeded, CA halts operations | 45
|
||||
| `ok-total-unready-count` | Number of allowed unready nodes, irrespective of max-total-unready-percentage | 3
|
||||
| `max-node-provision-time` | Maximum time CA waits for node to be provisioned | 15 minutes
|
||||
| `nodes` | sets min,max size and other configuration data for a node group in a format accepted by cloud provider. Can be used multiple times. Format: \<min>:\<max>:<other...> | ""
|
||||
| `node-group-auto-discovery` | One or more definition(s) of node group auto-discovery.<br>A definition is expressed `<name of discoverer>:[<key>[=<value>]]`<br>The `aws`, `gce`, and `azure` cloud providers are currently supported. AWS matches by ASG tags, e.g. `asg:tag=tagKey,anotherTagKey`<br>GCE matches by IG name prefix, and requires you to specify min and max nodes per IG, e.g. `mig:namePrefix=pfx,min=0,max=10`<br> Azure matches by VMSS tags, similar to AWS. And you can optionally specify a default min and max size for VMSSs, e.g. `label:tag=tagKey,anotherTagKey=bar,min=0,max=600`.<br>Can be used multiple times | ""
|
||||
| `emit-per-nodegroup-metrics` | If true, emit per node group metrics. | false
|
||||
| `estimator` | Type of resource estimator to be used in scale up | binpacking
|
||||
| `expander` | Type of node group expander to be used in scale up. | random
|
||||
| `ignore-daemonsets-utilization` | Whether DaemonSet pods will be ignored when calculating resource utilization for scaling down | false
|
||||
| `ignore-mirror-pods-utilization` | Whether [Mirror pods](https://kubernetes.io/docs/tasks/configure-pod-container/static-pod/) will be ignored when calculating resource utilization for scaling down | false
|
||||
| `write-status-configmap` | Should CA write status information to a configmap | true
|
||||
| `status-config-map-name` | The name of the status ConfigMap that CA writes | cluster-autoscaler-status
|
||||
| `max-inactivity` | Maximum time from last recorded autoscaler activity before automatic restart | 10 minutes
|
||||
| `max-failing-time` | Maximum time from last recorded successful autoscaler run before automatic restart | 15 minutes
|
||||
| `balance-similar-node-groups` | Detect similar node groups and balance the number of nodes between them | false
|
||||
| `balancing-ignore-label` | Define a node label that should be ignored when considering node group similarity. One label per flag occurrence. | ""
|
||||
| `balancing-label` | Define a node label to use when comparing node group similarity. If set, all other comparison logic is disabled, and only labels are considered when comparing groups. One label per flag occurrence. | ""
|
||||
| `node-autoprovisioning-enabled` | Should CA autoprovision node groups when needed | false
|
||||
| `max-autoprovisioned-node-group-count` | The maximum number of autoprovisioned groups in the cluster | 15
|
||||
| `unremovable-node-recheck-timeout` | The timeout before we check again a node that couldn't be removed before | 5 minutes
|
||||
| `expendable-pods-priority-cutoff` | Pods with priority below cutoff will be expendable. They can be killed without any consideration during scale down and they don't cause scale up. Pods with null priority (PodPriority disabled) are non expendable | -10
|
||||
| `regional` | Cluster is regional | false
|
||||
| `leader-elect` | Start a leader election client and gain leadership before executing the main loop.<br>Enable this when running replicated components for high availability | true
|
||||
| `leader-elect-lease-duration` | The duration that non-leader candidates will wait after observing a leadership<br>renewal until attempting to acquire leadership of a led but unrenewed leader slot.<br>This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate.<br>This is only applicable if leader election is enabled | 15 seconds
|
||||
| `leader-elect-renew-deadline` | The interval between attempts by the active cluster-autoscaler to renew a leadership slot before it stops leading.<br>This must be less than or equal to the lease duration.<br>This is only applicable if leader election is enabled | 10 seconds
|
||||
| `leader-elect-retry-period` | The duration the clients should wait between attempting acquisition and renewal of a leadership.<br>This is only applicable if leader election is enabled | 2 seconds
|
||||
| `leader-elect-resource-lock` | The type of resource object that is used for locking during leader election.<br>Supported options are `leases` (default), `endpoints`, `endpointsleases`, `configmaps`, and `configmapsleases` | "leases"
|
||||
| `aws-use-static-instance-list` | Should CA fetch instance types in runtime or use a static list. AWS only | false
|
||||
| `skip-nodes-with-system-pods` | If true cluster autoscaler will never delete nodes with pods from kube-system (except for [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) or [mirror pods](https://kubernetes.io/docs/tasks/configure-pod-container/static-pod/)) | true
|
||||
| `skip-nodes-with-local-storage`| If true cluster autoscaler will never delete nodes with pods with local storage, e.g. EmptyDir or HostPath | true
|
||||
| `skip-nodes-with-custom-controller-pods` | If true cluster autoscaler will never delete nodes with pods owned by custom controllers | true
|
||||
| `min-replica-count` | Minimum number of replicas that a replica set or replication controller should have to allow their pods deletion in scale down | 0
|
||||
| `daemonset-eviction-for-empty-nodes` | Whether DaemonSet pods will be gracefully terminated from empty nodes | false
|
||||
| `daemonset-eviction-for-occupied-nodes` | Whether DaemonSet pods will be gracefully terminated from non-empty nodes | true
|
||||
| `feature-gates` | A set of key=value pairs that describe feature gates for alpha/experimental features. | ""
|
||||
| `cordon-node-before-terminating` | Should CA cordon nodes before terminating during downscale process | false
|
||||
| `record-duplicated-events` | Enable the autoscaler to print duplicated events within a 5 minute window. | false
|
||||
| `debugging-snapshot-enabled` | Whether the debugging snapshot of cluster autoscaler feature is enabled. | false
|
||||
| `node-delete-delay-after-taint` | How long to wait before deleting a node after tainting it. | 5 seconds
|
||||
| `enable-provisioning-requests` | Whether the clusterautoscaler will be handling the ProvisioningRequest CRs. | false
|
||||
| `add-dir-header` | If true, adds the file directory to the header of the log messages | |
|
||||
| `address` | The address to expose prometheus metrics. | ":8085" |
|
||||
| `alsologtostderr` | log to standard error as well as files (no effect when -logtostderr=true) | |
|
||||
| `async-node-groups` | Whether clusterautoscaler creates and deletes node groups asynchronously. Experimental: requires cloud provider supporting async node group operations, enable at your own risk. | |
|
||||
| `aws-use-static-instance-list` | Should CA fetch instance types in runtime or use a static list. AWS only | |
|
||||
| `balance-similar-node-groups` | Detect similar node groups and balance the number of nodes between them | |
|
||||
| `balancing-ignore-label` | Specifies a label to ignore in addition to the basic and cloud-provider set of labels when comparing if two node groups are similar | [] |
|
||||
| `balancing-label` | Specifies a label to use for comparing if two node groups are similar, rather than the built in heuristics. Setting this flag disables all other comparison logic, and cannot be combined with --balancing-ignore-label. | [] |
|
||||
| `bulk-mig-instances-listing-enabled` | Fetch GCE mig instances in bulk instead of per mig | |
|
||||
| `bypassed-scheduler-names` | Names of schedulers to bypass. If set to non-empty value, CA will not wait for pods to reach a certain age before triggering a scale-up. | |
|
||||
| `check-capacity-batch-processing` | Whether to enable batch processing for check capacity requests. | |
|
||||
| `check-capacity-processor-instance` | Name of the processor instance. Only ProvisioningRequests that define this name in their parameters with the key "processorInstance" will be processed by this CA instance. It only refers to check capacity ProvisioningRequests, but if not empty, best-effort atomic ProvisioningRequests processing is disabled in this instance. Not recommended: Until CA 1.35, ProvisioningRequests with this name as prefix in their class will be also processed. | |
|
||||
| `check-capacity-provisioning-request-batch-timebox` | Maximum time to process a batch of provisioning requests. | 10s |
|
||||
| `check-capacity-provisioning-request-max-batch-size` | Maximum number of provisioning requests to process in a single batch. | 10 |
|
||||
| `cloud-config` | The path to the cloud provider configuration file. Empty string for no configuration file. | |
|
||||
| `cloud-provider` | Cloud provider type. Available values: [aws,azure,gce,alicloud,cherryservers,cloudstack,baiducloud,magnum,digitalocean,exoscale,externalgrpc,huaweicloud,hetzner,oci,ovhcloud,clusterapi,ionoscloud,kamatera,kwok,linode,bizflycloud,brightbox,equinixmetal,vultr,tencentcloud,civo,scaleway,rancher,volcengine] | "gce" |
|
||||
| `cloud-provider-gce-l7lb-src-cidrs` | CIDRs opened in GCE firewall for L7 LB traffic proxy & health checks | 130.211.0.0/22,35.191.0.0/16 |
|
||||
| `cloud-provider-gce-lb-src-cidrs` | CIDRs opened in GCE firewall for L4 LB traffic proxy & health checks | 130.211.0.0/22,209.85.152.0/22,209.85.204.0/22,35.191.0.0/16 |
|
||||
| `cluster-name` | Autoscaled cluster name, if available | |
|
||||
| `cluster-snapshot-parallelism` | Maximum parallelism of cluster snapshot creation. | 16 |
|
||||
| `clusterapi-cloud-config-authoritative` | Treat the cloud-config flag authoritatively (do not fallback to using kubeconfig flag). ClusterAPI only | |
|
||||
| `cordon-node-before-terminating` | Should CA cordon nodes before terminating during downscale process | |
|
||||
| `cores-total` | Minimum and maximum number of cores in cluster, in the format <min>:<max>. Cluster autoscaler will not scale the cluster beyond these numbers. | "0:320000" |
|
||||
| `daemonset-eviction-for-empty-nodes` | DaemonSet pods will be gracefully terminated from empty nodes | |
|
||||
| `daemonset-eviction-for-occupied-nodes` | DaemonSet pods will be gracefully terminated from non-empty nodes | true |
|
||||
| `debugging-snapshot-enabled` | Whether the debugging snapshot of cluster autoscaler feature is enabled | |
|
||||
| `drain-priority-config` | List of ',' separated pairs (priority:terminationGracePeriodSeconds) of integers separated by ':' enables priority evictor. Priority evictor groups pods into priority groups based on pod priority and evict pods in the ascending order of group priorities--max-graceful-termination-sec flag should not be set when this flag is set. Not setting this flag will use unordered evictor by default.Priority evictor reuses the concepts of drain logic in kubelet(https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/2712-pod-priority-based-graceful-node-shutdown#migration-from-the-node-graceful-shutdown-feature).Eg. flag usage: '10000:20,1000:100,0:60' | |
|
||||
| `dynamic-node-delete-delay-after-taint-enabled` | Enables dynamic adjustment of NodeDeleteDelayAfterTaint based of the latency between CA and api-server | |
|
||||
| `emit-per-nodegroup-metrics` | If true, emit per node group metrics. | |
|
||||
| `enable-dynamic-resource-allocation` | Whether logic for handling DRA (Dynamic Resource Allocation) objects is enabled. | |
|
||||
| `enable-proactive-scaleup` | Whether to enable/disable proactive scale-ups, defaults to false | |
|
||||
| `enable-provisioning-requests` | Whether the clusterautoscaler will be handling the ProvisioningRequest CRs. | |
|
||||
| `enforce-node-group-min-size` | Should CA scale up the node group to the configured min size if needed. | |
|
||||
| `estimator` | Type of resource estimator to be used in scale up. Available values: [binpacking] | "binpacking" |
|
||||
| `expander` | Type of node group expander to be used in scale up. Available values: [random,most-pods,least-waste,price,priority,grpc]. Specifying multiple values separated by commas will call the expanders in succession until there is only one option remaining. Ties still existing after this process are broken randomly. | "least-waste" |
|
||||
| `expendable-pods-priority-cutoff` | Pods with priority below cutoff will be expendable. They can be killed without any consideration during scale down and they don't cause scale up. Pods with null priority (PodPriority disabled) are non expendable. | -10 |
|
||||
| `feature-gates` | A set of key=value pairs that describe feature gates for alpha/experimental features. Options are: | |
|
||||
| `force-delete-unregistered-nodes` | Whether to enable force deletion of long unregistered nodes, regardless of the min size of the node group they belong to. | |
|
||||
| `force-ds` | Blocks scale-up of node groups too small for all suitable Daemon Sets pods. | |
|
||||
| `frequent-loops-enabled` | Whether clusterautoscaler triggers new iterations more frequently when it's needed | |
|
||||
| `gce-concurrent-refreshes` | Maximum number of concurrent refreshes per cloud object type. | 1 |
|
||||
| `gce-expander-ephemeral-storage-support` | Whether scale-up takes ephemeral storage resources into account for GCE cloud provider (Deprecated, to be removed in 1.30+) | true |
|
||||
| `gce-mig-instances-min-refresh-wait-time` | The minimum time which needs to pass before GCE MIG instances from a given MIG can be refreshed. | 5s |
|
||||
| `gpu-total` | Minimum and maximum number of different GPUs in cluster, in the format <gpu_type>:<min>:<max>. Cluster autoscaler will not scale the cluster beyond these numbers. Can be passed multiple times. CURRENTLY THIS FLAG ONLY WORKS ON GKE. | [] |
|
||||
| `grpc-expander-cert` | Path to cert used by gRPC server over TLS | |
|
||||
| `grpc-expander-url` | URL to reach gRPC expander server. | |
|
||||
| `ignore-daemonsets-utilization` | Should CA ignore DaemonSet pods when calculating resource utilization for scaling down | |
|
||||
| `ignore-mirror-pods-utilization` | Should CA ignore Mirror pods when calculating resource utilization for scaling down | |
|
||||
| `ignore-taint` | Specifies a taint to ignore in node templates when considering to scale a node group (Deprecated, use startup-taints instead) | [] |
|
||||
| `initial-node-group-backoff-duration` | initialNodeGroupBackoffDuration is the duration of first backoff after a new node failed to start. | 5m0s |
|
||||
| `kube-api-content-type` | Content type of requests sent to apiserver. | "application/vnd.kubernetes.protobuf" |
|
||||
| `kube-client-burst` | Burst value for kubernetes client. | 10 |
|
||||
| `kube-client-qps` | QPS value for kubernetes client. | 5 |
|
||||
| `kubeconfig` | Path to kubeconfig file with authorization and master location information. | |
|
||||
| `kubernetes` | Kubernetes master location. Leave blank for default | |
|
||||
| `leader-elect` | Start a leader election client and gain leadership before executing the main loop. Enable this when running replicated components for high availability. | true |
|
||||
| `leader-elect-lease-duration` | The duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled. | 15s |
|
||||
| `leader-elect-renew-deadline` | The interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than the lease duration. This is only applicable if leader election is enabled. | 10s |
|
||||
| `leader-elect-resource-lock` | The type of resource object that is used for locking during leader election. Supported options are 'leases'. | "leases" |
|
||||
| `leader-elect-resource-name` | The name of resource object that is used for locking during leader election. | "cluster-autoscaler" |
|
||||
| `leader-elect-resource-namespace` | The namespace of resource object that is used for locking during leader election. | |
|
||||
| `leader-elect-retry-period` | The duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled. | 2s |
|
||||
| `log-backtrace-at` | when logging hits line file:N, emit a stack trace | :0 |
|
||||
| `log-dir` | If non-empty, write log files in this directory (no effect when -logtostderr=true) | |
|
||||
| `log-file` | If non-empty, use this log file (no effect when -logtostderr=true) | |
|
||||
| `log-file-max-size` | Defines the maximum size a log file can grow to (no effect when -logtostderr=true). Unit is megabytes. If the value is 0, the maximum file size is unlimited. | 1800 |
|
||||
| `log-flush-frequency` | Maximum number of seconds between log flushes | 5s |
|
||||
| `log-json-info-buffer-size` | [Alpha] In JSON format with split output streams, the info messages can be buffered for a while to increase performance. The default value of zero bytes disables buffering. The size can be specified as number of bytes (512), multiples of 1000 (1K), multiples of 1024 (2Ki), or powers of those (3M, 4G, 5Mi, 6Gi). Enable the LoggingAlphaOptions feature gate to use this. | |
|
||||
| `log-json-split-stream` | [Alpha] In JSON format, write error messages to stderr and info messages to stdout. The default is to write a single stream to stdout. Enable the LoggingAlphaOptions feature gate to use this. | |
|
||||
| `log-text-info-buffer-size` | [Alpha] In text format with split output streams, the info messages can be buffered for a while to increase performance. The default value of zero bytes disables buffering. The size can be specified as number of bytes (512), multiples of 1000 (1K), multiples of 1024 (2Ki), or powers of those (3M, 4G, 5Mi, 6Gi). Enable the LoggingAlphaOptions feature gate to use this. | |
|
||||
| `log-text-split-stream` | [Alpha] In text format, write error messages to stderr and info messages to stdout. The default is to write a single stream to stdout. Enable the LoggingAlphaOptions feature gate to use this. | |
|
||||
| `logging-format` | Sets the log format. Permitted formats: "json" (gated by LoggingBetaOptions), "text". | "text" |
|
||||
| `logtostderr` | log to standard error instead of files | true |
|
||||
| `max-allocatable-difference-ratio` | Maximum difference in allocatable resources between two similar node groups to be considered for balancing. Value is a ratio of the smaller node group's allocatable resource. | 0.05 |
|
||||
| `max-autoprovisioned-node-group-count` | The maximum number of autoprovisioned groups in the cluster. This flag is deprecated and will be removed in future releases. | 15 |
|
||||
| `max-binpacking-time` | Maximum time spend on binpacking for a single scale-up. If binpacking is limited by this, scale-up will continue with the already calculated scale-up options. | 5m0s |
|
||||
| `max-bulk-soft-taint-count` | Maximum number of nodes that can be tainted/untainted PreferNoSchedule at the same time. Set to 0 to turn off such tainting. | 10 |
|
||||
| `max-bulk-soft-taint-time` | Maximum duration of tainting/untainting nodes as PreferNoSchedule at the same time. | 3s |
|
||||
| `max-drain-parallelism` | Maximum number of nodes needing drain, that can be drained and deleted in parallel. | 1 |
|
||||
| `max-empty-bulk-delete` | Maximum number of empty nodes that can be deleted at the same time. DEPRECATED: Use --max-scale-down-parallelism instead. | 10 |
|
||||
| `max-failing-time` | Maximum time from last recorded successful autoscaler run before automatic restart | 15m0s |
|
||||
| `max-free-difference-ratio` | Maximum difference in free resources between two similar node groups to be considered for balancing. Value is a ratio of the smaller node group's free resource. | 0.05 |
|
||||
| `max-graceful-termination-sec` | Maximum number of seconds CA waits for pod termination when trying to scale down a node. This flag is mutually exclusive with the drain-priority-config flag, which allows more configuration options. | 600 |
|
||||
| `max-inactivity` | Maximum time from last recorded autoscaler activity before automatic restart | 10m0s |
|
||||
| `max-node-group-backoff-duration` | maxNodeGroupBackoffDuration is the maximum backoff duration for a NodeGroup after new nodes failed to start. | 30m0s |
|
||||
| `max-node-provision-time` | The default maximum time CA waits for node to be provisioned - the value can be overridden per node group | 15m0s |
|
||||
| `max-nodegroup-binpacking-duration` | Maximum time that will be spent in binpacking simulation for each NodeGroup. | 10s |
|
||||
| `max-nodes-per-scaleup` | Max nodes added in a single scale-up. This is intended strictly for optimizing CA algorithm latency and not a tool to rate-limit scale-up throughput. | 1000 |
|
||||
| `max-nodes-total` | Maximum number of nodes in all node groups. Cluster autoscaler will not grow the cluster beyond this number. | |
|
||||
| `max-pod-eviction-time` | Maximum time CA tries to evict a pod before giving up | 2m0s |
|
||||
| `max-scale-down-parallelism` | Maximum number of nodes (both empty and needing drain) that can be deleted in parallel. | 10 |
|
||||
| `max-total-unready-percentage` | Maximum percentage of unready nodes in the cluster. After this is exceeded, CA halts operations | 45 |
|
||||
| `memory-difference-ratio` | Maximum difference in memory capacity between two similar node groups to be considered for balancing. Value is a ratio of the smaller node group's memory capacity. | 0.015 |
|
||||
| `memory-total` | Minimum and maximum number of gigabytes of memory in cluster, in the format <min>:<max>. Cluster autoscaler will not scale the cluster beyond these numbers. | "0:6400000" |
|
||||
| `min-replica-count` | Minimum number of replicas that a replica set or replication controller should have to allow their pods deletion in scale down | |
|
||||
| `namespace` | Namespace in which cluster-autoscaler run. | "kube-system" |
|
||||
| `new-pod-scale-up-delay` | Pods less than this old will not be considered for scale-up. Can be increased for individual pods through annotation 'cluster-autoscaler.kubernetes.io/pod-scale-up-delay'. | 0s |
|
||||
| `node-autoprovisioning-enabled` | Should CA autoprovision node groups when needed. This flag is deprecated and will be removed in future releases. | |
|
||||
| `node-delete-delay-after-taint` | How long to wait before deleting a node after tainting it | 5s |
|
||||
| `node-deletion-batcher-interval` | How long CA ScaleDown gather nodes to delete them in batch. | 0s |
|
||||
| `node-deletion-delay-timeout` | Maximum time CA waits for removing delay-deletion.cluster-autoscaler.kubernetes.io/ annotations before deleting the node. | 2m0s |
|
||||
| `node-group-auto-discovery` | One or more definition(s) of node group auto-discovery. A definition is expressed `<name of discoverer>:[<key>[=<value>]]`. The `aws`, `gce`, and `azure` cloud providers are currently supported. AWS matches by ASG tags, e.g. `asg:tag=tagKey,anotherTagKey`. GCE matches by IG name prefix, and requires you to specify min and max nodes per IG, e.g. `mig:namePrefix=pfx,min=0,max=10`. Azure matches by VMSS tags, similar to AWS. And you can optionally specify a default min and max size, e.g. `label:tag=tagKey,anotherTagKey=bar,min=0,max=600`. Can be used multiple times. | [] |
|
||||
| `node-group-backoff-reset-timeout` | nodeGroupBackoffResetTimeout is the time after last failed scale-up when the backoff duration is reset. | 3h0m0s |
|
||||
| `node-info-cache-expire-time` | Node Info cache expire time for each item. Default value is 10 years. | 87600h0m0s |
|
||||
| `nodes` | sets min,max size and other configuration data for a node group in a format accepted by cloud provider. Can be used multiple times. Format: <min>:<max>:<other...> | [] |
|
||||
| `ok-total-unready-count` | Number of allowed unready nodes, irrespective of max-total-unready-percentage | 3 |
|
||||
| `one-output` | If true, only write logs to their native severity level (vs also writing to each lower severity level; no effect when -logtostderr=true) | |
|
||||
| `parallel-scale-up` | Whether to allow parallel node groups scale up. Experimental: may not work on some cloud providers, enable at your own risk. | |
|
||||
| `pod-injection-limit` | Limits total number of pods while injecting fake pods. If unschedulable pods already exceeds the limit, pod injection is disabled but pods are not truncated. | 5000 |
|
||||
| `profiling` | Is debug/pprof endpoint enabled | |
|
||||
| `provisioning-request-initial-backoff-time` | Initial backoff time for ProvisioningRequest retry after failed ScaleUp. | 1m0s |
|
||||
| `provisioning-request-max-backoff-cache-size` | Max size for ProvisioningRequest cache size used for retry backoff mechanism. | 1000 |
|
||||
| `provisioning-request-max-backoff-time` | Max backoff time for ProvisioningRequest retry after failed ScaleUp. | 10m0s |
|
||||
| `record-duplicated-events` | enable duplication of similar events within a 5 minute window. | |
|
||||
| `regional` | Cluster is regional. | |
|
||||
| `scale-down-candidates-pool-min-count` | Minimum number of nodes that are considered as additional non empty candidates for scale down when some candidates from previous iteration are no longer valid. When calculating the pool size for additional candidates we take max(#nodes * scale-down-candidates-pool-ratio, scale-down-candidates-pool-min-count). | 50 |
|
||||
| `scale-down-candidates-pool-ratio` | A ratio of nodes that are considered as additional non empty candidates for scale down when some candidates from previous iteration are no longer valid. Lower value means better CA responsiveness but possible slower scale down latency. Higher value can affect CA performance with big clusters (hundreds of nodes). Set to 1.0 to turn this heuristics off - CA will take all nodes as additional candidates. | 0.1 |
|
||||
| `scale-down-delay-after-add` | How long after scale up that scale down evaluation resumes | 10m0s |
|
||||
| `scale-down-delay-after-delete` | How long after node deletion that scale down evaluation resumes, defaults to scanInterval | 0s |
|
||||
| `scale-down-delay-after-failure` | How long after scale down failure that scale down evaluation resumes | 3m0s |
|
||||
| `scale-down-delay-type-local` | Should --scale-down-delay-after-* flags be applied locally per nodegroup or globally across all nodegroups | |
|
||||
| `scale-down-enabled` | Should CA scale down the cluster | true |
|
||||
| `scale-down-gpu-utilization-threshold` | Sum of gpu requests of all pods running on the node divided by node's allocatable resource, below which a node can be considered for scale down. Utilization calculation only cares about gpu resource for accelerator node; cpu and memory utilization will be ignored. | 0.5 |
|
||||
| `scale-down-non-empty-candidates-count` | Maximum number of non empty nodes considered in one iteration as candidates for scale down with drain. Lower value means better CA responsiveness but possible slower scale down latency. Higher value can affect CA performance with big clusters (hundreds of nodes). Set to non positive value to turn this heuristic off - CA will not limit the number of nodes it considers. | 30 |
|
||||
| `scale-down-simulation-timeout` | How long should we run scale down simulation. | 30s |
|
||||
| `scale-down-unneeded-time` | How long a node should be unneeded before it is eligible for scale down | 10m0s |
|
||||
| `scale-down-unready-enabled` | Should CA scale down unready nodes of the cluster | true |
|
||||
| `scale-down-unready-time` | How long an unready node should be unneeded before it is eligible for scale down | 20m0s |
|
||||
| `scale-down-utilization-threshold` | The maximum value between the sum of cpu requests and sum of memory requests of all pods running on the node divided by node's corresponding allocatable resource, below which a node can be considered for scale down | 0.5 |
|
||||
| `scale-up-from-zero` | Should CA scale up when there are 0 ready nodes. | true |
|
||||
| `scan-interval` | How often cluster is reevaluated for scale up or down | 10s |
|
||||
| `scheduler-config-file` | scheduler-config allows changing configuration of in-tree scheduler plugins acting on PreFilter and Filter extension points | |
|
||||
| `skip-headers` | If true, avoid header prefixes in the log messages | |
|
||||
| `skip-log-headers` | If true, avoid headers when opening log files (no effect when -logtostderr=true) | |
|
||||
| `skip-nodes-with-custom-controller-pods` | If true cluster autoscaler will never delete nodes with pods owned by custom controllers | true |
|
||||
| `skip-nodes-with-local-storage` | If true cluster autoscaler will never delete nodes with pods with local storage, e.g. EmptyDir or HostPath | true |
|
||||
| `skip-nodes-with-system-pods` | If true cluster autoscaler will never delete nodes with pods from kube-system (except for DaemonSet or mirror pods) | true |
|
||||
| `startup-taint` | Specifies a taint to ignore in node templates when considering to scale a node group (Equivalent to ignore-taint) | [] |
|
||||
| `status-config-map-name` | Status configmap name | "cluster-autoscaler-status" |
|
||||
| `status-taint` | Specifies a taint to ignore in node templates when considering to scale a node group but nodes will not be treated as unready | [] |
|
||||
| `stderrthreshold` | logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=true) | 2 |
|
||||
| `unremovable-node-recheck-timeout` | The timeout before we check again a node that couldn't be removed before | 5m0s |
|
||||
| `user-agent` | User agent used for HTTP calls. | "cluster-autoscaler" |
|
||||
| `v` | number for the log level verbosity | |
|
||||
| `vmodule` | comma-separated list of pattern=N settings for file-filtered logging (only works for text log format) | |
|
||||
| `write-status-configmap` | Should CA write status information to a configmap | true |
|
||||
|
||||
# Troubleshooting
|
||||
|
||||
|
|
|
@ -7,8 +7,9 @@ LDFLAGS?=-s
|
|||
ENVVAR=CGO_ENABLED=0
|
||||
GOOS?=linux
|
||||
GOARCH?=$(shell go env GOARCH)
|
||||
REGISTRY?=staging-k8s.gcr.io
|
||||
REGISTRY?=gcr.io/k8s-staging-autoscaling
|
||||
DOCKER_NETWORK?=default
|
||||
SUPPORTED_BUILD_TAGS=$(shell ls cloudprovider/builder/ | grep -e '^builder_.*\.go' | sed 's/builder_\(.*\)\.go/\1/')
|
||||
ifdef BUILD_TAGS
|
||||
TAGS_FLAG=--tags ${BUILD_TAGS}
|
||||
PROVIDER=-${BUILD_TAGS}
|
||||
|
@ -19,7 +20,7 @@ else
|
|||
FOR_PROVIDER=
|
||||
endif
|
||||
ifdef LDFLAGS
|
||||
LDFLAGS_FLAG=--ldflags "${LDFLAGS}"
|
||||
LDFLAGS_FLAG=--ldflags="${LDFLAGS}"
|
||||
else
|
||||
LDFLAGS_FLAG=
|
||||
endif
|
||||
|
@ -42,6 +43,16 @@ build:
|
|||
build-arch-%: clean-arch-%
|
||||
$(ENVVAR) GOOS=$(GOOS) GOARCH=$* go build -o cluster-autoscaler-$* ${LDFLAGS_FLAG} ${TAGS_FLAG}
|
||||
|
||||
test-build-tags:
|
||||
@if [ -z "$(SUPPORTED_BUILD_TAGS)" ]; then \
|
||||
echo "No supported build tags found"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@for tag in $(SUPPORTED_BUILD_TAGS); do \
|
||||
echo "Testing build with tag $$tag"; \
|
||||
BUILD_TAGS=$$tag $(MAKE) build || exit 1; \
|
||||
done
|
||||
|
||||
test-unit: clean build
|
||||
go test --test.short -race ./... ${TAGS_FLAG}
|
||||
|
||||
|
@ -53,15 +64,10 @@ dev-release-arch-%: build-arch-% make-image-arch-% push-image-arch-%
|
|||
make-image: make-image-arch-$(GOARCH)
|
||||
|
||||
make-image-arch-%:
|
||||
ifdef BASEIMAGE
|
||||
docker build --pull --build-arg BASEIMAGE=${BASEIMAGE} \
|
||||
GOOS=$(GOOS) docker buildx build --pull --platform linux/$* \
|
||||
--build-arg "GOARCH=$*" \
|
||||
-t ${IMAGE}-$*:${TAG} \
|
||||
-f Dockerfile.$* .
|
||||
else
|
||||
docker build --pull \
|
||||
-t ${IMAGE}-$*:${TAG} \
|
||||
-f Dockerfile.$* .
|
||||
endif
|
||||
-f Dockerfile .
|
||||
@echo "Image ${TAG}${FOR_PROVIDER}-$* completed"
|
||||
|
||||
push-image: push-image-arch-$(GOARCH)
|
||||
|
@ -69,12 +75,15 @@ push-image: push-image-arch-$(GOARCH)
|
|||
push-image-arch-%:
|
||||
./push_image.sh ${IMAGE}-$*:${TAG}
|
||||
|
||||
push-release-image-arch-%:
|
||||
docker push ${IMAGE}-$*:${TAG}
|
||||
|
||||
push-manifest:
|
||||
docker manifest create ${IMAGE}:${TAG} \
|
||||
$(addprefix $(REGISTRY)/cluster-autoscaler$(PROVIDER)-, $(addsuffix :$(TAG), $(ALL_ARCH)))
|
||||
docker manifest push --purge ${IMAGE}:${TAG}
|
||||
|
||||
execute-release: $(addprefix make-image-arch-,$(ALL_ARCH)) $(addprefix push-image-arch-,$(ALL_ARCH)) push-manifest
|
||||
execute-release: $(addprefix make-image-arch-,$(ALL_ARCH)) $(addprefix push-release-image-arch-,$(ALL_ARCH)) push-manifest
|
||||
@echo "Release ${TAG}${FOR_PROVIDER} completed"
|
||||
|
||||
clean: clean-arch-$(GOARCH)
|
||||
|
@ -98,7 +107,20 @@ build-in-docker-arch-%: clean-arch-% docker-builder
|
|||
docker run ${RM_FLAG} -v `pwd`:/gopath/src/k8s.io/autoscaler/cluster-autoscaler/:Z autoscaling-builder:latest \
|
||||
bash -c 'cd /gopath/src/k8s.io/autoscaler/cluster-autoscaler && BUILD_TAGS=${BUILD_TAGS} LDFLAGS="${LDFLAGS}" make build-arch-$*'
|
||||
|
||||
release: $(addprefix build-in-docker-arch-,$(ALL_ARCH)) execute-release
|
||||
release-extract-version = $(shell cat version/version.go | grep "Version =" | cut -d '"' -f 2)
|
||||
|
||||
release-validate:
|
||||
@if [ -z $(shell git tag --points-at HEAD | grep -e ^cluster-autoscaler-1.[1-9][0-9]*.[0-9][0-9]*$) ]; then \
|
||||
echo "Can't release from this commit, there is no compatible git tag"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@if [ -z $(shell git tag --points-at HEAD | grep -e $(call release-extract-version)) ]; then \
|
||||
echo "Can't release from this commit, git tag does not match version/version.go"; \
|
||||
exit 1; \
|
||||
fi
|
||||
|
||||
release: TAG=v$(call release-extract-version)
|
||||
release: release-validate $(addprefix build-in-docker-arch-,$(ALL_ARCH)) execute-release
|
||||
@echo "Full in-docker release ${TAG}${FOR_PROVIDER} completed"
|
||||
|
||||
container: container-arch-$(GOARCH)
|
||||
|
|
|
@ -10,5 +10,6 @@ reviewers:
|
|||
- feiskyer
|
||||
- vadasambar
|
||||
- x13n
|
||||
- elmiko
|
||||
labels:
|
||||
- area/cluster-autoscaler
|
||||
|
|
|
@ -49,6 +49,8 @@ Starting from Kubernetes 1.12, versioning scheme was changed to match Kubernetes
|
|||
|
||||
| Kubernetes Version | CA Version | Chart Version |
|
||||
|--------------------|--------------------------|---------------|
|
||||
| 1.33.x | 1.33.x |9.47.0+|
|
||||
| 1.32.x | 1.32.x |9.45.0+|
|
||||
| 1.31.x | 1.31.x |9.38.0+|
|
||||
| 1.30.x | 1.30.x |9.37.0+|
|
||||
| 1.29.X | 1.29.X |9.35.0+|
|
||||
|
@ -91,12 +93,12 @@ target ETA and the actual releases.
|
|||
|
||||
| Date | Maintainer Preparing Release | Backup Maintainer | Type |
|
||||
|------------|------------------------------|-------------------|-------|
|
||||
| 2024-07-18 | x13n | MaciekPytel | patch |
|
||||
| 2024-08-21 | MaciekPytel | gjtempleton | 1.31 |
|
||||
| 2024-09-18 | gjtempleton | towca | patch |
|
||||
| 2024-11-20 | towca | BigDarkClown | patch |
|
||||
| 2024-12-18 | BigDarkClown | x13n | 1.32 |
|
||||
| 2025-01-22 | x13n | MaciekPytel | patch |
|
||||
| 2025-06-11 | jackfrancis | gjtempleton | 1.33 |
|
||||
| 2025-07-16 | gjtempleton | towca | patch |
|
||||
| 2025-08-20 | towca | BigDarkClown | patch |
|
||||
| 2025-09-17 | BigDarkClown | x13n | 1.34 |
|
||||
| 2025-10-22 | x13n | jackfrancis | patch |
|
||||
| 2025-11-19 | jackfrancis | gjtempleton | patch |
|
||||
|
||||
Additional patch releases may happen outside of the schedule in case of critical
|
||||
bugs or vulnerabilities.
|
||||
|
|
|
@ -1,61 +1,58 @@
|
|||
module k8s.io/autoscaler/cluster-autoscaler/apis
|
||||
|
||||
go 1.23.0
|
||||
|
||||
toolchain go1.23.2
|
||||
go 1.24.0
|
||||
|
||||
require (
|
||||
github.com/onsi/ginkgo/v2 v2.21.0
|
||||
github.com/onsi/gomega v1.35.1
|
||||
k8s.io/apimachinery v0.33.0-alpha.0
|
||||
k8s.io/client-go v0.33.0-alpha.0
|
||||
k8s.io/code-generator v0.33.0-alpha.0
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.2
|
||||
k8s.io/apimachinery v0.34.0-alpha.1
|
||||
k8s.io/client-go v0.34.0-alpha.1
|
||||
k8s.io/code-generator v0.34.0-alpha.1
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.7.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.8.0 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
||||
github.com/go-openapi/swag v0.23.0 // indirect
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/gnostic-models v0.6.8 // indirect
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/gnostic-models v0.6.9 // indirect
|
||||
github.com/google/go-cmp v0.7.0 // indirect
|
||||
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/spf13/pflag v1.0.6 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
golang.org/x/mod v0.21.0 // indirect
|
||||
golang.org/x/net v0.30.0 // indirect
|
||||
golang.org/x/oauth2 v0.23.0 // indirect
|
||||
golang.org/x/sync v0.8.0 // indirect
|
||||
golang.org/x/sys v0.26.0 // indirect
|
||||
golang.org/x/term v0.25.0 // indirect
|
||||
golang.org/x/text v0.19.0 // indirect
|
||||
golang.org/x/time v0.7.0 // indirect
|
||||
golang.org/x/net v0.38.0 // indirect
|
||||
golang.org/x/oauth2 v0.27.0 // indirect
|
||||
golang.org/x/sync v0.12.0 // indirect
|
||||
golang.org/x/sys v0.31.0 // indirect
|
||||
golang.org/x/term v0.30.0 // indirect
|
||||
golang.org/x/text v0.23.0 // indirect
|
||||
golang.org/x/time v0.9.0 // indirect
|
||||
golang.org/x/tools v0.26.0 // indirect
|
||||
google.golang.org/protobuf v1.35.1 // indirect
|
||||
google.golang.org/protobuf v1.36.5 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/api v0.33.0-alpha.0 // indirect
|
||||
k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 // indirect
|
||||
k8s.io/api v0.34.0-alpha.1 // indirect
|
||||
k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f // indirect
|
||||
k8s.io/klog/v2 v2.130.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
|
||||
sigs.k8s.io/randfill v1.0.0 // indirect
|
||||
sigs.k8s.io/yaml v1.4.0 // indirect
|
||||
)
|
||||
|
|
|
@ -5,8 +5,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
|
|||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
|
||||
github.com/fxamacker/cbor/v2 v2.8.0 h1:fFtUGXUzXPHTIUdne5+zzMPTfffl3RD5qYnkY40vtxU=
|
||||
github.com/fxamacker/cbor/v2 v2.8.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
|
||||
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
|
||||
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
|
||||
|
@ -21,16 +21,12 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v
|
|||
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
|
||||
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
|
||||
github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
|
||||
github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
|
||||
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
|
@ -53,8 +49,9 @@ github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJ
|
|||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
|
||||
|
@ -63,26 +60,29 @@ github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
|
|||
github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
|
||||
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
|
||||
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
|
||||
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
|
||||
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
|
@ -94,28 +94,28 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
|
|||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
|
||||
golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
|
||||
golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
|
||||
golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
||||
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
|
||||
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
|
||||
golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
|
||||
golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
|
||||
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
|
||||
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
|
||||
golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
|
||||
golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
|
||||
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
|
||||
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
|
||||
golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
|
||||
golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
|
||||
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
|
||||
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
|
||||
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
|
||||
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
|
||||
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
|
@ -126,8 +126,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
|
|||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
|
||||
google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
|
||||
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
|
@ -138,25 +138,28 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
|||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
k8s.io/api v0.33.0-alpha.0 h1:bZn/3zFtD8eIj2kuvTnI9NOHVH0FlEMvqqUoTAqBPl0=
|
||||
k8s.io/api v0.33.0-alpha.0/go.mod h1:hk95yeuwwXA2VCRMnCPNh/5vRMMxjSINs3nQPhxrp3Y=
|
||||
k8s.io/apimachinery v0.33.0-alpha.0 h1:UEr11OY9sG+9Zizy6qPpyhLwOMhhs4c6+RLcUOjn5G4=
|
||||
k8s.io/apimachinery v0.33.0-alpha.0/go.mod h1:HqhdaJUgQqky29T1V0o2yFkt/pZqLFIDyn9Zi/8rxoY=
|
||||
k8s.io/client-go v0.33.0-alpha.0 h1:j/1m4ocOzykgF7Mx/xSX5rk5EiOghaCMtbIfVnIl2Gw=
|
||||
k8s.io/client-go v0.33.0-alpha.0/go.mod h1:tKjHOpArmmeuq+J+ahsZ1LbZi4YFK5uwqn9HNq2++G4=
|
||||
k8s.io/code-generator v0.33.0-alpha.0 h1:lvV/XBpfQFCXzzhY4M/YFIPo76+wkGCC449RMRcx1nY=
|
||||
k8s.io/code-generator v0.33.0-alpha.0/go.mod h1:E6buYsOCImG+b6OcYyJMOjmkO8dbB3iY+JqmNdUdycE=
|
||||
k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 h1:si3PfKm8dDYxgfbeA6orqrtLkvvIeH8UqffFJDl0bz4=
|
||||
k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
|
||||
k8s.io/api v0.34.0-alpha.1 h1:Hye5ehH+riYQU/M/y/F8/L7hE6ZO5QZrH53zxcySa2Q=
|
||||
k8s.io/api v0.34.0-alpha.1/go.mod h1:Dl+4wVA5vZVlN4ckJ34aAQXRDciXazH930XZh92Lubk=
|
||||
k8s.io/apimachinery v0.34.0-alpha.1 h1:pA/Biuywm6Us4cZb5FLIHi8idQZXq3/8Bw3h2dqtop4=
|
||||
k8s.io/apimachinery v0.34.0-alpha.1/go.mod h1:EZ7eIfFAwky7ktmG4Pu9XWxBxFG++4dxPDOM0GL3abw=
|
||||
k8s.io/client-go v0.34.0-alpha.1 h1:u9jrtaizUQ1sdchbf5v72ZKC8rj1XI9RAMsDlN4Gcy4=
|
||||
k8s.io/client-go v0.34.0-alpha.1/go.mod h1:MyOhbMoeBUilHgYvjBP7U5BIBkbCUBCdZPzWZuj9i8g=
|
||||
k8s.io/code-generator v0.34.0-alpha.1 h1:bT/Udv1T+9pBL1vkiHArEDhcNFS0bfxTVQQ95tndJ8I=
|
||||
k8s.io/code-generator v0.34.0-alpha.1/go.mod h1:npBqukbEr2Wo+G+rYoKBrLPW2WvBhx2V7u7Ix8gE0mE=
|
||||
k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f h1:SLb+kxmzfA87x4E4brQzB33VBbT2+x7Zq9ROIHmGn9Q=
|
||||
k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y=
|
||||
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4=
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
|
||||
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
|
||||
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.7.0 h1:qPeWmscJcXP0snki5IYF79Z8xrl8ETFxgMd7wez1XkI=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.7.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
|
||||
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
|
||||
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
|
||||
|
|
|
@ -19,18 +19,18 @@ limitations under the License.
|
|||
package v1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
v1 "k8s.io/client-go/applyconfigurations/meta/v1"
|
||||
metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
|
||||
)
|
||||
|
||||
// ProvisioningRequestApplyConfiguration represents a declarative configuration of the ProvisioningRequest type for use
|
||||
// with apply.
|
||||
type ProvisioningRequestApplyConfiguration struct {
|
||||
v1.TypeMetaApplyConfiguration `json:",inline"`
|
||||
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
|
||||
Spec *ProvisioningRequestSpecApplyConfiguration `json:"spec,omitempty"`
|
||||
Status *ProvisioningRequestStatusApplyConfiguration `json:"status,omitempty"`
|
||||
metav1.TypeMetaApplyConfiguration `json:",inline"`
|
||||
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
|
||||
Spec *ProvisioningRequestSpecApplyConfiguration `json:"spec,omitempty"`
|
||||
Status *ProvisioningRequestStatusApplyConfiguration `json:"status,omitempty"`
|
||||
}
|
||||
|
||||
// ProvisioningRequest constructs a declarative configuration of the ProvisioningRequest type for use with
|
||||
|
@ -48,7 +48,7 @@ func ProvisioningRequest(name, namespace string) *ProvisioningRequestApplyConfig
|
|||
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||
// If called multiple times, the Kind field is set to the value of the last call.
|
||||
func (b *ProvisioningRequestApplyConfiguration) WithKind(value string) *ProvisioningRequestApplyConfiguration {
|
||||
b.Kind = &value
|
||||
b.TypeMetaApplyConfiguration.Kind = &value
|
||||
return b
|
||||
}
|
||||
|
||||
|
@ -56,7 +56,7 @@ func (b *ProvisioningRequestApplyConfiguration) WithKind(value string) *Provisio
|
|||
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||
// If called multiple times, the APIVersion field is set to the value of the last call.
|
||||
func (b *ProvisioningRequestApplyConfiguration) WithAPIVersion(value string) *ProvisioningRequestApplyConfiguration {
|
||||
b.APIVersion = &value
|
||||
b.TypeMetaApplyConfiguration.APIVersion = &value
|
||||
return b
|
||||
}
|
||||
|
||||
|
@ -65,7 +65,7 @@ func (b *ProvisioningRequestApplyConfiguration) WithAPIVersion(value string) *Pr
|
|||
// If called multiple times, the Name field is set to the value of the last call.
|
||||
func (b *ProvisioningRequestApplyConfiguration) WithName(value string) *ProvisioningRequestApplyConfiguration {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
b.Name = &value
|
||||
b.ObjectMetaApplyConfiguration.Name = &value
|
||||
return b
|
||||
}
|
||||
|
||||
|
@ -74,7 +74,7 @@ func (b *ProvisioningRequestApplyConfiguration) WithName(value string) *Provisio
|
|||
// If called multiple times, the GenerateName field is set to the value of the last call.
|
||||
func (b *ProvisioningRequestApplyConfiguration) WithGenerateName(value string) *ProvisioningRequestApplyConfiguration {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
b.GenerateName = &value
|
||||
b.ObjectMetaApplyConfiguration.GenerateName = &value
|
||||
return b
|
||||
}
|
||||
|
||||
|
@ -83,7 +83,7 @@ func (b *ProvisioningRequestApplyConfiguration) WithGenerateName(value string) *
|
|||
// If called multiple times, the Namespace field is set to the value of the last call.
|
||||
func (b *ProvisioningRequestApplyConfiguration) WithNamespace(value string) *ProvisioningRequestApplyConfiguration {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
b.Namespace = &value
|
||||
b.ObjectMetaApplyConfiguration.Namespace = &value
|
||||
return b
|
||||
}
|
||||
|
||||
|
@ -92,7 +92,7 @@ func (b *ProvisioningRequestApplyConfiguration) WithNamespace(value string) *Pro
|
|||
// If called multiple times, the UID field is set to the value of the last call.
|
||||
func (b *ProvisioningRequestApplyConfiguration) WithUID(value types.UID) *ProvisioningRequestApplyConfiguration {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
b.UID = &value
|
||||
b.ObjectMetaApplyConfiguration.UID = &value
|
||||
return b
|
||||
}
|
||||
|
||||
|
@ -101,7 +101,7 @@ func (b *ProvisioningRequestApplyConfiguration) WithUID(value types.UID) *Provis
|
|||
// If called multiple times, the ResourceVersion field is set to the value of the last call.
|
||||
func (b *ProvisioningRequestApplyConfiguration) WithResourceVersion(value string) *ProvisioningRequestApplyConfiguration {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
b.ResourceVersion = &value
|
||||
b.ObjectMetaApplyConfiguration.ResourceVersion = &value
|
||||
return b
|
||||
}
|
||||
|
||||
|
@ -110,25 +110,25 @@ func (b *ProvisioningRequestApplyConfiguration) WithResourceVersion(value string
|
|||
// If called multiple times, the Generation field is set to the value of the last call.
|
||||
func (b *ProvisioningRequestApplyConfiguration) WithGeneration(value int64) *ProvisioningRequestApplyConfiguration {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
b.Generation = &value
|
||||
b.ObjectMetaApplyConfiguration.Generation = &value
|
||||
return b
|
||||
}
|
||||
|
||||
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
|
||||
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
|
||||
func (b *ProvisioningRequestApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ProvisioningRequestApplyConfiguration {
|
||||
func (b *ProvisioningRequestApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ProvisioningRequestApplyConfiguration {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
b.CreationTimestamp = &value
|
||||
b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
|
||||
return b
|
||||
}
|
||||
|
||||
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
|
||||
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
|
||||
func (b *ProvisioningRequestApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ProvisioningRequestApplyConfiguration {
|
||||
func (b *ProvisioningRequestApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ProvisioningRequestApplyConfiguration {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
b.DeletionTimestamp = &value
|
||||
b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
|
||||
return b
|
||||
}
|
||||
|
||||
|
@ -137,7 +137,7 @@ func (b *ProvisioningRequestApplyConfiguration) WithDeletionTimestamp(value meta
|
|||
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
|
||||
func (b *ProvisioningRequestApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ProvisioningRequestApplyConfiguration {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
b.DeletionGracePeriodSeconds = &value
|
||||
b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
|
||||
return b
|
||||
}
|
||||
|
||||
|
@ -147,11 +147,11 @@ func (b *ProvisioningRequestApplyConfiguration) WithDeletionGracePeriodSeconds(v
|
|||
// overwriting an existing map entries in Labels field with the same key.
|
||||
func (b *ProvisioningRequestApplyConfiguration) WithLabels(entries map[string]string) *ProvisioningRequestApplyConfiguration {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
if b.Labels == nil && len(entries) > 0 {
|
||||
b.Labels = make(map[string]string, len(entries))
|
||||
if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
|
||||
b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
|
||||
}
|
||||
for k, v := range entries {
|
||||
b.Labels[k] = v
|
||||
b.ObjectMetaApplyConfiguration.Labels[k] = v
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
@ -162,11 +162,11 @@ func (b *ProvisioningRequestApplyConfiguration) WithLabels(entries map[string]st
|
|||
// overwriting an existing map entries in Annotations field with the same key.
|
||||
func (b *ProvisioningRequestApplyConfiguration) WithAnnotations(entries map[string]string) *ProvisioningRequestApplyConfiguration {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
if b.Annotations == nil && len(entries) > 0 {
|
||||
b.Annotations = make(map[string]string, len(entries))
|
||||
if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
|
||||
b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
|
||||
}
|
||||
for k, v := range entries {
|
||||
b.Annotations[k] = v
|
||||
b.ObjectMetaApplyConfiguration.Annotations[k] = v
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
@ -174,13 +174,13 @@ func (b *ProvisioningRequestApplyConfiguration) WithAnnotations(entries map[stri
|
|||
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
|
||||
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
|
||||
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
|
||||
func (b *ProvisioningRequestApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ProvisioningRequestApplyConfiguration {
|
||||
func (b *ProvisioningRequestApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ProvisioningRequestApplyConfiguration {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
for i := range values {
|
||||
if values[i] == nil {
|
||||
panic("nil value passed to WithOwnerReferences")
|
||||
}
|
||||
b.OwnerReferences = append(b.OwnerReferences, *values[i])
|
||||
b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
@ -191,14 +191,14 @@ func (b *ProvisioningRequestApplyConfiguration) WithOwnerReferences(values ...*v
|
|||
func (b *ProvisioningRequestApplyConfiguration) WithFinalizers(values ...string) *ProvisioningRequestApplyConfiguration {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
for i := range values {
|
||||
b.Finalizers = append(b.Finalizers, values[i])
|
||||
b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *ProvisioningRequestApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
|
||||
if b.ObjectMetaApplyConfiguration == nil {
|
||||
b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
|
||||
b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -221,5 +221,5 @@ func (b *ProvisioningRequestApplyConfiguration) WithStatus(value *ProvisioningRe
|
|||
// GetName retrieves the value of the Name field in the declarative configuration.
|
||||
func (b *ProvisioningRequestApplyConfiguration) GetName() *string {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
return b.Name
|
||||
return b.ObjectMetaApplyConfiguration.Name
|
||||
}
|
||||
|
|
|
@ -20,13 +20,13 @@ package v1
|
|||
|
||||
import (
|
||||
autoscalingxk8siov1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1"
|
||||
v1 "k8s.io/client-go/applyconfigurations/meta/v1"
|
||||
metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
|
||||
)
|
||||
|
||||
// ProvisioningRequestStatusApplyConfiguration represents a declarative configuration of the ProvisioningRequestStatus type for use
|
||||
// with apply.
|
||||
type ProvisioningRequestStatusApplyConfiguration struct {
|
||||
Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"`
|
||||
Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"`
|
||||
ProvisioningClassDetails map[string]autoscalingxk8siov1.Detail `json:"provisioningClassDetails,omitempty"`
|
||||
}
|
||||
|
||||
|
@ -39,7 +39,7 @@ func ProvisioningRequestStatus() *ProvisioningRequestStatusApplyConfiguration {
|
|||
// WithConditions adds the given value to the Conditions field in the declarative configuration
|
||||
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
|
||||
// If called multiple times, values provided by each call will be appended to the Conditions field.
|
||||
func (b *ProvisioningRequestStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *ProvisioningRequestStatusApplyConfiguration {
|
||||
func (b *ProvisioningRequestStatusApplyConfiguration) WithConditions(values ...*metav1.ConditionApplyConfiguration) *ProvisioningRequestStatusApplyConfiguration {
|
||||
for i := range values {
|
||||
if values[i] == nil {
|
||||
panic("nil value passed to WithConditions")
|
||||
|
|
|
@ -48,7 +48,7 @@ func ProvisioningRequest(name, namespace string) *ProvisioningRequestApplyConfig
|
|||
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||
// If called multiple times, the Kind field is set to the value of the last call.
|
||||
func (b *ProvisioningRequestApplyConfiguration) WithKind(value string) *ProvisioningRequestApplyConfiguration {
|
||||
b.Kind = &value
|
||||
b.TypeMetaApplyConfiguration.Kind = &value
|
||||
return b
|
||||
}
|
||||
|
||||
|
@ -56,7 +56,7 @@ func (b *ProvisioningRequestApplyConfiguration) WithKind(value string) *Provisio
|
|||
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||
// If called multiple times, the APIVersion field is set to the value of the last call.
|
||||
func (b *ProvisioningRequestApplyConfiguration) WithAPIVersion(value string) *ProvisioningRequestApplyConfiguration {
|
||||
b.APIVersion = &value
|
||||
b.TypeMetaApplyConfiguration.APIVersion = &value
|
||||
return b
|
||||
}
|
||||
|
||||
|
@ -65,7 +65,7 @@ func (b *ProvisioningRequestApplyConfiguration) WithAPIVersion(value string) *Pr
|
|||
// If called multiple times, the Name field is set to the value of the last call.
|
||||
func (b *ProvisioningRequestApplyConfiguration) WithName(value string) *ProvisioningRequestApplyConfiguration {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
b.Name = &value
|
||||
b.ObjectMetaApplyConfiguration.Name = &value
|
||||
return b
|
||||
}
|
||||
|
||||
|
@ -74,7 +74,7 @@ func (b *ProvisioningRequestApplyConfiguration) WithName(value string) *Provisio
|
|||
// If called multiple times, the GenerateName field is set to the value of the last call.
|
||||
func (b *ProvisioningRequestApplyConfiguration) WithGenerateName(value string) *ProvisioningRequestApplyConfiguration {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
b.GenerateName = &value
|
||||
b.ObjectMetaApplyConfiguration.GenerateName = &value
|
||||
return b
|
||||
}
|
||||
|
||||
|
@ -83,7 +83,7 @@ func (b *ProvisioningRequestApplyConfiguration) WithGenerateName(value string) *
|
|||
// If called multiple times, the Namespace field is set to the value of the last call.
|
||||
func (b *ProvisioningRequestApplyConfiguration) WithNamespace(value string) *ProvisioningRequestApplyConfiguration {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
b.Namespace = &value
|
||||
b.ObjectMetaApplyConfiguration.Namespace = &value
|
||||
return b
|
||||
}
|
||||
|
||||
|
@ -92,7 +92,7 @@ func (b *ProvisioningRequestApplyConfiguration) WithNamespace(value string) *Pro
|
|||
// If called multiple times, the UID field is set to the value of the last call.
|
||||
func (b *ProvisioningRequestApplyConfiguration) WithUID(value types.UID) *ProvisioningRequestApplyConfiguration {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
b.UID = &value
|
||||
b.ObjectMetaApplyConfiguration.UID = &value
|
||||
return b
|
||||
}
|
||||
|
||||
|
@ -101,7 +101,7 @@ func (b *ProvisioningRequestApplyConfiguration) WithUID(value types.UID) *Provis
|
|||
// If called multiple times, the ResourceVersion field is set to the value of the last call.
|
||||
func (b *ProvisioningRequestApplyConfiguration) WithResourceVersion(value string) *ProvisioningRequestApplyConfiguration {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
b.ResourceVersion = &value
|
||||
b.ObjectMetaApplyConfiguration.ResourceVersion = &value
|
||||
return b
|
||||
}
|
||||
|
||||
|
@ -110,7 +110,7 @@ func (b *ProvisioningRequestApplyConfiguration) WithResourceVersion(value string
|
|||
// If called multiple times, the Generation field is set to the value of the last call.
|
||||
func (b *ProvisioningRequestApplyConfiguration) WithGeneration(value int64) *ProvisioningRequestApplyConfiguration {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
b.Generation = &value
|
||||
b.ObjectMetaApplyConfiguration.Generation = &value
|
||||
return b
|
||||
}
|
||||
|
||||
|
@ -119,7 +119,7 @@ func (b *ProvisioningRequestApplyConfiguration) WithGeneration(value int64) *Pro
|
|||
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
|
||||
func (b *ProvisioningRequestApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ProvisioningRequestApplyConfiguration {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
b.CreationTimestamp = &value
|
||||
b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
|
||||
return b
|
||||
}
|
||||
|
||||
|
@ -128,7 +128,7 @@ func (b *ProvisioningRequestApplyConfiguration) WithCreationTimestamp(value meta
|
|||
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
|
||||
func (b *ProvisioningRequestApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ProvisioningRequestApplyConfiguration {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
b.DeletionTimestamp = &value
|
||||
b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
|
||||
return b
|
||||
}
|
||||
|
||||
|
@ -137,7 +137,7 @@ func (b *ProvisioningRequestApplyConfiguration) WithDeletionTimestamp(value meta
|
|||
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
|
||||
func (b *ProvisioningRequestApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ProvisioningRequestApplyConfiguration {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
b.DeletionGracePeriodSeconds = &value
|
||||
b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
|
||||
return b
|
||||
}
|
||||
|
||||
|
@ -147,11 +147,11 @@ func (b *ProvisioningRequestApplyConfiguration) WithDeletionGracePeriodSeconds(v
|
|||
// overwriting an existing map entries in Labels field with the same key.
|
||||
func (b *ProvisioningRequestApplyConfiguration) WithLabels(entries map[string]string) *ProvisioningRequestApplyConfiguration {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
if b.Labels == nil && len(entries) > 0 {
|
||||
b.Labels = make(map[string]string, len(entries))
|
||||
if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
|
||||
b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
|
||||
}
|
||||
for k, v := range entries {
|
||||
b.Labels[k] = v
|
||||
b.ObjectMetaApplyConfiguration.Labels[k] = v
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
@ -162,11 +162,11 @@ func (b *ProvisioningRequestApplyConfiguration) WithLabels(entries map[string]st
|
|||
// overwriting an existing map entries in Annotations field with the same key.
|
||||
func (b *ProvisioningRequestApplyConfiguration) WithAnnotations(entries map[string]string) *ProvisioningRequestApplyConfiguration {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
if b.Annotations == nil && len(entries) > 0 {
|
||||
b.Annotations = make(map[string]string, len(entries))
|
||||
if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
|
||||
b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
|
||||
}
|
||||
for k, v := range entries {
|
||||
b.Annotations[k] = v
|
||||
b.ObjectMetaApplyConfiguration.Annotations[k] = v
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
@ -180,7 +180,7 @@ func (b *ProvisioningRequestApplyConfiguration) WithOwnerReferences(values ...*v
|
|||
if values[i] == nil {
|
||||
panic("nil value passed to WithOwnerReferences")
|
||||
}
|
||||
b.OwnerReferences = append(b.OwnerReferences, *values[i])
|
||||
b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
@ -191,7 +191,7 @@ func (b *ProvisioningRequestApplyConfiguration) WithOwnerReferences(values ...*v
|
|||
func (b *ProvisioningRequestApplyConfiguration) WithFinalizers(values ...string) *ProvisioningRequestApplyConfiguration {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
for i := range values {
|
||||
b.Finalizers = append(b.Finalizers, values[i])
|
||||
b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
@ -221,5 +221,5 @@ func (b *ProvisioningRequestApplyConfiguration) WithStatus(value *ProvisioningRe
|
|||
// GetName retrieves the value of the Name field in the declarative configuration.
|
||||
func (b *ProvisioningRequestApplyConfiguration) GetName() *string {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
return b.Name
|
||||
return b.ObjectMetaApplyConfiguration.Name
|
||||
}
|
||||
|
|
|
@ -19,15 +19,15 @@ limitations under the License.
|
|||
package v1beta1
|
||||
|
||||
import (
|
||||
v1beta1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1beta1"
|
||||
autoscalingxk8siov1beta1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1beta1"
|
||||
v1 "k8s.io/client-go/applyconfigurations/meta/v1"
|
||||
)
|
||||
|
||||
// ProvisioningRequestStatusApplyConfiguration represents a declarative configuration of the ProvisioningRequestStatus type for use
|
||||
// with apply.
|
||||
type ProvisioningRequestStatusApplyConfiguration struct {
|
||||
Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"`
|
||||
ProvisioningClassDetails map[string]v1beta1.Detail `json:"provisioningClassDetails,omitempty"`
|
||||
Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"`
|
||||
ProvisioningClassDetails map[string]autoscalingxk8siov1beta1.Detail `json:"provisioningClassDetails,omitempty"`
|
||||
}
|
||||
|
||||
// ProvisioningRequestStatusApplyConfiguration constructs a declarative configuration of the ProvisioningRequestStatus type for use with
|
||||
|
@ -53,9 +53,9 @@ func (b *ProvisioningRequestStatusApplyConfiguration) WithConditions(values ...*
|
|||
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
|
||||
// If called multiple times, the entries provided by each call will be put on the ProvisioningClassDetails field,
|
||||
// overwriting an existing map entries in ProvisioningClassDetails field with the same key.
|
||||
func (b *ProvisioningRequestStatusApplyConfiguration) WithProvisioningClassDetails(entries map[string]v1beta1.Detail) *ProvisioningRequestStatusApplyConfiguration {
|
||||
func (b *ProvisioningRequestStatusApplyConfiguration) WithProvisioningClassDetails(entries map[string]autoscalingxk8siov1beta1.Detail) *ProvisioningRequestStatusApplyConfiguration {
|
||||
if b.ProvisioningClassDetails == nil && len(entries) > 0 {
|
||||
b.ProvisioningClassDetails = make(map[string]v1beta1.Detail, len(entries))
|
||||
b.ProvisioningClassDetails = make(map[string]autoscalingxk8siov1beta1.Detail, len(entries))
|
||||
}
|
||||
for k, v := range entries {
|
||||
b.ProvisioningClassDetails[k] = v
|
||||
|
|
|
@ -19,8 +19,8 @@ limitations under the License.
|
|||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
fmt "fmt"
|
||||
sync "sync"
|
||||
|
||||
typed "sigs.k8s.io/structured-merge-diff/v4/typed"
|
||||
)
|
||||
|
|
|
@ -21,12 +21,12 @@ package applyconfiguration
|
|||
import (
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
schema "k8s.io/apimachinery/pkg/runtime/schema"
|
||||
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
|
||||
v1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1"
|
||||
v1beta1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1beta1"
|
||||
autoscalingxk8siov1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/client/applyconfiguration/autoscaling.x-k8s.io/v1"
|
||||
autoscalingxk8siov1beta1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/client/applyconfiguration/autoscaling.x-k8s.io/v1beta1"
|
||||
internal "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/client/applyconfiguration/internal"
|
||||
testing "k8s.io/client-go/testing"
|
||||
)
|
||||
|
||||
// ForKind returns an apply configuration type for the given GroupVersionKind, or nil if no
|
||||
|
@ -61,6 +61,6 @@ func ForKind(kind schema.GroupVersionKind) interface{} {
|
|||
return nil
|
||||
}
|
||||
|
||||
func NewTypeConverter(scheme *runtime.Scheme) *testing.TypeConverter {
|
||||
return &testing.TypeConverter{Scheme: scheme, TypeResolver: internal.Parser()}
|
||||
func NewTypeConverter(scheme *runtime.Scheme) managedfields.TypeConverter {
|
||||
return managedfields.NewSchemeTypeConverter(scheme, internal.Parser())
|
||||
}
|
||||
|
|
|
@ -19,8 +19,8 @@ limitations under the License.
|
|||
package versioned
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
fmt "fmt"
|
||||
http "net/http"
|
||||
|
||||
autoscalingv1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/client/clientset/versioned/typed/autoscaling.x-k8s.io/v1"
|
||||
autoscalingv1beta1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/client/clientset/versioned/typed/autoscaling.x-k8s.io/v1beta1"
|
||||
|
|
|
@ -19,6 +19,7 @@ limitations under the License.
|
|||
package fake
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
applyconfiguration "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/client/applyconfiguration"
|
||||
|
@ -52,9 +53,13 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset {
|
|||
cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake}
|
||||
cs.AddReactor("*", "*", testing.ObjectReaction(o))
|
||||
cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) {
|
||||
var opts metav1.ListOptions
|
||||
if watchActcion, ok := action.(testing.WatchActionImpl); ok {
|
||||
opts = watchActcion.ListOptions
|
||||
}
|
||||
gvr := action.GetResource()
|
||||
ns := action.GetNamespace()
|
||||
watch, err := o.Watch(gvr, ns)
|
||||
watch, err := o.Watch(gvr, ns, opts)
|
||||
if err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
|
@ -101,9 +106,13 @@ func NewClientset(objects ...runtime.Object) *Clientset {
|
|||
cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake}
|
||||
cs.AddReactor("*", "*", testing.ObjectReaction(o))
|
||||
cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) {
|
||||
var opts metav1.ListOptions
|
||||
if watchActcion, ok := action.(testing.WatchActionImpl); ok {
|
||||
opts = watchActcion.ListOptions
|
||||
}
|
||||
gvr := action.GetResource()
|
||||
ns := action.GetNamespace()
|
||||
watch, err := o.Watch(gvr, ns)
|
||||
watch, err := o.Watch(gvr, ns, opts)
|
||||
if err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
|
|
|
@ -19,10 +19,10 @@ limitations under the License.
|
|||
package v1
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
http "net/http"
|
||||
|
||||
v1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/client/clientset/versioned/scheme"
|
||||
autoscalingxk8siov1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1"
|
||||
scheme "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/client/clientset/versioned/scheme"
|
||||
rest "k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
|
@ -45,9 +45,7 @@ func (c *AutoscalingV1Client) ProvisioningRequests(namespace string) Provisionin
|
|||
// where httpClient was generated with rest.HTTPClientFor(c).
|
||||
func NewForConfig(c *rest.Config) (*AutoscalingV1Client, error) {
|
||||
config := *c
|
||||
if err := setConfigDefaults(&config); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
setConfigDefaults(&config)
|
||||
httpClient, err := rest.HTTPClientFor(&config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -59,9 +57,7 @@ func NewForConfig(c *rest.Config) (*AutoscalingV1Client, error) {
|
|||
// Note the http client provided takes precedence over the configured transport values.
|
||||
func NewForConfigAndClient(c *rest.Config, h *http.Client) (*AutoscalingV1Client, error) {
|
||||
config := *c
|
||||
if err := setConfigDefaults(&config); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
setConfigDefaults(&config)
|
||||
client, err := rest.RESTClientForConfigAndClient(&config, h)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -84,17 +80,15 @@ func New(c rest.Interface) *AutoscalingV1Client {
|
|||
return &AutoscalingV1Client{c}
|
||||
}
|
||||
|
||||
func setConfigDefaults(config *rest.Config) error {
|
||||
gv := v1.SchemeGroupVersion
|
||||
func setConfigDefaults(config *rest.Config) {
|
||||
gv := autoscalingxk8siov1.SchemeGroupVersion
|
||||
config.GroupVersion = &gv
|
||||
config.APIPath = "/apis"
|
||||
config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
|
||||
config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
|
||||
|
||||
if config.UserAgent == "" {
|
||||
config.UserAgent = rest.DefaultKubernetesUserAgent()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RESTClient returns a RESTClient that is used to communicate
|
||||
|
|
|
@ -29,7 +29,7 @@ type FakeAutoscalingV1 struct {
|
|||
}
|
||||
|
||||
func (c *FakeAutoscalingV1) ProvisioningRequests(namespace string) v1.ProvisioningRequestInterface {
|
||||
return &FakeProvisioningRequests{c, namespace}
|
||||
return newFakeProvisioningRequests(c, namespace)
|
||||
}
|
||||
|
||||
// RESTClient returns a RESTClient that is used to communicate
|
||||
|
|
|
@ -19,179 +19,35 @@ limitations under the License.
|
|||
package fake
|
||||
|
||||
import (
|
||||
"context"
|
||||
json "encoding/json"
|
||||
"fmt"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
labels "k8s.io/apimachinery/pkg/labels"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
v1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1"
|
||||
autoscalingxk8siov1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/client/applyconfiguration/autoscaling.x-k8s.io/v1"
|
||||
testing "k8s.io/client-go/testing"
|
||||
typedautoscalingxk8siov1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/client/clientset/versioned/typed/autoscaling.x-k8s.io/v1"
|
||||
gentype "k8s.io/client-go/gentype"
|
||||
)
|
||||
|
||||
// FakeProvisioningRequests implements ProvisioningRequestInterface
|
||||
type FakeProvisioningRequests struct {
|
||||
// fakeProvisioningRequests implements ProvisioningRequestInterface
|
||||
type fakeProvisioningRequests struct {
|
||||
*gentype.FakeClientWithListAndApply[*v1.ProvisioningRequest, *v1.ProvisioningRequestList, *autoscalingxk8siov1.ProvisioningRequestApplyConfiguration]
|
||||
Fake *FakeAutoscalingV1
|
||||
ns string
|
||||
}
|
||||
|
||||
var provisioningrequestsResource = v1.SchemeGroupVersion.WithResource("provisioningrequests")
|
||||
|
||||
var provisioningrequestsKind = v1.SchemeGroupVersion.WithKind("ProvisioningRequest")
|
||||
|
||||
// Get takes name of the provisioningRequest, and returns the corresponding provisioningRequest object, and an error if there is any.
|
||||
func (c *FakeProvisioningRequests) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ProvisioningRequest, err error) {
|
||||
emptyResult := &v1.ProvisioningRequest{}
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewGetActionWithOptions(provisioningrequestsResource, c.ns, name, options), emptyResult)
|
||||
|
||||
if obj == nil {
|
||||
return emptyResult, err
|
||||
func newFakeProvisioningRequests(fake *FakeAutoscalingV1, namespace string) typedautoscalingxk8siov1.ProvisioningRequestInterface {
|
||||
return &fakeProvisioningRequests{
|
||||
gentype.NewFakeClientWithListAndApply[*v1.ProvisioningRequest, *v1.ProvisioningRequestList, *autoscalingxk8siov1.ProvisioningRequestApplyConfiguration](
|
||||
fake.Fake,
|
||||
namespace,
|
||||
v1.SchemeGroupVersion.WithResource("provisioningrequests"),
|
||||
v1.SchemeGroupVersion.WithKind("ProvisioningRequest"),
|
||||
func() *v1.ProvisioningRequest { return &v1.ProvisioningRequest{} },
|
||||
func() *v1.ProvisioningRequestList { return &v1.ProvisioningRequestList{} },
|
||||
func(dst, src *v1.ProvisioningRequestList) { dst.ListMeta = src.ListMeta },
|
||||
func(list *v1.ProvisioningRequestList) []*v1.ProvisioningRequest {
|
||||
return gentype.ToPointerSlice(list.Items)
|
||||
},
|
||||
func(list *v1.ProvisioningRequestList, items []*v1.ProvisioningRequest) {
|
||||
list.Items = gentype.FromPointerSlice(items)
|
||||
},
|
||||
),
|
||||
fake,
|
||||
}
|
||||
return obj.(*v1.ProvisioningRequest), err
|
||||
}
|
||||
|
||||
// List takes label and field selectors, and returns the list of ProvisioningRequests that match those selectors.
|
||||
func (c *FakeProvisioningRequests) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ProvisioningRequestList, err error) {
|
||||
emptyResult := &v1.ProvisioningRequestList{}
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewListActionWithOptions(provisioningrequestsResource, provisioningrequestsKind, c.ns, opts), emptyResult)
|
||||
|
||||
if obj == nil {
|
||||
return emptyResult, err
|
||||
}
|
||||
|
||||
label, _, _ := testing.ExtractFromListOptions(opts)
|
||||
if label == nil {
|
||||
label = labels.Everything()
|
||||
}
|
||||
list := &v1.ProvisioningRequestList{ListMeta: obj.(*v1.ProvisioningRequestList).ListMeta}
|
||||
for _, item := range obj.(*v1.ProvisioningRequestList).Items {
|
||||
if label.Matches(labels.Set(item.Labels)) {
|
||||
list.Items = append(list.Items, item)
|
||||
}
|
||||
}
|
||||
return list, err
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the requested provisioningRequests.
|
||||
func (c *FakeProvisioningRequests) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
|
||||
return c.Fake.
|
||||
InvokesWatch(testing.NewWatchActionWithOptions(provisioningrequestsResource, c.ns, opts))
|
||||
|
||||
}
|
||||
|
||||
// Create takes the representation of a provisioningRequest and creates it. Returns the server's representation of the provisioningRequest, and an error, if there is any.
|
||||
func (c *FakeProvisioningRequests) Create(ctx context.Context, provisioningRequest *v1.ProvisioningRequest, opts metav1.CreateOptions) (result *v1.ProvisioningRequest, err error) {
|
||||
emptyResult := &v1.ProvisioningRequest{}
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewCreateActionWithOptions(provisioningrequestsResource, c.ns, provisioningRequest, opts), emptyResult)
|
||||
|
||||
if obj == nil {
|
||||
return emptyResult, err
|
||||
}
|
||||
return obj.(*v1.ProvisioningRequest), err
|
||||
}
|
||||
|
||||
// Update takes the representation of a provisioningRequest and updates it. Returns the server's representation of the provisioningRequest, and an error, if there is any.
|
||||
func (c *FakeProvisioningRequests) Update(ctx context.Context, provisioningRequest *v1.ProvisioningRequest, opts metav1.UpdateOptions) (result *v1.ProvisioningRequest, err error) {
|
||||
emptyResult := &v1.ProvisioningRequest{}
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewUpdateActionWithOptions(provisioningrequestsResource, c.ns, provisioningRequest, opts), emptyResult)
|
||||
|
||||
if obj == nil {
|
||||
return emptyResult, err
|
||||
}
|
||||
return obj.(*v1.ProvisioningRequest), err
|
||||
}
|
||||
|
||||
// UpdateStatus was generated because the type contains a Status member.
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
func (c *FakeProvisioningRequests) UpdateStatus(ctx context.Context, provisioningRequest *v1.ProvisioningRequest, opts metav1.UpdateOptions) (result *v1.ProvisioningRequest, err error) {
|
||||
emptyResult := &v1.ProvisioningRequest{}
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewUpdateSubresourceActionWithOptions(provisioningrequestsResource, "status", c.ns, provisioningRequest, opts), emptyResult)
|
||||
|
||||
if obj == nil {
|
||||
return emptyResult, err
|
||||
}
|
||||
return obj.(*v1.ProvisioningRequest), err
|
||||
}
|
||||
|
||||
// Delete takes name of the provisioningRequest and deletes it. Returns an error if one occurs.
|
||||
func (c *FakeProvisioningRequests) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
|
||||
_, err := c.Fake.
|
||||
Invokes(testing.NewDeleteActionWithOptions(provisioningrequestsResource, c.ns, name, opts), &v1.ProvisioningRequest{})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// DeleteCollection deletes a collection of objects.
|
||||
func (c *FakeProvisioningRequests) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
|
||||
action := testing.NewDeleteCollectionActionWithOptions(provisioningrequestsResource, c.ns, opts, listOpts)
|
||||
|
||||
_, err := c.Fake.Invokes(action, &v1.ProvisioningRequestList{})
|
||||
return err
|
||||
}
|
||||
|
||||
// Patch applies the patch and returns the patched provisioningRequest.
|
||||
func (c *FakeProvisioningRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ProvisioningRequest, err error) {
|
||||
emptyResult := &v1.ProvisioningRequest{}
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewPatchSubresourceActionWithOptions(provisioningrequestsResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
|
||||
|
||||
if obj == nil {
|
||||
return emptyResult, err
|
||||
}
|
||||
return obj.(*v1.ProvisioningRequest), err
|
||||
}
|
||||
|
||||
// Apply takes the given apply declarative configuration, applies it and returns the applied provisioningRequest.
|
||||
func (c *FakeProvisioningRequests) Apply(ctx context.Context, provisioningRequest *autoscalingxk8siov1.ProvisioningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ProvisioningRequest, err error) {
|
||||
if provisioningRequest == nil {
|
||||
return nil, fmt.Errorf("provisioningRequest provided to Apply must not be nil")
|
||||
}
|
||||
data, err := json.Marshal(provisioningRequest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
name := provisioningRequest.Name
|
||||
if name == nil {
|
||||
return nil, fmt.Errorf("provisioningRequest.Name must be provided to Apply")
|
||||
}
|
||||
emptyResult := &v1.ProvisioningRequest{}
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewPatchSubresourceActionWithOptions(provisioningrequestsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
|
||||
|
||||
if obj == nil {
|
||||
return emptyResult, err
|
||||
}
|
||||
return obj.(*v1.ProvisioningRequest), err
|
||||
}
|
||||
|
||||
// ApplyStatus was generated because the type contains a Status member.
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
|
||||
func (c *FakeProvisioningRequests) ApplyStatus(ctx context.Context, provisioningRequest *autoscalingxk8siov1.ProvisioningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ProvisioningRequest, err error) {
|
||||
if provisioningRequest == nil {
|
||||
return nil, fmt.Errorf("provisioningRequest provided to Apply must not be nil")
|
||||
}
|
||||
data, err := json.Marshal(provisioningRequest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
name := provisioningRequest.Name
|
||||
if name == nil {
|
||||
return nil, fmt.Errorf("provisioningRequest.Name must be provided to Apply")
|
||||
}
|
||||
emptyResult := &v1.ProvisioningRequest{}
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewPatchSubresourceActionWithOptions(provisioningrequestsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
|
||||
|
||||
if obj == nil {
|
||||
return emptyResult, err
|
||||
}
|
||||
return obj.(*v1.ProvisioningRequest), err
|
||||
}
|
||||
|
|
|
@ -19,13 +19,13 @@ limitations under the License.
|
|||
package v1
|
||||
|
||||
import (
|
||||
"context"
|
||||
context "context"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
v1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1"
|
||||
autoscalingxk8siov1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/client/applyconfiguration/autoscaling.x-k8s.io/v1"
|
||||
autoscalingxk8siov1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1"
|
||||
applyconfigurationautoscalingxk8siov1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/client/applyconfiguration/autoscaling.x-k8s.io/v1"
|
||||
scheme "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/client/clientset/versioned/scheme"
|
||||
gentype "k8s.io/client-go/gentype"
|
||||
)
|
||||
|
@ -38,36 +38,39 @@ type ProvisioningRequestsGetter interface {
|
|||
|
||||
// ProvisioningRequestInterface has methods to work with ProvisioningRequest resources.
|
||||
type ProvisioningRequestInterface interface {
|
||||
Create(ctx context.Context, provisioningRequest *v1.ProvisioningRequest, opts metav1.CreateOptions) (*v1.ProvisioningRequest, error)
|
||||
Update(ctx context.Context, provisioningRequest *v1.ProvisioningRequest, opts metav1.UpdateOptions) (*v1.ProvisioningRequest, error)
|
||||
Create(ctx context.Context, provisioningRequest *autoscalingxk8siov1.ProvisioningRequest, opts metav1.CreateOptions) (*autoscalingxk8siov1.ProvisioningRequest, error)
|
||||
Update(ctx context.Context, provisioningRequest *autoscalingxk8siov1.ProvisioningRequest, opts metav1.UpdateOptions) (*autoscalingxk8siov1.ProvisioningRequest, error)
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
UpdateStatus(ctx context.Context, provisioningRequest *v1.ProvisioningRequest, opts metav1.UpdateOptions) (*v1.ProvisioningRequest, error)
|
||||
UpdateStatus(ctx context.Context, provisioningRequest *autoscalingxk8siov1.ProvisioningRequest, opts metav1.UpdateOptions) (*autoscalingxk8siov1.ProvisioningRequest, error)
|
||||
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
|
||||
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
|
||||
Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ProvisioningRequest, error)
|
||||
List(ctx context.Context, opts metav1.ListOptions) (*v1.ProvisioningRequestList, error)
|
||||
Get(ctx context.Context, name string, opts metav1.GetOptions) (*autoscalingxk8siov1.ProvisioningRequest, error)
|
||||
List(ctx context.Context, opts metav1.ListOptions) (*autoscalingxk8siov1.ProvisioningRequestList, error)
|
||||
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
|
||||
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ProvisioningRequest, err error)
|
||||
Apply(ctx context.Context, provisioningRequest *autoscalingxk8siov1.ProvisioningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ProvisioningRequest, err error)
|
||||
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *autoscalingxk8siov1.ProvisioningRequest, err error)
|
||||
Apply(ctx context.Context, provisioningRequest *applyconfigurationautoscalingxk8siov1.ProvisioningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *autoscalingxk8siov1.ProvisioningRequest, err error)
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
|
||||
ApplyStatus(ctx context.Context, provisioningRequest *autoscalingxk8siov1.ProvisioningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ProvisioningRequest, err error)
|
||||
ApplyStatus(ctx context.Context, provisioningRequest *applyconfigurationautoscalingxk8siov1.ProvisioningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *autoscalingxk8siov1.ProvisioningRequest, err error)
|
||||
ProvisioningRequestExpansion
|
||||
}
|
||||
|
||||
// provisioningRequests implements ProvisioningRequestInterface
|
||||
type provisioningRequests struct {
|
||||
*gentype.ClientWithListAndApply[*v1.ProvisioningRequest, *v1.ProvisioningRequestList, *autoscalingxk8siov1.ProvisioningRequestApplyConfiguration]
|
||||
*gentype.ClientWithListAndApply[*autoscalingxk8siov1.ProvisioningRequest, *autoscalingxk8siov1.ProvisioningRequestList, *applyconfigurationautoscalingxk8siov1.ProvisioningRequestApplyConfiguration]
|
||||
}
|
||||
|
||||
// newProvisioningRequests returns a ProvisioningRequests
|
||||
func newProvisioningRequests(c *AutoscalingV1Client, namespace string) *provisioningRequests {
|
||||
return &provisioningRequests{
|
||||
gentype.NewClientWithListAndApply[*v1.ProvisioningRequest, *v1.ProvisioningRequestList, *autoscalingxk8siov1.ProvisioningRequestApplyConfiguration](
|
||||
gentype.NewClientWithListAndApply[*autoscalingxk8siov1.ProvisioningRequest, *autoscalingxk8siov1.ProvisioningRequestList, *applyconfigurationautoscalingxk8siov1.ProvisioningRequestApplyConfiguration](
|
||||
"provisioningrequests",
|
||||
c.RESTClient(),
|
||||
scheme.ParameterCodec,
|
||||
namespace,
|
||||
func() *v1.ProvisioningRequest { return &v1.ProvisioningRequest{} },
|
||||
func() *v1.ProvisioningRequestList { return &v1.ProvisioningRequestList{} }),
|
||||
func() *autoscalingxk8siov1.ProvisioningRequest { return &autoscalingxk8siov1.ProvisioningRequest{} },
|
||||
func() *autoscalingxk8siov1.ProvisioningRequestList {
|
||||
return &autoscalingxk8siov1.ProvisioningRequestList{}
|
||||
},
|
||||
),
|
||||
}
|
||||
}
|
||||
|
|
|
@ -19,10 +19,10 @@ limitations under the License.
|
|||
package v1beta1
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
http "net/http"
|
||||
|
||||
v1beta1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1beta1"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/client/clientset/versioned/scheme"
|
||||
autoscalingxk8siov1beta1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1beta1"
|
||||
scheme "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/client/clientset/versioned/scheme"
|
||||
rest "k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
|
@ -45,9 +45,7 @@ func (c *AutoscalingV1beta1Client) ProvisioningRequests(namespace string) Provis
|
|||
// where httpClient was generated with rest.HTTPClientFor(c).
|
||||
func NewForConfig(c *rest.Config) (*AutoscalingV1beta1Client, error) {
|
||||
config := *c
|
||||
if err := setConfigDefaults(&config); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
setConfigDefaults(&config)
|
||||
httpClient, err := rest.HTTPClientFor(&config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -59,9 +57,7 @@ func NewForConfig(c *rest.Config) (*AutoscalingV1beta1Client, error) {
|
|||
// Note the http client provided takes precedence over the configured transport values.
|
||||
func NewForConfigAndClient(c *rest.Config, h *http.Client) (*AutoscalingV1beta1Client, error) {
|
||||
config := *c
|
||||
if err := setConfigDefaults(&config); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
setConfigDefaults(&config)
|
||||
client, err := rest.RESTClientForConfigAndClient(&config, h)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -84,17 +80,15 @@ func New(c rest.Interface) *AutoscalingV1beta1Client {
|
|||
return &AutoscalingV1beta1Client{c}
|
||||
}
|
||||
|
||||
func setConfigDefaults(config *rest.Config) error {
|
||||
gv := v1beta1.SchemeGroupVersion
|
||||
func setConfigDefaults(config *rest.Config) {
|
||||
gv := autoscalingxk8siov1beta1.SchemeGroupVersion
|
||||
config.GroupVersion = &gv
|
||||
config.APIPath = "/apis"
|
||||
config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
|
||||
config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
|
||||
|
||||
if config.UserAgent == "" {
|
||||
config.UserAgent = rest.DefaultKubernetesUserAgent()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RESTClient returns a RESTClient that is used to communicate
|
||||
|
|
|
@ -29,7 +29,7 @@ type FakeAutoscalingV1beta1 struct {
|
|||
}
|
||||
|
||||
func (c *FakeAutoscalingV1beta1) ProvisioningRequests(namespace string) v1beta1.ProvisioningRequestInterface {
|
||||
return &FakeProvisioningRequests{c, namespace}
|
||||
return newFakeProvisioningRequests(c, namespace)
|
||||
}
|
||||
|
||||
// RESTClient returns a RESTClient that is used to communicate
|
||||
|
|
|
@ -19,179 +19,35 @@ limitations under the License.
|
|||
package fake
|
||||
|
||||
import (
|
||||
"context"
|
||||
json "encoding/json"
|
||||
"fmt"
|
||||
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
labels "k8s.io/apimachinery/pkg/labels"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
v1beta1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1beta1"
|
||||
autoscalingxk8siov1beta1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/client/applyconfiguration/autoscaling.x-k8s.io/v1beta1"
|
||||
testing "k8s.io/client-go/testing"
|
||||
typedautoscalingxk8siov1beta1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/client/clientset/versioned/typed/autoscaling.x-k8s.io/v1beta1"
|
||||
gentype "k8s.io/client-go/gentype"
|
||||
)
|
||||
|
||||
// FakeProvisioningRequests implements ProvisioningRequestInterface
|
||||
type FakeProvisioningRequests struct {
|
||||
// fakeProvisioningRequests implements ProvisioningRequestInterface
|
||||
type fakeProvisioningRequests struct {
|
||||
*gentype.FakeClientWithListAndApply[*v1beta1.ProvisioningRequest, *v1beta1.ProvisioningRequestList, *autoscalingxk8siov1beta1.ProvisioningRequestApplyConfiguration]
|
||||
Fake *FakeAutoscalingV1beta1
|
||||
ns string
|
||||
}
|
||||
|
||||
var provisioningrequestsResource = v1beta1.SchemeGroupVersion.WithResource("provisioningrequests")
|
||||
|
||||
var provisioningrequestsKind = v1beta1.SchemeGroupVersion.WithKind("ProvisioningRequest")
|
||||
|
||||
// Get takes name of the provisioningRequest, and returns the corresponding provisioningRequest object, and an error if there is any.
|
||||
func (c *FakeProvisioningRequests) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ProvisioningRequest, err error) {
|
||||
emptyResult := &v1beta1.ProvisioningRequest{}
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewGetActionWithOptions(provisioningrequestsResource, c.ns, name, options), emptyResult)
|
||||
|
||||
if obj == nil {
|
||||
return emptyResult, err
|
||||
func newFakeProvisioningRequests(fake *FakeAutoscalingV1beta1, namespace string) typedautoscalingxk8siov1beta1.ProvisioningRequestInterface {
|
||||
return &fakeProvisioningRequests{
|
||||
gentype.NewFakeClientWithListAndApply[*v1beta1.ProvisioningRequest, *v1beta1.ProvisioningRequestList, *autoscalingxk8siov1beta1.ProvisioningRequestApplyConfiguration](
|
||||
fake.Fake,
|
||||
namespace,
|
||||
v1beta1.SchemeGroupVersion.WithResource("provisioningrequests"),
|
||||
v1beta1.SchemeGroupVersion.WithKind("ProvisioningRequest"),
|
||||
func() *v1beta1.ProvisioningRequest { return &v1beta1.ProvisioningRequest{} },
|
||||
func() *v1beta1.ProvisioningRequestList { return &v1beta1.ProvisioningRequestList{} },
|
||||
func(dst, src *v1beta1.ProvisioningRequestList) { dst.ListMeta = src.ListMeta },
|
||||
func(list *v1beta1.ProvisioningRequestList) []*v1beta1.ProvisioningRequest {
|
||||
return gentype.ToPointerSlice(list.Items)
|
||||
},
|
||||
func(list *v1beta1.ProvisioningRequestList, items []*v1beta1.ProvisioningRequest) {
|
||||
list.Items = gentype.FromPointerSlice(items)
|
||||
},
|
||||
),
|
||||
fake,
|
||||
}
|
||||
return obj.(*v1beta1.ProvisioningRequest), err
|
||||
}
|
||||
|
||||
// List takes label and field selectors, and returns the list of ProvisioningRequests that match those selectors.
|
||||
func (c *FakeProvisioningRequests) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ProvisioningRequestList, err error) {
|
||||
emptyResult := &v1beta1.ProvisioningRequestList{}
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewListActionWithOptions(provisioningrequestsResource, provisioningrequestsKind, c.ns, opts), emptyResult)
|
||||
|
||||
if obj == nil {
|
||||
return emptyResult, err
|
||||
}
|
||||
|
||||
label, _, _ := testing.ExtractFromListOptions(opts)
|
||||
if label == nil {
|
||||
label = labels.Everything()
|
||||
}
|
||||
list := &v1beta1.ProvisioningRequestList{ListMeta: obj.(*v1beta1.ProvisioningRequestList).ListMeta}
|
||||
for _, item := range obj.(*v1beta1.ProvisioningRequestList).Items {
|
||||
if label.Matches(labels.Set(item.Labels)) {
|
||||
list.Items = append(list.Items, item)
|
||||
}
|
||||
}
|
||||
return list, err
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the requested provisioningRequests.
|
||||
func (c *FakeProvisioningRequests) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
|
||||
return c.Fake.
|
||||
InvokesWatch(testing.NewWatchActionWithOptions(provisioningrequestsResource, c.ns, opts))
|
||||
|
||||
}
|
||||
|
||||
// Create takes the representation of a provisioningRequest and creates it. Returns the server's representation of the provisioningRequest, and an error, if there is any.
|
||||
func (c *FakeProvisioningRequests) Create(ctx context.Context, provisioningRequest *v1beta1.ProvisioningRequest, opts v1.CreateOptions) (result *v1beta1.ProvisioningRequest, err error) {
|
||||
emptyResult := &v1beta1.ProvisioningRequest{}
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewCreateActionWithOptions(provisioningrequestsResource, c.ns, provisioningRequest, opts), emptyResult)
|
||||
|
||||
if obj == nil {
|
||||
return emptyResult, err
|
||||
}
|
||||
return obj.(*v1beta1.ProvisioningRequest), err
|
||||
}
|
||||
|
||||
// Update takes the representation of a provisioningRequest and updates it. Returns the server's representation of the provisioningRequest, and an error, if there is any.
|
||||
func (c *FakeProvisioningRequests) Update(ctx context.Context, provisioningRequest *v1beta1.ProvisioningRequest, opts v1.UpdateOptions) (result *v1beta1.ProvisioningRequest, err error) {
|
||||
emptyResult := &v1beta1.ProvisioningRequest{}
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewUpdateActionWithOptions(provisioningrequestsResource, c.ns, provisioningRequest, opts), emptyResult)
|
||||
|
||||
if obj == nil {
|
||||
return emptyResult, err
|
||||
}
|
||||
return obj.(*v1beta1.ProvisioningRequest), err
|
||||
}
|
||||
|
||||
// UpdateStatus was generated because the type contains a Status member.
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
func (c *FakeProvisioningRequests) UpdateStatus(ctx context.Context, provisioningRequest *v1beta1.ProvisioningRequest, opts v1.UpdateOptions) (result *v1beta1.ProvisioningRequest, err error) {
|
||||
emptyResult := &v1beta1.ProvisioningRequest{}
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewUpdateSubresourceActionWithOptions(provisioningrequestsResource, "status", c.ns, provisioningRequest, opts), emptyResult)
|
||||
|
||||
if obj == nil {
|
||||
return emptyResult, err
|
||||
}
|
||||
return obj.(*v1beta1.ProvisioningRequest), err
|
||||
}
|
||||
|
||||
// Delete takes name of the provisioningRequest and deletes it. Returns an error if one occurs.
|
||||
func (c *FakeProvisioningRequests) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
|
||||
_, err := c.Fake.
|
||||
Invokes(testing.NewDeleteActionWithOptions(provisioningrequestsResource, c.ns, name, opts), &v1beta1.ProvisioningRequest{})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// DeleteCollection deletes a collection of objects.
|
||||
func (c *FakeProvisioningRequests) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
|
||||
action := testing.NewDeleteCollectionActionWithOptions(provisioningrequestsResource, c.ns, opts, listOpts)
|
||||
|
||||
_, err := c.Fake.Invokes(action, &v1beta1.ProvisioningRequestList{})
|
||||
return err
|
||||
}
|
||||
|
||||
// Patch applies the patch and returns the patched provisioningRequest.
|
||||
func (c *FakeProvisioningRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ProvisioningRequest, err error) {
|
||||
emptyResult := &v1beta1.ProvisioningRequest{}
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewPatchSubresourceActionWithOptions(provisioningrequestsResource, c.ns, name, pt, data, opts, subresources...), emptyResult)
|
||||
|
||||
if obj == nil {
|
||||
return emptyResult, err
|
||||
}
|
||||
return obj.(*v1beta1.ProvisioningRequest), err
|
||||
}
|
||||
|
||||
// Apply takes the given apply declarative configuration, applies it and returns the applied provisioningRequest.
|
||||
func (c *FakeProvisioningRequests) Apply(ctx context.Context, provisioningRequest *autoscalingxk8siov1beta1.ProvisioningRequestApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ProvisioningRequest, err error) {
|
||||
if provisioningRequest == nil {
|
||||
return nil, fmt.Errorf("provisioningRequest provided to Apply must not be nil")
|
||||
}
|
||||
data, err := json.Marshal(provisioningRequest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
name := provisioningRequest.Name
|
||||
if name == nil {
|
||||
return nil, fmt.Errorf("provisioningRequest.Name must be provided to Apply")
|
||||
}
|
||||
emptyResult := &v1beta1.ProvisioningRequest{}
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewPatchSubresourceActionWithOptions(provisioningrequestsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
|
||||
|
||||
if obj == nil {
|
||||
return emptyResult, err
|
||||
}
|
||||
return obj.(*v1beta1.ProvisioningRequest), err
|
||||
}
|
||||
|
||||
// ApplyStatus was generated because the type contains a Status member.
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
|
||||
func (c *FakeProvisioningRequests) ApplyStatus(ctx context.Context, provisioningRequest *autoscalingxk8siov1beta1.ProvisioningRequestApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ProvisioningRequest, err error) {
|
||||
if provisioningRequest == nil {
|
||||
return nil, fmt.Errorf("provisioningRequest provided to Apply must not be nil")
|
||||
}
|
||||
data, err := json.Marshal(provisioningRequest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
name := provisioningRequest.Name
|
||||
if name == nil {
|
||||
return nil, fmt.Errorf("provisioningRequest.Name must be provided to Apply")
|
||||
}
|
||||
emptyResult := &v1beta1.ProvisioningRequest{}
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewPatchSubresourceActionWithOptions(provisioningrequestsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
|
||||
|
||||
if obj == nil {
|
||||
return emptyResult, err
|
||||
}
|
||||
return obj.(*v1beta1.ProvisioningRequest), err
|
||||
}
|
||||
|
|
|
@ -19,13 +19,13 @@ limitations under the License.
|
|||
package v1beta1
|
||||
|
||||
import (
|
||||
"context"
|
||||
context "context"
|
||||
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
v1beta1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1beta1"
|
||||
autoscalingxk8siov1beta1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/client/applyconfiguration/autoscaling.x-k8s.io/v1beta1"
|
||||
autoscalingxk8siov1beta1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1beta1"
|
||||
applyconfigurationautoscalingxk8siov1beta1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/client/applyconfiguration/autoscaling.x-k8s.io/v1beta1"
|
||||
scheme "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/client/clientset/versioned/scheme"
|
||||
gentype "k8s.io/client-go/gentype"
|
||||
)
|
||||
|
@ -38,36 +38,41 @@ type ProvisioningRequestsGetter interface {
|
|||
|
||||
// ProvisioningRequestInterface has methods to work with ProvisioningRequest resources.
|
||||
type ProvisioningRequestInterface interface {
|
||||
Create(ctx context.Context, provisioningRequest *v1beta1.ProvisioningRequest, opts v1.CreateOptions) (*v1beta1.ProvisioningRequest, error)
|
||||
Update(ctx context.Context, provisioningRequest *v1beta1.ProvisioningRequest, opts v1.UpdateOptions) (*v1beta1.ProvisioningRequest, error)
|
||||
Create(ctx context.Context, provisioningRequest *autoscalingxk8siov1beta1.ProvisioningRequest, opts v1.CreateOptions) (*autoscalingxk8siov1beta1.ProvisioningRequest, error)
|
||||
Update(ctx context.Context, provisioningRequest *autoscalingxk8siov1beta1.ProvisioningRequest, opts v1.UpdateOptions) (*autoscalingxk8siov1beta1.ProvisioningRequest, error)
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
UpdateStatus(ctx context.Context, provisioningRequest *v1beta1.ProvisioningRequest, opts v1.UpdateOptions) (*v1beta1.ProvisioningRequest, error)
|
||||
UpdateStatus(ctx context.Context, provisioningRequest *autoscalingxk8siov1beta1.ProvisioningRequest, opts v1.UpdateOptions) (*autoscalingxk8siov1beta1.ProvisioningRequest, error)
|
||||
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
|
||||
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
|
||||
Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ProvisioningRequest, error)
|
||||
List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ProvisioningRequestList, error)
|
||||
Get(ctx context.Context, name string, opts v1.GetOptions) (*autoscalingxk8siov1beta1.ProvisioningRequest, error)
|
||||
List(ctx context.Context, opts v1.ListOptions) (*autoscalingxk8siov1beta1.ProvisioningRequestList, error)
|
||||
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
|
||||
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ProvisioningRequest, err error)
|
||||
Apply(ctx context.Context, provisioningRequest *autoscalingxk8siov1beta1.ProvisioningRequestApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ProvisioningRequest, err error)
|
||||
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *autoscalingxk8siov1beta1.ProvisioningRequest, err error)
|
||||
Apply(ctx context.Context, provisioningRequest *applyconfigurationautoscalingxk8siov1beta1.ProvisioningRequestApplyConfiguration, opts v1.ApplyOptions) (result *autoscalingxk8siov1beta1.ProvisioningRequest, err error)
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
|
||||
ApplyStatus(ctx context.Context, provisioningRequest *autoscalingxk8siov1beta1.ProvisioningRequestApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ProvisioningRequest, err error)
|
||||
ApplyStatus(ctx context.Context, provisioningRequest *applyconfigurationautoscalingxk8siov1beta1.ProvisioningRequestApplyConfiguration, opts v1.ApplyOptions) (result *autoscalingxk8siov1beta1.ProvisioningRequest, err error)
|
||||
ProvisioningRequestExpansion
|
||||
}
|
||||
|
||||
// provisioningRequests implements ProvisioningRequestInterface
|
||||
type provisioningRequests struct {
|
||||
*gentype.ClientWithListAndApply[*v1beta1.ProvisioningRequest, *v1beta1.ProvisioningRequestList, *autoscalingxk8siov1beta1.ProvisioningRequestApplyConfiguration]
|
||||
*gentype.ClientWithListAndApply[*autoscalingxk8siov1beta1.ProvisioningRequest, *autoscalingxk8siov1beta1.ProvisioningRequestList, *applyconfigurationautoscalingxk8siov1beta1.ProvisioningRequestApplyConfiguration]
|
||||
}
|
||||
|
||||
// newProvisioningRequests returns a ProvisioningRequests
|
||||
func newProvisioningRequests(c *AutoscalingV1beta1Client, namespace string) *provisioningRequests {
|
||||
return &provisioningRequests{
|
||||
gentype.NewClientWithListAndApply[*v1beta1.ProvisioningRequest, *v1beta1.ProvisioningRequestList, *autoscalingxk8siov1beta1.ProvisioningRequestApplyConfiguration](
|
||||
gentype.NewClientWithListAndApply[*autoscalingxk8siov1beta1.ProvisioningRequest, *autoscalingxk8siov1beta1.ProvisioningRequestList, *applyconfigurationautoscalingxk8siov1beta1.ProvisioningRequestApplyConfiguration](
|
||||
"provisioningrequests",
|
||||
c.RESTClient(),
|
||||
scheme.ParameterCodec,
|
||||
namespace,
|
||||
func() *v1beta1.ProvisioningRequest { return &v1beta1.ProvisioningRequest{} },
|
||||
func() *v1beta1.ProvisioningRequestList { return &v1beta1.ProvisioningRequestList{} }),
|
||||
func() *autoscalingxk8siov1beta1.ProvisioningRequest {
|
||||
return &autoscalingxk8siov1beta1.ProvisioningRequest{}
|
||||
},
|
||||
func() *autoscalingxk8siov1beta1.ProvisioningRequestList {
|
||||
return &autoscalingxk8siov1beta1.ProvisioningRequestList{}
|
||||
},
|
||||
),
|
||||
}
|
||||
}
|
||||
|
|
|
@ -19,16 +19,16 @@ limitations under the License.
|
|||
package v1
|
||||
|
||||
import (
|
||||
"context"
|
||||
context "context"
|
||||
time "time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
autoscalingxk8siov1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1"
|
||||
provisioningrequestautoscalingxk8siov1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1"
|
||||
versioned "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/client/clientset/versioned"
|
||||
internalinterfaces "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/client/informers/externalversions/internalinterfaces"
|
||||
v1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/client/listers/autoscaling.x-k8s.io/v1"
|
||||
autoscalingxk8siov1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/client/listers/autoscaling.x-k8s.io/v1"
|
||||
cache "k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
|
@ -36,7 +36,7 @@ import (
|
|||
// ProvisioningRequests.
|
||||
type ProvisioningRequestInformer interface {
|
||||
Informer() cache.SharedIndexInformer
|
||||
Lister() v1.ProvisioningRequestLister
|
||||
Lister() autoscalingxk8siov1.ProvisioningRequestLister
|
||||
}
|
||||
|
||||
type provisioningRequestInformer struct {
|
||||
|
@ -62,16 +62,28 @@ func NewFilteredProvisioningRequestInformer(client versioned.Interface, namespac
|
|||
if tweakListOptions != nil {
|
||||
tweakListOptions(&options)
|
||||
}
|
||||
return client.AutoscalingV1().ProvisioningRequests(namespace).List(context.TODO(), options)
|
||||
return client.AutoscalingV1().ProvisioningRequests(namespace).List(context.Background(), options)
|
||||
},
|
||||
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
|
||||
if tweakListOptions != nil {
|
||||
tweakListOptions(&options)
|
||||
}
|
||||
return client.AutoscalingV1().ProvisioningRequests(namespace).Watch(context.TODO(), options)
|
||||
return client.AutoscalingV1().ProvisioningRequests(namespace).Watch(context.Background(), options)
|
||||
},
|
||||
ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
|
||||
if tweakListOptions != nil {
|
||||
tweakListOptions(&options)
|
||||
}
|
||||
return client.AutoscalingV1().ProvisioningRequests(namespace).List(ctx, options)
|
||||
},
|
||||
WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) {
|
||||
if tweakListOptions != nil {
|
||||
tweakListOptions(&options)
|
||||
}
|
||||
return client.AutoscalingV1().ProvisioningRequests(namespace).Watch(ctx, options)
|
||||
},
|
||||
},
|
||||
&autoscalingxk8siov1.ProvisioningRequest{},
|
||||
&provisioningrequestautoscalingxk8siov1.ProvisioningRequest{},
|
||||
resyncPeriod,
|
||||
indexers,
|
||||
)
|
||||
|
@ -82,9 +94,9 @@ func (f *provisioningRequestInformer) defaultInformer(client versioned.Interface
|
|||
}
|
||||
|
||||
func (f *provisioningRequestInformer) Informer() cache.SharedIndexInformer {
|
||||
return f.factory.InformerFor(&autoscalingxk8siov1.ProvisioningRequest{}, f.defaultInformer)
|
||||
return f.factory.InformerFor(&provisioningrequestautoscalingxk8siov1.ProvisioningRequest{}, f.defaultInformer)
|
||||
}
|
||||
|
||||
func (f *provisioningRequestInformer) Lister() v1.ProvisioningRequestLister {
|
||||
return v1.NewProvisioningRequestLister(f.Informer().GetIndexer())
|
||||
func (f *provisioningRequestInformer) Lister() autoscalingxk8siov1.ProvisioningRequestLister {
|
||||
return autoscalingxk8siov1.NewProvisioningRequestLister(f.Informer().GetIndexer())
|
||||
}
|
||||
|
|
|
@ -19,16 +19,16 @@ limitations under the License.
|
|||
package v1beta1
|
||||
|
||||
import (
|
||||
"context"
|
||||
context "context"
|
||||
time "time"
|
||||
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
autoscalingxk8siov1beta1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1beta1"
|
||||
provisioningrequestautoscalingxk8siov1beta1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1beta1"
|
||||
versioned "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/client/clientset/versioned"
|
||||
internalinterfaces "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/client/informers/externalversions/internalinterfaces"
|
||||
v1beta1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/client/listers/autoscaling.x-k8s.io/v1beta1"
|
||||
autoscalingxk8siov1beta1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/client/listers/autoscaling.x-k8s.io/v1beta1"
|
||||
cache "k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
|
@ -36,7 +36,7 @@ import (
|
|||
// ProvisioningRequests.
|
||||
type ProvisioningRequestInformer interface {
|
||||
Informer() cache.SharedIndexInformer
|
||||
Lister() v1beta1.ProvisioningRequestLister
|
||||
Lister() autoscalingxk8siov1beta1.ProvisioningRequestLister
|
||||
}
|
||||
|
||||
type provisioningRequestInformer struct {
|
||||
|
@ -62,16 +62,28 @@ func NewFilteredProvisioningRequestInformer(client versioned.Interface, namespac
|
|||
if tweakListOptions != nil {
|
||||
tweakListOptions(&options)
|
||||
}
|
||||
return client.AutoscalingV1beta1().ProvisioningRequests(namespace).List(context.TODO(), options)
|
||||
return client.AutoscalingV1beta1().ProvisioningRequests(namespace).List(context.Background(), options)
|
||||
},
|
||||
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
|
||||
if tweakListOptions != nil {
|
||||
tweakListOptions(&options)
|
||||
}
|
||||
return client.AutoscalingV1beta1().ProvisioningRequests(namespace).Watch(context.TODO(), options)
|
||||
return client.AutoscalingV1beta1().ProvisioningRequests(namespace).Watch(context.Background(), options)
|
||||
},
|
||||
ListWithContextFunc: func(ctx context.Context, options v1.ListOptions) (runtime.Object, error) {
|
||||
if tweakListOptions != nil {
|
||||
tweakListOptions(&options)
|
||||
}
|
||||
return client.AutoscalingV1beta1().ProvisioningRequests(namespace).List(ctx, options)
|
||||
},
|
||||
WatchFuncWithContext: func(ctx context.Context, options v1.ListOptions) (watch.Interface, error) {
|
||||
if tweakListOptions != nil {
|
||||
tweakListOptions(&options)
|
||||
}
|
||||
return client.AutoscalingV1beta1().ProvisioningRequests(namespace).Watch(ctx, options)
|
||||
},
|
||||
},
|
||||
&autoscalingxk8siov1beta1.ProvisioningRequest{},
|
||||
&provisioningrequestautoscalingxk8siov1beta1.ProvisioningRequest{},
|
||||
resyncPeriod,
|
||||
indexers,
|
||||
)
|
||||
|
@ -82,9 +94,9 @@ func (f *provisioningRequestInformer) defaultInformer(client versioned.Interface
|
|||
}
|
||||
|
||||
func (f *provisioningRequestInformer) Informer() cache.SharedIndexInformer {
|
||||
return f.factory.InformerFor(&autoscalingxk8siov1beta1.ProvisioningRequest{}, f.defaultInformer)
|
||||
return f.factory.InformerFor(&provisioningrequestautoscalingxk8siov1beta1.ProvisioningRequest{}, f.defaultInformer)
|
||||
}
|
||||
|
||||
func (f *provisioningRequestInformer) Lister() v1beta1.ProvisioningRequestLister {
|
||||
return v1beta1.NewProvisioningRequestLister(f.Informer().GetIndexer())
|
||||
func (f *provisioningRequestInformer) Lister() autoscalingxk8siov1beta1.ProvisioningRequestLister {
|
||||
return autoscalingxk8siov1beta1.NewProvisioningRequestLister(f.Informer().GetIndexer())
|
||||
}
|
||||
|
|
|
@ -228,6 +228,7 @@ type SharedInformerFactory interface {
|
|||
|
||||
// Start initializes all requested informers. They are handled in goroutines
|
||||
// which run until the stop channel gets closed.
|
||||
// Warning: Start does not block. When run in a go-routine, it will race with a later WaitForCacheSync.
|
||||
Start(stopCh <-chan struct{})
|
||||
|
||||
// Shutdown marks a factory as shutting down. At that point no new
|
||||
|
|
|
@ -19,7 +19,7 @@ limitations under the License.
|
|||
package externalversions
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
fmt "fmt"
|
||||
|
||||
schema "k8s.io/apimachinery/pkg/runtime/schema"
|
||||
v1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1"
|
||||
|
|
|
@ -19,10 +19,10 @@ limitations under the License.
|
|||
package v1
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
v1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1"
|
||||
"k8s.io/client-go/listers"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
labels "k8s.io/apimachinery/pkg/labels"
|
||||
autoscalingxk8siov1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1"
|
||||
listers "k8s.io/client-go/listers"
|
||||
cache "k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
// ProvisioningRequestLister helps list ProvisioningRequests.
|
||||
|
@ -30,7 +30,7 @@ import (
|
|||
type ProvisioningRequestLister interface {
|
||||
// List lists all ProvisioningRequests in the indexer.
|
||||
// Objects returned here must be treated as read-only.
|
||||
List(selector labels.Selector) (ret []*v1.ProvisioningRequest, err error)
|
||||
List(selector labels.Selector) (ret []*autoscalingxk8siov1.ProvisioningRequest, err error)
|
||||
// ProvisioningRequests returns an object that can list and get ProvisioningRequests.
|
||||
ProvisioningRequests(namespace string) ProvisioningRequestNamespaceLister
|
||||
ProvisioningRequestListerExpansion
|
||||
|
@ -38,17 +38,17 @@ type ProvisioningRequestLister interface {
|
|||
|
||||
// provisioningRequestLister implements the ProvisioningRequestLister interface.
|
||||
type provisioningRequestLister struct {
|
||||
listers.ResourceIndexer[*v1.ProvisioningRequest]
|
||||
listers.ResourceIndexer[*autoscalingxk8siov1.ProvisioningRequest]
|
||||
}
|
||||
|
||||
// NewProvisioningRequestLister returns a new ProvisioningRequestLister.
|
||||
func NewProvisioningRequestLister(indexer cache.Indexer) ProvisioningRequestLister {
|
||||
return &provisioningRequestLister{listers.New[*v1.ProvisioningRequest](indexer, v1.Resource("provisioningrequest"))}
|
||||
return &provisioningRequestLister{listers.New[*autoscalingxk8siov1.ProvisioningRequest](indexer, autoscalingxk8siov1.Resource("provisioningrequest"))}
|
||||
}
|
||||
|
||||
// ProvisioningRequests returns an object that can list and get ProvisioningRequests.
|
||||
func (s *provisioningRequestLister) ProvisioningRequests(namespace string) ProvisioningRequestNamespaceLister {
|
||||
return provisioningRequestNamespaceLister{listers.NewNamespaced[*v1.ProvisioningRequest](s.ResourceIndexer, namespace)}
|
||||
return provisioningRequestNamespaceLister{listers.NewNamespaced[*autoscalingxk8siov1.ProvisioningRequest](s.ResourceIndexer, namespace)}
|
||||
}
|
||||
|
||||
// ProvisioningRequestNamespaceLister helps list and get ProvisioningRequests.
|
||||
|
@ -56,15 +56,15 @@ func (s *provisioningRequestLister) ProvisioningRequests(namespace string) Provi
|
|||
type ProvisioningRequestNamespaceLister interface {
|
||||
// List lists all ProvisioningRequests in the indexer for a given namespace.
|
||||
// Objects returned here must be treated as read-only.
|
||||
List(selector labels.Selector) (ret []*v1.ProvisioningRequest, err error)
|
||||
List(selector labels.Selector) (ret []*autoscalingxk8siov1.ProvisioningRequest, err error)
|
||||
// Get retrieves the ProvisioningRequest from the indexer for a given namespace and name.
|
||||
// Objects returned here must be treated as read-only.
|
||||
Get(name string) (*v1.ProvisioningRequest, error)
|
||||
Get(name string) (*autoscalingxk8siov1.ProvisioningRequest, error)
|
||||
ProvisioningRequestNamespaceListerExpansion
|
||||
}
|
||||
|
||||
// provisioningRequestNamespaceLister implements the ProvisioningRequestNamespaceLister
|
||||
// interface.
|
||||
type provisioningRequestNamespaceLister struct {
|
||||
listers.ResourceIndexer[*v1.ProvisioningRequest]
|
||||
listers.ResourceIndexer[*autoscalingxk8siov1.ProvisioningRequest]
|
||||
}
|
||||
|
|
|
@ -19,10 +19,10 @@ limitations under the License.
|
|||
package v1beta1
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
v1beta1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1beta1"
|
||||
"k8s.io/client-go/listers"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
labels "k8s.io/apimachinery/pkg/labels"
|
||||
autoscalingxk8siov1beta1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1beta1"
|
||||
listers "k8s.io/client-go/listers"
|
||||
cache "k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
// ProvisioningRequestLister helps list ProvisioningRequests.
|
||||
|
@ -30,7 +30,7 @@ import (
|
|||
type ProvisioningRequestLister interface {
|
||||
// List lists all ProvisioningRequests in the indexer.
|
||||
// Objects returned here must be treated as read-only.
|
||||
List(selector labels.Selector) (ret []*v1beta1.ProvisioningRequest, err error)
|
||||
List(selector labels.Selector) (ret []*autoscalingxk8siov1beta1.ProvisioningRequest, err error)
|
||||
// ProvisioningRequests returns an object that can list and get ProvisioningRequests.
|
||||
ProvisioningRequests(namespace string) ProvisioningRequestNamespaceLister
|
||||
ProvisioningRequestListerExpansion
|
||||
|
@ -38,17 +38,17 @@ type ProvisioningRequestLister interface {
|
|||
|
||||
// provisioningRequestLister implements the ProvisioningRequestLister interface.
|
||||
type provisioningRequestLister struct {
|
||||
listers.ResourceIndexer[*v1beta1.ProvisioningRequest]
|
||||
listers.ResourceIndexer[*autoscalingxk8siov1beta1.ProvisioningRequest]
|
||||
}
|
||||
|
||||
// NewProvisioningRequestLister returns a new ProvisioningRequestLister.
|
||||
func NewProvisioningRequestLister(indexer cache.Indexer) ProvisioningRequestLister {
|
||||
return &provisioningRequestLister{listers.New[*v1beta1.ProvisioningRequest](indexer, v1beta1.Resource("provisioningrequest"))}
|
||||
return &provisioningRequestLister{listers.New[*autoscalingxk8siov1beta1.ProvisioningRequest](indexer, autoscalingxk8siov1beta1.Resource("provisioningrequest"))}
|
||||
}
|
||||
|
||||
// ProvisioningRequests returns an object that can list and get ProvisioningRequests.
|
||||
func (s *provisioningRequestLister) ProvisioningRequests(namespace string) ProvisioningRequestNamespaceLister {
|
||||
return provisioningRequestNamespaceLister{listers.NewNamespaced[*v1beta1.ProvisioningRequest](s.ResourceIndexer, namespace)}
|
||||
return provisioningRequestNamespaceLister{listers.NewNamespaced[*autoscalingxk8siov1beta1.ProvisioningRequest](s.ResourceIndexer, namespace)}
|
||||
}
|
||||
|
||||
// ProvisioningRequestNamespaceLister helps list and get ProvisioningRequests.
|
||||
|
@ -56,15 +56,15 @@ func (s *provisioningRequestLister) ProvisioningRequests(namespace string) Provi
|
|||
type ProvisioningRequestNamespaceLister interface {
|
||||
// List lists all ProvisioningRequests in the indexer for a given namespace.
|
||||
// Objects returned here must be treated as read-only.
|
||||
List(selector labels.Selector) (ret []*v1beta1.ProvisioningRequest, err error)
|
||||
List(selector labels.Selector) (ret []*autoscalingxk8siov1beta1.ProvisioningRequest, err error)
|
||||
// Get retrieves the ProvisioningRequest from the indexer for a given namespace and name.
|
||||
// Objects returned here must be treated as read-only.
|
||||
Get(name string) (*v1beta1.ProvisioningRequest, error)
|
||||
Get(name string) (*autoscalingxk8siov1beta1.ProvisioningRequest, error)
|
||||
ProvisioningRequestNamespaceListerExpansion
|
||||
}
|
||||
|
||||
// provisioningRequestNamespaceLister implements the ProvisioningRequestNamespaceLister
|
||||
// interface.
|
||||
type provisioningRequestNamespaceLister struct {
|
||||
listers.ResourceIndexer[*v1beta1.ProvisioningRequest]
|
||||
listers.ResourceIndexer[*autoscalingxk8siov1beta1.ProvisioningRequest]
|
||||
}
|
||||
|
|
|
@ -1,60 +1,15 @@
|
|||
# See https://cloud.google.com/cloud-build/docs/build-config
|
||||
timeout: 3600s
|
||||
options:
|
||||
machineType: E2_HIGHCPU_32
|
||||
timeout: 10800s
|
||||
|
||||
substitutions:
|
||||
{ "_TAG": "dev" }
|
||||
|
||||
# this prevents errors if you don't use both _GIT_TAG and _PULL_BASE_REF,
|
||||
# or any new substitutions added in the future.
|
||||
substitution_option: ALLOW_LOOSE
|
||||
steps:
|
||||
- name: gcr.io/cloud-builders/git
|
||||
id: git-clone
|
||||
entrypoint: bash
|
||||
args:
|
||||
- "-c"
|
||||
- |
|
||||
set -ex
|
||||
mkdir -p /workspace/src/k8s.io
|
||||
cd /workspace/src/k8s.io
|
||||
git clone https://github.com/kubernetes/autoscaler.git
|
||||
|
||||
- name: gcr.io/cloud-builders/docker
|
||||
id: build-build-container
|
||||
entrypoint: bash
|
||||
dir: "/workspace/src/k8s.io/autoscaler/cluster-autoscaler"
|
||||
args:
|
||||
- "-c"
|
||||
- |
|
||||
set -e
|
||||
docker build -t autoscaling-builder ../builder
|
||||
|
||||
- name: autoscaling-builder
|
||||
id: run-tests
|
||||
entrypoint: godep
|
||||
dir: "/workspace/src/k8s.io/autoscaler/cluster-autoscaler"
|
||||
env:
|
||||
- "GOPATH=/workspace/"
|
||||
args: ["go", "test", "./..."]
|
||||
|
||||
- name: autoscaling-builder
|
||||
id: run-build
|
||||
entrypoint: godep
|
||||
dir: "/workspace/src/k8s.io/autoscaler/cluster-autoscaler"
|
||||
env:
|
||||
- "GOPATH=/workspace/"
|
||||
- "GOOS=linux"
|
||||
args: ["go", "build", "-o", "cluster-autoscaler"]
|
||||
waitFor: build-build-container
|
||||
|
||||
- name: gcr.io/cloud-builders/docker
|
||||
id: build-container
|
||||
entrypoint: bash
|
||||
dir: "/workspace/src/k8s.io/autoscaler/cluster-autoscaler"
|
||||
args:
|
||||
- "-c"
|
||||
- |
|
||||
set -e
|
||||
docker build -t gcr.io/k8s-image-staging/cluster-autoscaler:${_TAG} .
|
||||
waitFor: ["run-tests", "run-build"]
|
||||
|
||||
images:
|
||||
- "gcr.io/k8s-image-staging/cluster-autoscaler:${_TAG}"
|
||||
- name: "gcr.io/k8s-staging-test-infra/gcb-docker-gcloud:latest"
|
||||
entrypoint: make
|
||||
env:
|
||||
- TAG=$_GIT_TAG
|
||||
args:
|
||||
- execute-release
|
||||
substitutions:
|
||||
_GIT_TAG: "0.0.0" # default value, this is substituted at build time
|
||||
|
|
|
@ -19,6 +19,13 @@ package signers
|
|||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/jmespath/go-jmespath"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/credentials"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/errors"
|
||||
|
@ -26,16 +33,12 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/responses"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/utils"
|
||||
"k8s.io/klog/v2"
|
||||
"net/http"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultOIDCDurationSeconds = 3600
|
||||
oidcTokenFilePath = "ALIBABA_CLOUD_OIDC_TOKEN_FILE"
|
||||
oldOidcTokenFilePath = "ALICLOUD_OIDC_TOKEN_FILE_PATH"
|
||||
)
|
||||
|
||||
// OIDCSigner is kind of signer
|
||||
|
@ -149,7 +152,7 @@ func (signer *OIDCSigner) getOIDCToken(OIDCTokenFilePath string) string {
|
|||
tokenPath := OIDCTokenFilePath
|
||||
_, err := os.Stat(tokenPath)
|
||||
if os.IsNotExist(err) {
|
||||
tokenPath = os.Getenv("ALIBABA_CLOUD_OIDC_TOKEN_FILE")
|
||||
tokenPath = utils.FirstNotEmpty(os.Getenv(oidcTokenFilePath), os.Getenv(oldOidcTokenFilePath))
|
||||
if tokenPath == "" {
|
||||
klog.Error("oidc token file path is missing")
|
||||
return ""
|
||||
|
|
|
@ -89,9 +89,10 @@ func (resolver *LocationResolver) TryResolve(param *ResolveParam) (endpoint stri
|
|||
return
|
||||
}
|
||||
|
||||
err = json.Unmarshal([]byte(response.GetHttpContentString()), &getEndpointResponse)
|
||||
content := response.GetHttpContentString()
|
||||
err = json.Unmarshal([]byte(content), &getEndpointResponse)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to unmarshal endpoint response, error: %v", err)
|
||||
klog.Errorf("failed to resolve endpoint, error: %v, response: %s", err, content)
|
||||
support = false
|
||||
return
|
||||
}
|
||||
|
@ -153,7 +154,7 @@ type EndpointsObj struct {
|
|||
|
||||
// EndpointObj wrapper endpoint
|
||||
type EndpointObj struct {
|
||||
Protocols map[string]string
|
||||
Protocols json.RawMessage
|
||||
Type string
|
||||
Namespace string
|
||||
Id string
|
||||
|
|
|
@ -22,11 +22,12 @@ import (
|
|||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/google/uuid"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
/* if you use go 1.10 or higher, you can hack this util by these to avoid "TimeZone.zip not found" on Windows */
|
||||
|
@ -127,3 +128,15 @@ func InitStructWithDefaultTag(bean interface{}) {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
// FirstNotEmpty returns the first non-empty string from the input list.
|
||||
// If all strings are empty or no arguments are provided, it returns an empty string.
|
||||
func FirstNotEmpty(strs ...string) string {
|
||||
for _, str := range strs {
|
||||
if str != "" {
|
||||
return str
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
|
|
@ -0,0 +1,45 @@
|
|||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package utils
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestFirstNotEmpty(t *testing.T) {
|
||||
// Test case where the first non-empty string is at the beginning
|
||||
result := FirstNotEmpty("hello", "world", "test")
|
||||
assert.Equal(t, "hello", result)
|
||||
|
||||
// Test case where the first non-empty string is in the middle
|
||||
result = FirstNotEmpty("", "foo", "bar")
|
||||
assert.Equal(t, "foo", result)
|
||||
|
||||
// Test case where the first non-empty string is at the end
|
||||
result = FirstNotEmpty("", "", "baz")
|
||||
assert.Equal(t, "baz", result)
|
||||
|
||||
// Test case where all strings are empty
|
||||
result = FirstNotEmpty("", "", "")
|
||||
assert.Equal(t, "", result)
|
||||
|
||||
// Test case with no arguments
|
||||
result = FirstNotEmpty()
|
||||
assert.Equal(t, "", result)
|
||||
}
|
|
@ -19,6 +19,7 @@ package alicloud
|
|||
import (
|
||||
"os"
|
||||
|
||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/utils"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/metadata"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
@ -63,19 +64,19 @@ func (cc *cloudConfig) isValid() bool {
|
|||
}
|
||||
|
||||
if cc.OIDCProviderARN == "" {
|
||||
cc.OIDCProviderARN = firstNotEmpty(os.Getenv(oidcProviderARN), os.Getenv(oldOidcProviderARN))
|
||||
cc.OIDCProviderARN = utils.FirstNotEmpty(os.Getenv(oidcProviderARN), os.Getenv(oldOidcProviderARN))
|
||||
}
|
||||
|
||||
if cc.OIDCTokenFilePath == "" {
|
||||
cc.OIDCTokenFilePath = firstNotEmpty(os.Getenv(oidcTokenFilePath), os.Getenv(oldOidcTokenFilePath))
|
||||
cc.OIDCTokenFilePath = utils.FirstNotEmpty(os.Getenv(oidcTokenFilePath), os.Getenv(oldOidcTokenFilePath))
|
||||
}
|
||||
|
||||
if cc.RoleARN == "" {
|
||||
cc.RoleARN = firstNotEmpty(os.Getenv(roleARN), os.Getenv(oldRoleARN))
|
||||
cc.RoleARN = utils.FirstNotEmpty(os.Getenv(roleARN), os.Getenv(oldRoleARN))
|
||||
}
|
||||
|
||||
if cc.RoleSessionName == "" {
|
||||
cc.RoleSessionName = firstNotEmpty(os.Getenv(roleSessionName), os.Getenv(oldRoleSessionName))
|
||||
cc.RoleSessionName = utils.FirstNotEmpty(os.Getenv(roleSessionName), os.Getenv(oldRoleSessionName))
|
||||
}
|
||||
|
||||
if cc.RegionId != "" && cc.AccessKeyID != "" && cc.AccessKeySecret != "" {
|
||||
|
@ -133,15 +134,3 @@ func (cc *cloudConfig) getRegion() string {
|
|||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// firstNotEmpty returns the first non-empty string from the input list.
|
||||
// If all strings are empty or no arguments are provided, it returns an empty string.
|
||||
func firstNotEmpty(strs ...string) string {
|
||||
for _, str := range strs {
|
||||
if str != "" {
|
||||
return str
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
|
|
@ -55,25 +55,3 @@ func TestOldRRSACloudConfigIsValid(t *testing.T) {
|
|||
assert.True(t, cfg.isValid())
|
||||
assert.True(t, cfg.RRSAEnabled)
|
||||
}
|
||||
|
||||
func TestFirstNotEmpty(t *testing.T) {
|
||||
// Test case where the first non-empty string is at the beginning
|
||||
result := firstNotEmpty("hello", "world", "test")
|
||||
assert.Equal(t, "hello", result)
|
||||
|
||||
// Test case where the first non-empty string is in the middle
|
||||
result = firstNotEmpty("", "foo", "bar")
|
||||
assert.Equal(t, "foo", result)
|
||||
|
||||
// Test case where the first non-empty string is at the end
|
||||
result = firstNotEmpty("", "", "baz")
|
||||
assert.Equal(t, "baz", result)
|
||||
|
||||
// Test case where all strings are empty
|
||||
result = firstNotEmpty("", "", "")
|
||||
assert.Equal(t, "", result)
|
||||
|
||||
// Test case with no arguments
|
||||
result = firstNotEmpty()
|
||||
assert.Equal(t, "", result)
|
||||
}
|
||||
|
|
|
@ -421,8 +421,7 @@ specify the command-line flag `--aws-use-static-instance-list=true` to switch
|
|||
the CA back to its original use of a statically defined set.
|
||||
|
||||
To refresh static list, please run `go run ec2_instance_types/gen.go` under
|
||||
`cluster-autoscaler/cloudprovider/aws/` and update `staticListLastUpdateTime` in
|
||||
`aws_util.go`
|
||||
`cluster-autoscaler/cloudprovider/aws/`.
|
||||
|
||||
## Using the AWS SDK vendored in the AWS cloudprovider
|
||||
|
||||
|
|
|
@ -117,7 +117,9 @@ func (aws *awsCloudProvider) NodeGroupForNode(node *apiv1.Node) (cloudprovider.N
|
|||
}
|
||||
ref, err := AwsRefFromProviderId(node.Spec.ProviderID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
// Dropping this into V as it will be noisy with many Hybrid Nodes
|
||||
klog.V(6).Infof("Node %v has unrecognized providerId: %v", node.Name, node.Spec.ProviderID)
|
||||
return nil, nil
|
||||
}
|
||||
asg := aws.awsManager.GetAsgForInstance(*ref)
|
||||
|
||||
|
|
|
@ -18,6 +18,8 @@ package aws
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
|
@ -26,7 +28,6 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/aws"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/service/autoscaling"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var testAwsManager = &AwsManager{
|
||||
|
@ -251,6 +252,20 @@ func TestNodeGroupForNodeWithNoProviderId(t *testing.T) {
|
|||
assert.Equal(t, group, nil)
|
||||
}
|
||||
|
||||
func TestNodeGroupForNodeWithHybridNode(t *testing.T) {
|
||||
hybridNode := &apiv1.Node{
|
||||
Spec: apiv1.NodeSpec{
|
||||
ProviderID: "eks-hybrid:///us-west-2/my-cluster/my-node-1",
|
||||
},
|
||||
}
|
||||
a := &autoScalingMock{}
|
||||
provider := testProvider(t, newTestAwsManagerWithAsgs(t, a, nil, []string{"1:5:test-asg"}))
|
||||
group, err := provider.NodeGroupForNode(hybridNode)
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.Nil(t, group)
|
||||
}
|
||||
|
||||
func TestAwsRefFromProviderId(t *testing.T) {
|
||||
tests := []struct {
|
||||
provID string
|
||||
|
|
|
@ -28,7 +28,7 @@ type InstanceType struct {
|
|||
}
|
||||
|
||||
// StaticListLastUpdateTime is a string declaring the last time the static list was updated.
|
||||
var StaticListLastUpdateTime = "2024-10-02"
|
||||
var StaticListLastUpdateTime = "2025-05-27"
|
||||
|
||||
// InstanceTypes is a map of ec2 resources
|
||||
var InstanceTypes = map[string]*InstanceType{
|
||||
|
@ -1187,6 +1187,20 @@ var InstanceTypes = map[string]*InstanceType{
|
|||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"c7i-flex.12xlarge": {
|
||||
InstanceType: "c7i-flex.12xlarge",
|
||||
VCPU: 48,
|
||||
MemoryMb: 98304,
|
||||
GPU: 0,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"c7i-flex.16xlarge": {
|
||||
InstanceType: "c7i-flex.16xlarge",
|
||||
VCPU: 64,
|
||||
MemoryMb: 131072,
|
||||
GPU: 0,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"c7i-flex.2xlarge": {
|
||||
InstanceType: "c7i-flex.2xlarge",
|
||||
VCPU: 8,
|
||||
|
@ -1383,6 +1397,90 @@ var InstanceTypes = map[string]*InstanceType{
|
|||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"c8gd.12xlarge": {
|
||||
InstanceType: "c8gd.12xlarge",
|
||||
VCPU: 48,
|
||||
MemoryMb: 98304,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"c8gd.16xlarge": {
|
||||
InstanceType: "c8gd.16xlarge",
|
||||
VCPU: 64,
|
||||
MemoryMb: 131072,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"c8gd.24xlarge": {
|
||||
InstanceType: "c8gd.24xlarge",
|
||||
VCPU: 96,
|
||||
MemoryMb: 196608,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"c8gd.2xlarge": {
|
||||
InstanceType: "c8gd.2xlarge",
|
||||
VCPU: 8,
|
||||
MemoryMb: 16384,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"c8gd.48xlarge": {
|
||||
InstanceType: "c8gd.48xlarge",
|
||||
VCPU: 192,
|
||||
MemoryMb: 393216,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"c8gd.4xlarge": {
|
||||
InstanceType: "c8gd.4xlarge",
|
||||
VCPU: 16,
|
||||
MemoryMb: 32768,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"c8gd.8xlarge": {
|
||||
InstanceType: "c8gd.8xlarge",
|
||||
VCPU: 32,
|
||||
MemoryMb: 65536,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"c8gd.large": {
|
||||
InstanceType: "c8gd.large",
|
||||
VCPU: 2,
|
||||
MemoryMb: 4096,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"c8gd.medium": {
|
||||
InstanceType: "c8gd.medium",
|
||||
VCPU: 1,
|
||||
MemoryMb: 2048,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"c8gd.metal-24xl": {
|
||||
InstanceType: "c8gd.metal-24xl",
|
||||
VCPU: 96,
|
||||
MemoryMb: 196608,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"c8gd.metal-48xl": {
|
||||
InstanceType: "c8gd.metal-48xl",
|
||||
VCPU: 192,
|
||||
MemoryMb: 393216,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"c8gd.xlarge": {
|
||||
InstanceType: "c8gd.xlarge",
|
||||
VCPU: 4,
|
||||
MemoryMb: 8192,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"d2.2xlarge": {
|
||||
InstanceType: "d2.2xlarge",
|
||||
VCPU: 8,
|
||||
|
@ -1509,32 +1607,25 @@ var InstanceTypes = map[string]*InstanceType{
|
|||
GPU: 0,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"g3.16xlarge": {
|
||||
InstanceType: "g3.16xlarge",
|
||||
VCPU: 64,
|
||||
MemoryMb: 499712,
|
||||
GPU: 4,
|
||||
"f2.12xlarge": {
|
||||
InstanceType: "f2.12xlarge",
|
||||
VCPU: 48,
|
||||
MemoryMb: 524288,
|
||||
GPU: 0,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"g3.4xlarge": {
|
||||
InstanceType: "g3.4xlarge",
|
||||
VCPU: 16,
|
||||
MemoryMb: 124928,
|
||||
GPU: 1,
|
||||
"f2.48xlarge": {
|
||||
InstanceType: "f2.48xlarge",
|
||||
VCPU: 192,
|
||||
MemoryMb: 2097152,
|
||||
GPU: 0,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"g3.8xlarge": {
|
||||
InstanceType: "g3.8xlarge",
|
||||
VCPU: 32,
|
||||
MemoryMb: 249856,
|
||||
GPU: 2,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"g3s.xlarge": {
|
||||
InstanceType: "g3s.xlarge",
|
||||
VCPU: 4,
|
||||
MemoryMb: 31232,
|
||||
GPU: 1,
|
||||
"f2.6xlarge": {
|
||||
InstanceType: "f2.6xlarge",
|
||||
VCPU: 24,
|
||||
MemoryMb: 262144,
|
||||
GPU: 0,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"g4ad.16xlarge": {
|
||||
|
@ -2139,6 +2230,230 @@ var InstanceTypes = map[string]*InstanceType{
|
|||
GPU: 0,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"i7i.12xlarge": {
|
||||
InstanceType: "i7i.12xlarge",
|
||||
VCPU: 48,
|
||||
MemoryMb: 393216,
|
||||
GPU: 0,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"i7i.16xlarge": {
|
||||
InstanceType: "i7i.16xlarge",
|
||||
VCPU: 64,
|
||||
MemoryMb: 524288,
|
||||
GPU: 0,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"i7i.24xlarge": {
|
||||
InstanceType: "i7i.24xlarge",
|
||||
VCPU: 96,
|
||||
MemoryMb: 786432,
|
||||
GPU: 0,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"i7i.2xlarge": {
|
||||
InstanceType: "i7i.2xlarge",
|
||||
VCPU: 8,
|
||||
MemoryMb: 65536,
|
||||
GPU: 0,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"i7i.48xlarge": {
|
||||
InstanceType: "i7i.48xlarge",
|
||||
VCPU: 192,
|
||||
MemoryMb: 1572864,
|
||||
GPU: 0,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"i7i.4xlarge": {
|
||||
InstanceType: "i7i.4xlarge",
|
||||
VCPU: 16,
|
||||
MemoryMb: 131072,
|
||||
GPU: 0,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"i7i.8xlarge": {
|
||||
InstanceType: "i7i.8xlarge",
|
||||
VCPU: 32,
|
||||
MemoryMb: 262144,
|
||||
GPU: 0,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"i7i.large": {
|
||||
InstanceType: "i7i.large",
|
||||
VCPU: 2,
|
||||
MemoryMb: 16384,
|
||||
GPU: 0,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"i7i.metal-24xl": {
|
||||
InstanceType: "i7i.metal-24xl",
|
||||
VCPU: 96,
|
||||
MemoryMb: 786432,
|
||||
GPU: 0,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"i7i.metal-48xl": {
|
||||
InstanceType: "i7i.metal-48xl",
|
||||
VCPU: 192,
|
||||
MemoryMb: 1572864,
|
||||
GPU: 0,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"i7i.xlarge": {
|
||||
InstanceType: "i7i.xlarge",
|
||||
VCPU: 4,
|
||||
MemoryMb: 32768,
|
||||
GPU: 0,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"i7ie.12xlarge": {
|
||||
InstanceType: "i7ie.12xlarge",
|
||||
VCPU: 48,
|
||||
MemoryMb: 393216,
|
||||
GPU: 0,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"i7ie.18xlarge": {
|
||||
InstanceType: "i7ie.18xlarge",
|
||||
VCPU: 72,
|
||||
MemoryMb: 589824,
|
||||
GPU: 0,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"i7ie.24xlarge": {
|
||||
InstanceType: "i7ie.24xlarge",
|
||||
VCPU: 96,
|
||||
MemoryMb: 786432,
|
||||
GPU: 0,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"i7ie.2xlarge": {
|
||||
InstanceType: "i7ie.2xlarge",
|
||||
VCPU: 8,
|
||||
MemoryMb: 65536,
|
||||
GPU: 0,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"i7ie.3xlarge": {
|
||||
InstanceType: "i7ie.3xlarge",
|
||||
VCPU: 12,
|
||||
MemoryMb: 98304,
|
||||
GPU: 0,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"i7ie.48xlarge": {
|
||||
InstanceType: "i7ie.48xlarge",
|
||||
VCPU: 192,
|
||||
MemoryMb: 1572864,
|
||||
GPU: 0,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"i7ie.6xlarge": {
|
||||
InstanceType: "i7ie.6xlarge",
|
||||
VCPU: 24,
|
||||
MemoryMb: 196608,
|
||||
GPU: 0,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"i7ie.large": {
|
||||
InstanceType: "i7ie.large",
|
||||
VCPU: 2,
|
||||
MemoryMb: 16384,
|
||||
GPU: 0,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"i7ie.metal-24xl": {
|
||||
InstanceType: "i7ie.metal-24xl",
|
||||
VCPU: 96,
|
||||
MemoryMb: 786432,
|
||||
GPU: 0,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"i7ie.metal-48xl": {
|
||||
InstanceType: "i7ie.metal-48xl",
|
||||
VCPU: 192,
|
||||
MemoryMb: 1572864,
|
||||
GPU: 0,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"i7ie.xlarge": {
|
||||
InstanceType: "i7ie.xlarge",
|
||||
VCPU: 4,
|
||||
MemoryMb: 32768,
|
||||
GPU: 0,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"i8g.12xlarge": {
|
||||
InstanceType: "i8g.12xlarge",
|
||||
VCPU: 48,
|
||||
MemoryMb: 393216,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"i8g.16xlarge": {
|
||||
InstanceType: "i8g.16xlarge",
|
||||
VCPU: 64,
|
||||
MemoryMb: 524288,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"i8g.24xlarge": {
|
||||
InstanceType: "i8g.24xlarge",
|
||||
VCPU: 96,
|
||||
MemoryMb: 786432,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"i8g.2xlarge": {
|
||||
InstanceType: "i8g.2xlarge",
|
||||
VCPU: 8,
|
||||
MemoryMb: 65536,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"i8g.48xlarge": {
|
||||
InstanceType: "i8g.48xlarge",
|
||||
VCPU: 192,
|
||||
MemoryMb: 1572864,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"i8g.4xlarge": {
|
||||
InstanceType: "i8g.4xlarge",
|
||||
VCPU: 16,
|
||||
MemoryMb: 131072,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"i8g.8xlarge": {
|
||||
InstanceType: "i8g.8xlarge",
|
||||
VCPU: 32,
|
||||
MemoryMb: 262144,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"i8g.large": {
|
||||
InstanceType: "i8g.large",
|
||||
VCPU: 2,
|
||||
MemoryMb: 16384,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"i8g.metal-24xl": {
|
||||
InstanceType: "i8g.metal-24xl",
|
||||
VCPU: 96,
|
||||
MemoryMb: 786432,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"i8g.xlarge": {
|
||||
InstanceType: "i8g.xlarge",
|
||||
VCPU: 4,
|
||||
MemoryMb: 32768,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"im4gn.16xlarge": {
|
||||
InstanceType: "im4gn.16xlarge",
|
||||
VCPU: 64,
|
||||
|
@ -3504,6 +3819,20 @@ var InstanceTypes = map[string]*InstanceType{
|
|||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"m7i-flex.12xlarge": {
|
||||
InstanceType: "m7i-flex.12xlarge",
|
||||
VCPU: 48,
|
||||
MemoryMb: 196608,
|
||||
GPU: 0,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"m7i-flex.16xlarge": {
|
||||
InstanceType: "m7i-flex.16xlarge",
|
||||
VCPU: 64,
|
||||
MemoryMb: 262144,
|
||||
GPU: 0,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"m7i-flex.2xlarge": {
|
||||
InstanceType: "m7i-flex.2xlarge",
|
||||
VCPU: 8,
|
||||
|
@ -3700,6 +4029,90 @@ var InstanceTypes = map[string]*InstanceType{
|
|||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"m8gd.12xlarge": {
|
||||
InstanceType: "m8gd.12xlarge",
|
||||
VCPU: 48,
|
||||
MemoryMb: 196608,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"m8gd.16xlarge": {
|
||||
InstanceType: "m8gd.16xlarge",
|
||||
VCPU: 64,
|
||||
MemoryMb: 262144,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"m8gd.24xlarge": {
|
||||
InstanceType: "m8gd.24xlarge",
|
||||
VCPU: 96,
|
||||
MemoryMb: 393216,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"m8gd.2xlarge": {
|
||||
InstanceType: "m8gd.2xlarge",
|
||||
VCPU: 8,
|
||||
MemoryMb: 32768,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"m8gd.48xlarge": {
|
||||
InstanceType: "m8gd.48xlarge",
|
||||
VCPU: 192,
|
||||
MemoryMb: 786432,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"m8gd.4xlarge": {
|
||||
InstanceType: "m8gd.4xlarge",
|
||||
VCPU: 16,
|
||||
MemoryMb: 65536,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"m8gd.8xlarge": {
|
||||
InstanceType: "m8gd.8xlarge",
|
||||
VCPU: 32,
|
||||
MemoryMb: 131072,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"m8gd.large": {
|
||||
InstanceType: "m8gd.large",
|
||||
VCPU: 2,
|
||||
MemoryMb: 8192,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"m8gd.medium": {
|
||||
InstanceType: "m8gd.medium",
|
||||
VCPU: 1,
|
||||
MemoryMb: 4096,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"m8gd.metal-24xl": {
|
||||
InstanceType: "m8gd.metal-24xl",
|
||||
VCPU: 96,
|
||||
MemoryMb: 393216,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"m8gd.metal-48xl": {
|
||||
InstanceType: "m8gd.metal-48xl",
|
||||
VCPU: 192,
|
||||
MemoryMb: 786432,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"m8gd.xlarge": {
|
||||
InstanceType: "m8gd.xlarge",
|
||||
VCPU: 4,
|
||||
MemoryMb: 16384,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"mac1.metal": {
|
||||
InstanceType: "mac1.metal",
|
||||
VCPU: 12,
|
||||
|
@ -3735,27 +4148,6 @@ var InstanceTypes = map[string]*InstanceType{
|
|||
GPU: 0,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"p2.16xlarge": {
|
||||
InstanceType: "p2.16xlarge",
|
||||
VCPU: 64,
|
||||
MemoryMb: 749568,
|
||||
GPU: 16,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"p2.8xlarge": {
|
||||
InstanceType: "p2.8xlarge",
|
||||
VCPU: 32,
|
||||
MemoryMb: 499712,
|
||||
GPU: 8,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"p2.xlarge": {
|
||||
InstanceType: "p2.xlarge",
|
||||
VCPU: 4,
|
||||
MemoryMb: 62464,
|
||||
GPU: 1,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"p3.16xlarge": {
|
||||
InstanceType: "p3.16xlarge",
|
||||
VCPU: 64,
|
||||
|
@ -3805,6 +4197,13 @@ var InstanceTypes = map[string]*InstanceType{
|
|||
GPU: 8,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"p5en.48xlarge": {
|
||||
InstanceType: "p5en.48xlarge",
|
||||
VCPU: 192,
|
||||
MemoryMb: 2097152,
|
||||
GPU: 8,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"r3.2xlarge": {
|
||||
InstanceType: "r3.2xlarge",
|
||||
VCPU: 8,
|
||||
|
@ -5233,6 +5632,90 @@ var InstanceTypes = map[string]*InstanceType{
|
|||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"r8gd.12xlarge": {
|
||||
InstanceType: "r8gd.12xlarge",
|
||||
VCPU: 48,
|
||||
MemoryMb: 393216,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"r8gd.16xlarge": {
|
||||
InstanceType: "r8gd.16xlarge",
|
||||
VCPU: 64,
|
||||
MemoryMb: 524288,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"r8gd.24xlarge": {
|
||||
InstanceType: "r8gd.24xlarge",
|
||||
VCPU: 96,
|
||||
MemoryMb: 786432,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"r8gd.2xlarge": {
|
||||
InstanceType: "r8gd.2xlarge",
|
||||
VCPU: 8,
|
||||
MemoryMb: 65536,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"r8gd.48xlarge": {
|
||||
InstanceType: "r8gd.48xlarge",
|
||||
VCPU: 192,
|
||||
MemoryMb: 1572864,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"r8gd.4xlarge": {
|
||||
InstanceType: "r8gd.4xlarge",
|
||||
VCPU: 16,
|
||||
MemoryMb: 131072,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"r8gd.8xlarge": {
|
||||
InstanceType: "r8gd.8xlarge",
|
||||
VCPU: 32,
|
||||
MemoryMb: 262144,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"r8gd.large": {
|
||||
InstanceType: "r8gd.large",
|
||||
VCPU: 2,
|
||||
MemoryMb: 16384,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"r8gd.medium": {
|
||||
InstanceType: "r8gd.medium",
|
||||
VCPU: 1,
|
||||
MemoryMb: 8192,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"r8gd.metal-24xl": {
|
||||
InstanceType: "r8gd.metal-24xl",
|
||||
VCPU: 96,
|
||||
MemoryMb: 786432,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"r8gd.metal-48xl": {
|
||||
InstanceType: "r8gd.metal-48xl",
|
||||
VCPU: 192,
|
||||
MemoryMb: 1572864,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"r8gd.xlarge": {
|
||||
InstanceType: "r8gd.xlarge",
|
||||
VCPU: 4,
|
||||
MemoryMb: 32768,
|
||||
GPU: 0,
|
||||
Architecture: "arm64",
|
||||
},
|
||||
"t1.micro": {
|
||||
InstanceType: "t1.micro",
|
||||
VCPU: 1,
|
||||
|
@ -5513,6 +5996,20 @@ var InstanceTypes = map[string]*InstanceType{
|
|||
GPU: 0,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"u7i-6tb.112xlarge": {
|
||||
InstanceType: "u7i-6tb.112xlarge",
|
||||
VCPU: 448,
|
||||
MemoryMb: 6291456,
|
||||
GPU: 0,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"u7i-8tb.112xlarge": {
|
||||
InstanceType: "u7i-8tb.112xlarge",
|
||||
VCPU: 448,
|
||||
MemoryMb: 8388608,
|
||||
GPU: 0,
|
||||
Architecture: "amd64",
|
||||
},
|
||||
"u7in-16tb.224xlarge": {
|
||||
InstanceType: "u7in-16tb.224xlarge",
|
||||
VCPU: 896,
|
||||
|
|
|
@ -51,7 +51,7 @@ rules:
|
|||
resources: ["statefulsets", "replicasets", "daemonsets"]
|
||||
verbs: ["watch", "list", "get"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["storageclasses", "csinodes", "csidrivers", "csistoragecapacities"]
|
||||
resources: ["storageclasses", "csinodes", "csidrivers", "csistoragecapacities", "volumeattachments"]
|
||||
verbs: ["watch", "list", "get"]
|
||||
- apiGroups: ["batch", "extensions"]
|
||||
resources: ["jobs"]
|
||||
|
@ -146,7 +146,7 @@ spec:
|
|||
type: RuntimeDefault
|
||||
serviceAccountName: cluster-autoscaler
|
||||
containers:
|
||||
- image: registry.k8s.io/autoscaling/cluster-autoscaler:v1.26.2
|
||||
- image: registry.k8s.io/autoscaling/cluster-autoscaler:v1.32.1
|
||||
name: cluster-autoscaler
|
||||
resources:
|
||||
limits:
|
||||
|
|
|
@ -51,7 +51,7 @@ rules:
|
|||
resources: ["statefulsets", "replicasets", "daemonsets"]
|
||||
verbs: ["watch", "list", "get"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["storageclasses", "csinodes", "csidrivers", "csistoragecapacities"]
|
||||
resources: ["storageclasses", "csinodes", "csidrivers", "csistoragecapacities", "volumeattachments"]
|
||||
verbs: ["watch", "list", "get"]
|
||||
- apiGroups: ["batch", "extensions"]
|
||||
resources: ["jobs"]
|
||||
|
@ -146,7 +146,7 @@ spec:
|
|||
type: RuntimeDefault
|
||||
serviceAccountName: cluster-autoscaler
|
||||
containers:
|
||||
- image: registry.k8s.io/autoscaling/cluster-autoscaler:v1.26.2
|
||||
- image: registry.k8s.io/autoscaling/cluster-autoscaler:v1.32.1
|
||||
name: cluster-autoscaler
|
||||
resources:
|
||||
limits:
|
||||
|
|
|
@ -51,7 +51,7 @@ rules:
|
|||
resources: ["statefulsets", "replicasets", "daemonsets"]
|
||||
verbs: ["watch", "list", "get"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["storageclasses", "csinodes", "csidrivers", "csistoragecapacities"]
|
||||
resources: ["storageclasses", "csinodes", "csidrivers", "csistoragecapacities", "volumeattachments"]
|
||||
verbs: ["watch", "list", "get"]
|
||||
- apiGroups: ["batch", "extensions"]
|
||||
resources: ["jobs"]
|
||||
|
@ -146,7 +146,7 @@ spec:
|
|||
type: RuntimeDefault
|
||||
serviceAccountName: cluster-autoscaler
|
||||
containers:
|
||||
- image: registry.k8s.io/autoscaling/cluster-autoscaler:v1.26.2
|
||||
- image: registry.k8s.io/autoscaling/cluster-autoscaler:v1.32.1
|
||||
name: cluster-autoscaler
|
||||
resources:
|
||||
limits:
|
||||
|
|
|
@ -51,7 +51,7 @@ rules:
|
|||
resources: ["statefulsets", "replicasets", "daemonsets"]
|
||||
verbs: ["watch", "list", "get"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["storageclasses", "csinodes", "csidrivers", "csistoragecapacities"]
|
||||
resources: ["storageclasses", "csinodes", "csidrivers", "csistoragecapacities", "volumeattachments"]
|
||||
verbs: ["watch", "list", "get"]
|
||||
- apiGroups: ["batch", "extensions"]
|
||||
resources: ["jobs"]
|
||||
|
@ -153,7 +153,7 @@ spec:
|
|||
nodeSelector:
|
||||
kubernetes.io/role: control-plane
|
||||
containers:
|
||||
- image: registry.k8s.io/autoscaling/cluster-autoscaler:v1.26.2
|
||||
- image: registry.k8s.io/autoscaling/cluster-autoscaler:v1.32.1
|
||||
name: cluster-autoscaler
|
||||
resources:
|
||||
limits:
|
||||
|
|
|
@ -20,7 +20,6 @@ import (
|
|||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
|
@ -33,7 +32,6 @@ import (
|
|||
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources"
|
||||
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage"
|
||||
"github.com/Azure/go-autorest/autorest/date"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
@ -172,7 +170,7 @@ func TestDeleteOutdatedDeployments(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestGetVMsFromCache(t *testing.T) {
|
||||
func TestAgentPoolGetVMsFromCache(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
|
@ -309,7 +307,7 @@ func TestAgentPoolIncreaseSize(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestDecreaseTargetSize(t *testing.T) {
|
||||
func TestAgentPoolDecreaseTargetSize(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
|
@ -410,20 +408,6 @@ func TestDeleteInstances(t *testing.T) {
|
|||
err = as.DeleteInstances(instances)
|
||||
expectedErr = fmt.Errorf("cannot delete instance (%s) which don't belong to the same node pool (\"as\")", testValidProviderID1)
|
||||
assert.Equal(t, expectedErr, err)
|
||||
|
||||
instances = []*azureRef{
|
||||
{Name: testValidProviderID0},
|
||||
}
|
||||
mockVMClient.EXPECT().Get(gomock.Any(), as.manager.config.ResourceGroup, "as-vm-0", gomock.Any()).Return(getExpectedVMs()[0], nil)
|
||||
mockVMClient.EXPECT().Delete(gomock.Any(), as.manager.config.ResourceGroup, "as-vm-0").Return(nil)
|
||||
mockSAClient.EXPECT().ListKeys(gomock.Any(), as.manager.config.SubscriptionID, as.manager.config.ResourceGroup, "foo").Return(storage.AccountListKeysResult{
|
||||
Keys: &[]storage.AccountKey{
|
||||
{Value: to.StringPtr("dmFsdWUK")},
|
||||
},
|
||||
}, nil)
|
||||
err = as.DeleteInstances(instances)
|
||||
expectedErrStr := "The specified account is disabled."
|
||||
assert.True(t, strings.Contains(err.Error(), expectedErrStr))
|
||||
}
|
||||
|
||||
func TestAgentPoolDeleteNodes(t *testing.T) {
|
||||
|
@ -464,23 +448,6 @@ func TestAgentPoolDeleteNodes(t *testing.T) {
|
|||
expectedErr = fmt.Errorf("node belongs to a different asg than as")
|
||||
assert.Equal(t, expectedErr, err)
|
||||
|
||||
as.manager.azureCache.instanceToNodeGroup[azureRef{Name: testValidProviderID0}] = as
|
||||
mockVMClient.EXPECT().Get(gomock.Any(), as.manager.config.ResourceGroup, "as-vm-0", gomock.Any()).Return(getExpectedVMs()[0], nil)
|
||||
mockVMClient.EXPECT().Delete(gomock.Any(), as.manager.config.ResourceGroup, "as-vm-0").Return(nil)
|
||||
mockSAClient.EXPECT().ListKeys(gomock.Any(), as.manager.config.SubscriptionID, as.manager.config.ResourceGroup, "foo").Return(storage.AccountListKeysResult{
|
||||
Keys: &[]storage.AccountKey{
|
||||
{Value: to.StringPtr("dmFsdWUK")},
|
||||
},
|
||||
}, nil)
|
||||
err = as.DeleteNodes([]*apiv1.Node{
|
||||
{
|
||||
Spec: apiv1.NodeSpec{ProviderID: testValidProviderID0},
|
||||
ObjectMeta: v1.ObjectMeta{Name: "node"},
|
||||
},
|
||||
})
|
||||
expectedErrStr := "The specified account is disabled."
|
||||
assert.True(t, strings.Contains(err.Error(), expectedErrStr))
|
||||
|
||||
as.minSize = 3
|
||||
err = as.DeleteNodes([]*apiv1.Node{})
|
||||
expectedErr = fmt.Errorf("min size reached, nodes will not be deleted")
|
||||
|
|
|
@ -25,6 +25,7 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v5"
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/Azure/skewer"
|
||||
|
@ -67,13 +68,18 @@ type azureCache struct {
|
|||
|
||||
// Cache content.
|
||||
|
||||
// resourceGroup specifies the name of the resource group that this cache tracks
|
||||
resourceGroup string
|
||||
// resourceGroup specifies the name of the node resource group that this cache tracks
|
||||
resourceGroup string
|
||||
clusterResourceGroup string
|
||||
clusterName string
|
||||
|
||||
// enableVMsAgentPool specifies whether VMs agent pool type is supported.
|
||||
enableVMsAgentPool bool
|
||||
|
||||
// vmType can be one of vmTypeVMSS (default), vmTypeStandard
|
||||
vmType string
|
||||
|
||||
vmsPoolSet map[string]struct{} // track the nodepools that're vms pool
|
||||
vmsPoolMap map[string]armcontainerservice.AgentPool // track the nodepools that're vms pool
|
||||
|
||||
// scaleSets keeps the set of all known scalesets in the resource group, populated/refreshed via VMSS.List() call.
|
||||
// It is only used/populated if vmType is vmTypeVMSS (default).
|
||||
|
@ -106,8 +112,11 @@ func newAzureCache(client *azClient, cacheTTL time.Duration, config Config) (*az
|
|||
azClient: client,
|
||||
refreshInterval: cacheTTL,
|
||||
resourceGroup: config.ResourceGroup,
|
||||
clusterResourceGroup: config.ClusterResourceGroup,
|
||||
clusterName: config.ClusterName,
|
||||
enableVMsAgentPool: config.EnableVMsAgentPool,
|
||||
vmType: config.VMType,
|
||||
vmsPoolSet: make(map[string]struct{}),
|
||||
vmsPoolMap: make(map[string]armcontainerservice.AgentPool),
|
||||
scaleSets: make(map[string]compute.VirtualMachineScaleSet),
|
||||
virtualMachines: make(map[string][]compute.VirtualMachine),
|
||||
registeredNodeGroups: make([]cloudprovider.NodeGroup, 0),
|
||||
|
@ -130,11 +139,11 @@ func newAzureCache(client *azClient, cacheTTL time.Duration, config Config) (*az
|
|||
return cache, nil
|
||||
}
|
||||
|
||||
func (m *azureCache) getVMsPoolSet() map[string]struct{} {
|
||||
func (m *azureCache) getVMsPoolMap() map[string]armcontainerservice.AgentPool {
|
||||
m.mutex.Lock()
|
||||
defer m.mutex.Unlock()
|
||||
|
||||
return m.vmsPoolSet
|
||||
return m.vmsPoolMap
|
||||
}
|
||||
|
||||
func (m *azureCache) getVirtualMachines() map[string][]compute.VirtualMachine {
|
||||
|
@ -170,7 +179,7 @@ func (m *azureCache) regenerate() error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
klog.V(4).Infof("regenerate: found nodes for node group %s: %+v", ng.Id(), instances)
|
||||
klog.V(4).Infof("regenerate: found %d nodes for node group %s: %+v", len(instances), ng.Id(), instances)
|
||||
|
||||
for _, instance := range instances {
|
||||
ref := azureRef{Name: instance.Id}
|
||||
|
@ -232,13 +241,20 @@ func (m *azureCache) fetchAzureResources() error {
|
|||
return err
|
||||
}
|
||||
m.scaleSets = vmssResult
|
||||
vmResult, vmsPoolSet, err := m.fetchVirtualMachines()
|
||||
vmResult, err := m.fetchVirtualMachines()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// we fetch both sets of resources since CAS may operate on mixed nodepools
|
||||
m.virtualMachines = vmResult
|
||||
m.vmsPoolSet = vmsPoolSet
|
||||
// fetch VMs pools if enabled
|
||||
if m.enableVMsAgentPool {
|
||||
vmsPoolMap, err := m.fetchVMsPools()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m.vmsPoolMap = vmsPoolMap
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -251,19 +267,17 @@ const (
|
|||
)
|
||||
|
||||
// fetchVirtualMachines returns the updated list of virtual machines in the config resource group using the Azure API.
|
||||
func (m *azureCache) fetchVirtualMachines() (map[string][]compute.VirtualMachine, map[string]struct{}, error) {
|
||||
func (m *azureCache) fetchVirtualMachines() (map[string][]compute.VirtualMachine, error) {
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
result, err := m.azClient.virtualMachinesClient.List(ctx, m.resourceGroup)
|
||||
if err != nil {
|
||||
klog.Errorf("VirtualMachinesClient.List in resource group %q failed: %v", m.resourceGroup, err)
|
||||
return nil, nil, err.Error()
|
||||
return nil, err.Error()
|
||||
}
|
||||
|
||||
instances := make(map[string][]compute.VirtualMachine)
|
||||
// track the nodepools that're vms pools
|
||||
vmsPoolSet := make(map[string]struct{})
|
||||
for _, instance := range result {
|
||||
if instance.Tags == nil {
|
||||
continue
|
||||
|
@ -280,20 +294,43 @@ func (m *azureCache) fetchVirtualMachines() (map[string][]compute.VirtualMachine
|
|||
}
|
||||
|
||||
instances[to.String(vmPoolName)] = append(instances[to.String(vmPoolName)], instance)
|
||||
}
|
||||
return instances, nil
|
||||
}
|
||||
|
||||
// if the nodepool is already in the map, skip it
|
||||
if _, ok := vmsPoolSet[to.String(vmPoolName)]; ok {
|
||||
continue
|
||||
// fetchVMsPools returns a name to agentpool map of all the VMs pools in the cluster
|
||||
func (m *azureCache) fetchVMsPools() (map[string]armcontainerservice.AgentPool, error) {
|
||||
ctx, cancel := getContextWithTimeout(vmsContextTimeout)
|
||||
defer cancel()
|
||||
|
||||
// defensive check, should never happen when enableVMsAgentPool toggle is on
|
||||
if m.azClient.agentPoolClient == nil {
|
||||
return nil, errors.New("agentPoolClient is nil")
|
||||
}
|
||||
|
||||
vmsPoolMap := make(map[string]armcontainerservice.AgentPool)
|
||||
pager := m.azClient.agentPoolClient.NewListPager(m.clusterResourceGroup, m.clusterName, nil)
|
||||
var aps []*armcontainerservice.AgentPool
|
||||
for pager.More() {
|
||||
resp, err := pager.NextPage(ctx)
|
||||
if err != nil {
|
||||
klog.Errorf("agentPoolClient.pager.NextPage in cluster %s resource group %s failed: %v",
|
||||
m.clusterName, m.clusterResourceGroup, err)
|
||||
return nil, err
|
||||
}
|
||||
aps = append(aps, resp.Value...)
|
||||
}
|
||||
|
||||
// nodes from vms pool will have tag "aks-managed-agentpool-type" set to "VirtualMachines"
|
||||
if agentpoolType := tags[agentpoolTypeTag]; agentpoolType != nil {
|
||||
if strings.EqualFold(to.String(agentpoolType), vmsPoolType) {
|
||||
vmsPoolSet[to.String(vmPoolName)] = struct{}{}
|
||||
}
|
||||
for _, ap := range aps {
|
||||
if ap != nil && ap.Name != nil && ap.Properties != nil && ap.Properties.Type != nil &&
|
||||
*ap.Properties.Type == armcontainerservice.AgentPoolTypeVirtualMachines {
|
||||
// we only care about VMs pools, skip other types
|
||||
klog.V(6).Infof("Found VMs pool %q", *ap.Name)
|
||||
vmsPoolMap[*ap.Name] = *ap
|
||||
}
|
||||
}
|
||||
return instances, vmsPoolSet, nil
|
||||
|
||||
return vmsPoolMap, nil
|
||||
}
|
||||
|
||||
// fetchScaleSets returns the updated list of scale sets in the config resource group using the Azure API.
|
||||
|
@ -422,7 +459,7 @@ func (m *azureCache) HasInstance(providerID string) (bool, error) {
|
|||
|
||||
// FindForInstance returns node group of the given Instance
|
||||
func (m *azureCache) FindForInstance(instance *azureRef, vmType string) (cloudprovider.NodeGroup, error) {
|
||||
vmsPoolSet := m.getVMsPoolSet()
|
||||
vmsPoolMap := m.getVMsPoolMap()
|
||||
m.mutex.Lock()
|
||||
defer m.mutex.Unlock()
|
||||
|
||||
|
@ -441,7 +478,7 @@ func (m *azureCache) FindForInstance(instance *azureRef, vmType string) (cloudpr
|
|||
}
|
||||
|
||||
// cluster with vmss pool only
|
||||
if vmType == providerazureconsts.VMTypeVMSS && len(vmsPoolSet) == 0 {
|
||||
if vmType == providerazureconsts.VMTypeVMSS && len(vmsPoolMap) == 0 {
|
||||
if m.areAllScaleSetsUniform() {
|
||||
// Omit virtual machines not managed by vmss only in case of uniform scale set.
|
||||
if ok := virtualMachineRE.Match([]byte(inst.Name)); ok {
|
||||
|
|
|
@ -22,9 +22,42 @@ import (
|
|||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
providerazureconsts "sigs.k8s.io/cloud-provider-azure/pkg/consts"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v5"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"go.uber.org/mock/gomock"
|
||||
)
|
||||
|
||||
func TestFetchVMsPools(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
provider := newTestProvider(t)
|
||||
ac := provider.azureManager.azureCache
|
||||
mockAgentpoolclient := NewMockAgentPoolsClient(ctrl)
|
||||
ac.azClient.agentPoolClient = mockAgentpoolclient
|
||||
|
||||
vmsPool := getTestVMsAgentPool(false)
|
||||
vmssPoolType := armcontainerservice.AgentPoolTypeVirtualMachineScaleSets
|
||||
vmssPool := armcontainerservice.AgentPool{
|
||||
Name: to.StringPtr("vmsspool1"),
|
||||
Properties: &armcontainerservice.ManagedClusterAgentPoolProfileProperties{
|
||||
Type: &vmssPoolType,
|
||||
},
|
||||
}
|
||||
invalidPool := armcontainerservice.AgentPool{}
|
||||
fakeAPListPager := getFakeAgentpoolListPager(&vmsPool, &vmssPool, &invalidPool)
|
||||
mockAgentpoolclient.EXPECT().NewListPager(gomock.Any(), gomock.Any(), nil).
|
||||
Return(fakeAPListPager)
|
||||
|
||||
vmsPoolMap, err := ac.fetchVMsPools()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(vmsPoolMap))
|
||||
|
||||
_, ok := vmsPoolMap[to.String(vmsPool.Name)]
|
||||
assert.True(t, ok)
|
||||
}
|
||||
|
||||
func TestRegister(t *testing.T) {
|
||||
provider := newTestProvider(t)
|
||||
ss := newTestScaleSet(provider.azureManager, "ss")
|
||||
|
|
|
@ -19,6 +19,8 @@ package azure
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
_ "go.uber.org/mock/mockgen/model" // for go:generate
|
||||
|
||||
|
@ -29,8 +31,8 @@ import (
|
|||
azurecore_policy "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4"
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v5"
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
"github.com/Azure/go-autorest/autorest/azure/auth"
|
||||
|
@ -47,7 +49,12 @@ import (
|
|||
providerazureconfig "sigs.k8s.io/cloud-provider-azure/pkg/provider/config"
|
||||
)
|
||||
|
||||
//go:generate sh -c "mockgen k8s.io/autoscaler/cluster-autoscaler/cloudprovider/azure AgentPoolsClient >./agentpool_client.go"
|
||||
//go:generate sh -c "mockgen -source=azure_client.go -destination azure_mock_agentpool_client.go -package azure -exclude_interfaces DeploymentsClient"
|
||||
|
||||
const (
|
||||
vmsContextTimeout = 5 * time.Minute
|
||||
vmsAsyncContextTimeout = 30 * time.Minute
|
||||
)
|
||||
|
||||
// AgentPoolsClient interface defines the methods needed for scaling vms pool.
|
||||
// it is implemented by track2 sdk armcontainerservice.AgentPoolsClient
|
||||
|
@ -68,52 +75,89 @@ type AgentPoolsClient interface {
|
|||
machines armcontainerservice.AgentPoolDeleteMachinesParameter,
|
||||
options *armcontainerservice.AgentPoolsClientBeginDeleteMachinesOptions) (
|
||||
*runtime.Poller[armcontainerservice.AgentPoolsClientDeleteMachinesResponse], error)
|
||||
NewListPager(
|
||||
resourceGroupName, resourceName string,
|
||||
options *armcontainerservice.AgentPoolsClientListOptions,
|
||||
) *runtime.Pager[armcontainerservice.AgentPoolsClientListResponse]
|
||||
}
|
||||
|
||||
func getAgentpoolClientCredentials(cfg *Config) (azcore.TokenCredential, error) {
|
||||
var cred azcore.TokenCredential
|
||||
var err error
|
||||
if cfg.AuthMethod == authMethodCLI {
|
||||
cred, err = azidentity.NewAzureCLICredential(&azidentity.AzureCLICredentialOptions{
|
||||
TenantID: cfg.TenantID})
|
||||
if err != nil {
|
||||
klog.Errorf("NewAzureCLICredential failed: %v", err)
|
||||
return nil, err
|
||||
if cfg.AuthMethod == "" || cfg.AuthMethod == authMethodPrincipal {
|
||||
// Use MSI
|
||||
if cfg.UseManagedIdentityExtension {
|
||||
// Use System Assigned MSI
|
||||
if cfg.UserAssignedIdentityID == "" {
|
||||
klog.V(4).Info("Agentpool client: using System Assigned MSI to retrieve access token")
|
||||
return azidentity.NewManagedIdentityCredential(nil)
|
||||
}
|
||||
// Use User Assigned MSI
|
||||
klog.V(4).Info("Agentpool client: using User Assigned MSI to retrieve access token")
|
||||
return azidentity.NewManagedIdentityCredential(&azidentity.ManagedIdentityCredentialOptions{
|
||||
ID: azidentity.ClientID(cfg.UserAssignedIdentityID),
|
||||
})
|
||||
}
|
||||
} else if cfg.AuthMethod == "" || cfg.AuthMethod == authMethodPrincipal {
|
||||
cred, err = azidentity.NewClientSecretCredential(cfg.TenantID, cfg.AADClientID, cfg.AADClientSecret, nil)
|
||||
if err != nil {
|
||||
klog.Errorf("NewClientSecretCredential failed: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
return nil, fmt.Errorf("unsupported authorization method: %s", cfg.AuthMethod)
|
||||
}
|
||||
return cred, nil
|
||||
}
|
||||
|
||||
func getAgentpoolClientRetryOptions(cfg *Config) azurecore_policy.RetryOptions {
|
||||
if cfg.AuthMethod == authMethodCLI {
|
||||
return azurecore_policy.RetryOptions{
|
||||
MaxRetries: -1, // no retry when using CLI auth for UT
|
||||
// Use Service Principal with ClientID and ClientSecret
|
||||
if cfg.AADClientID != "" && cfg.AADClientSecret != "" {
|
||||
klog.V(2).Infoln("Agentpool client: using client_id+client_secret to retrieve access token")
|
||||
return azidentity.NewClientSecretCredential(cfg.TenantID, cfg.AADClientID, cfg.AADClientSecret, nil)
|
||||
}
|
||||
|
||||
// Use Service Principal with ClientCert and AADClientCertPassword
|
||||
if cfg.AADClientID != "" && cfg.AADClientCertPath != "" {
|
||||
klog.V(2).Infoln("Agentpool client: using client_cert+client_private_key to retrieve access token")
|
||||
certData, err := os.ReadFile(cfg.AADClientCertPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading the client certificate from file %s failed with error: %w", cfg.AADClientCertPath, err)
|
||||
}
|
||||
certs, privateKey, err := azidentity.ParseCertificates(certData, []byte(cfg.AADClientCertPassword))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parsing service principal certificate data failed with error: %w", err)
|
||||
}
|
||||
return azidentity.NewClientCertificateCredential(cfg.TenantID, cfg.AADClientID, certs, privateKey, &azidentity.ClientCertificateCredentialOptions{
|
||||
SendCertificateChain: true,
|
||||
})
|
||||
}
|
||||
}
|
||||
return azextensions.DefaultRetryOpts()
|
||||
|
||||
if cfg.UseFederatedWorkloadIdentityExtension {
|
||||
klog.V(4).Info("Agentpool client: using workload identity for access token")
|
||||
return azidentity.NewWorkloadIdentityCredential(&azidentity.WorkloadIdentityCredentialOptions{
|
||||
TokenFilePath: cfg.AADFederatedTokenFile,
|
||||
})
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("unsupported authorization method: %s", cfg.AuthMethod)
|
||||
}
|
||||
|
||||
func newAgentpoolClient(cfg *Config) (AgentPoolsClient, error) {
|
||||
retryOptions := getAgentpoolClientRetryOptions(cfg)
|
||||
retryOptions := azextensions.DefaultRetryOpts()
|
||||
cred, err := getAgentpoolClientCredentials(cfg)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to get agent pool client credentials: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
env := azure.PublicCloud // default to public cloud
|
||||
if cfg.Cloud != "" {
|
||||
var err error
|
||||
env, err = azure.EnvironmentFromName(cfg.Cloud)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to get environment from name %s: with error: %v", cfg.Cloud, err)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if cfg.ARMBaseURLForAPClient != "" {
|
||||
klog.V(10).Infof("Using ARMBaseURLForAPClient to create agent pool client")
|
||||
return newAgentpoolClientWithConfig(cfg.SubscriptionID, nil, cfg.ARMBaseURLForAPClient, "UNKNOWN", retryOptions)
|
||||
return newAgentpoolClientWithConfig(cfg.SubscriptionID, cred, cfg.ARMBaseURLForAPClient, env.TokenAudience, retryOptions, true /*insecureAllowCredentialWithHTTP*/)
|
||||
}
|
||||
|
||||
return newAgentpoolClientWithPublicEndpoint(cfg, retryOptions)
|
||||
return newAgentpoolClientWithConfig(cfg.SubscriptionID, cred, env.ResourceManagerEndpoint, env.TokenAudience, retryOptions, false /*insecureAllowCredentialWithHTTP*/)
|
||||
}
|
||||
|
||||
func newAgentpoolClientWithConfig(subscriptionID string, cred azcore.TokenCredential,
|
||||
cloudCfgEndpoint, cloudCfgAudience string, retryOptions azurecore_policy.RetryOptions) (AgentPoolsClient, error) {
|
||||
cloudCfgEndpoint, cloudCfgAudience string, retryOptions azurecore_policy.RetryOptions, insecureAllowCredentialWithHTTP bool) (AgentPoolsClient, error) {
|
||||
agentPoolsClient, err := armcontainerservice.NewAgentPoolsClient(subscriptionID, cred,
|
||||
&policy.ClientOptions{
|
||||
ClientOptions: azurecore_policy.ClientOptions{
|
||||
|
@ -125,9 +169,10 @@ func newAgentpoolClientWithConfig(subscriptionID string, cred azcore.TokenCreden
|
|||
},
|
||||
},
|
||||
},
|
||||
Telemetry: azextensions.DefaultTelemetryOpts(getUserAgentExtension()),
|
||||
Transport: azextensions.DefaultHTTPClient(),
|
||||
Retry: retryOptions,
|
||||
InsecureAllowCredentialWithHTTP: insecureAllowCredentialWithHTTP,
|
||||
Telemetry: azextensions.DefaultTelemetryOpts(getUserAgentExtension()),
|
||||
Transport: azextensions.DefaultHTTPClient(),
|
||||
Retry: retryOptions,
|
||||
},
|
||||
})
|
||||
|
||||
|
@ -139,26 +184,6 @@ func newAgentpoolClientWithConfig(subscriptionID string, cred azcore.TokenCreden
|
|||
return agentPoolsClient, nil
|
||||
}
|
||||
|
||||
func newAgentpoolClientWithPublicEndpoint(cfg *Config, retryOptions azurecore_policy.RetryOptions) (AgentPoolsClient, error) {
|
||||
cred, err := getAgentpoolClientCredentials(cfg)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to get agent pool client credentials: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// default to public cloud
|
||||
env := azure.PublicCloud
|
||||
if cfg.Cloud != "" {
|
||||
env, err = azure.EnvironmentFromName(cfg.Cloud)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to get environment from name %s: with error: %v", cfg.Cloud, err)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return newAgentpoolClientWithConfig(cfg.SubscriptionID, cred, env.ResourceManagerEndpoint, env.TokenAudience, retryOptions)
|
||||
}
|
||||
|
||||
type azClient struct {
|
||||
virtualMachineScaleSetsClient vmssclient.Interface
|
||||
virtualMachineScaleSetVMsClient vmssvmclient.Interface
|
||||
|
@ -232,9 +257,11 @@ func newAzClient(cfg *Config, env *azure.Environment) (*azClient, error) {
|
|||
|
||||
agentPoolClient, err := newAgentpoolClient(cfg)
|
||||
if err != nil {
|
||||
// we don't want to fail the whole process so we don't break any existing functionality
|
||||
// since this may not be fatal - it is only used by vms pool which is still under development.
|
||||
klog.Warningf("newAgentpoolClient failed with error: %s", err)
|
||||
klog.Errorf("newAgentpoolClient failed with error: %s", err)
|
||||
if cfg.EnableVMsAgentPool {
|
||||
// only return error if VMs agent pool is supported which is controlled by toggle
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return &azClient{
|
||||
|
|
|
@ -20,6 +20,7 @@ import (
|
|||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v5"
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
|
@ -132,7 +133,7 @@ func TestNodeGroups(t *testing.T) {
|
|||
)
|
||||
assert.True(t, registered)
|
||||
registered = provider.azureManager.RegisterNodeGroup(
|
||||
newTestVMsPool(provider.azureManager, "test-vms-pool"),
|
||||
newTestVMsPool(provider.azureManager),
|
||||
)
|
||||
assert.True(t, registered)
|
||||
assert.Equal(t, len(provider.NodeGroups()), 2)
|
||||
|
@ -146,9 +147,14 @@ func TestHasInstance(t *testing.T) {
|
|||
mockVMSSClient := mockvmssclient.NewMockInterface(ctrl)
|
||||
mockVMClient := mockvmclient.NewMockInterface(ctrl)
|
||||
mockVMSSVMClient := mockvmssvmclient.NewMockInterface(ctrl)
|
||||
mockAgentpoolclient := NewMockAgentPoolsClient(ctrl)
|
||||
provider.azureManager.azClient.virtualMachinesClient = mockVMClient
|
||||
provider.azureManager.azClient.virtualMachineScaleSetsClient = mockVMSSClient
|
||||
provider.azureManager.azClient.virtualMachineScaleSetVMsClient = mockVMSSVMClient
|
||||
provider.azureManager.azClient.agentPoolClient = mockAgentpoolclient
|
||||
provider.azureManager.azureCache.clusterName = "test-cluster"
|
||||
provider.azureManager.azureCache.clusterResourceGroup = "test-rg"
|
||||
provider.azureManager.azureCache.enableVMsAgentPool = true // enable VMs agent pool to support mixed node group types
|
||||
|
||||
// Simulate node groups and instances
|
||||
expectedScaleSets := newTestVMSSList(3, "test-asg", "eastus", compute.Uniform)
|
||||
|
@ -158,6 +164,20 @@ func TestHasInstance(t *testing.T) {
|
|||
mockVMSSClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup).Return(expectedScaleSets, nil).AnyTimes()
|
||||
mockVMClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup).Return(expectedVMsPoolVMs, nil).AnyTimes()
|
||||
mockVMSSVMClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup, "test-asg", gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes()
|
||||
vmssType := armcontainerservice.AgentPoolTypeVirtualMachineScaleSets
|
||||
vmssPool := armcontainerservice.AgentPool{
|
||||
Name: to.StringPtr("test-asg"),
|
||||
Properties: &armcontainerservice.ManagedClusterAgentPoolProfileProperties{
|
||||
Type: &vmssType,
|
||||
},
|
||||
}
|
||||
|
||||
vmsPool := getTestVMsAgentPool(false)
|
||||
fakeAPListPager := getFakeAgentpoolListPager(&vmssPool, &vmsPool)
|
||||
mockAgentpoolclient.EXPECT().NewListPager(
|
||||
provider.azureManager.azureCache.clusterResourceGroup,
|
||||
provider.azureManager.azureCache.clusterName, nil).
|
||||
Return(fakeAPListPager).AnyTimes()
|
||||
|
||||
// Register node groups
|
||||
assert.Equal(t, len(provider.NodeGroups()), 0)
|
||||
|
@ -168,9 +188,9 @@ func TestHasInstance(t *testing.T) {
|
|||
assert.True(t, registered)
|
||||
|
||||
registered = provider.azureManager.RegisterNodeGroup(
|
||||
newTestVMsPool(provider.azureManager, "test-vms-pool"),
|
||||
newTestVMsPool(provider.azureManager),
|
||||
)
|
||||
provider.azureManager.explicitlyConfigured["test-vms-pool"] = true
|
||||
provider.azureManager.explicitlyConfigured[vmsNodeGroupName] = true
|
||||
assert.True(t, registered)
|
||||
assert.Equal(t, len(provider.NodeGroups()), 2)
|
||||
|
||||
|
@ -264,9 +284,14 @@ func TestMixedNodeGroups(t *testing.T) {
|
|||
mockVMSSClient := mockvmssclient.NewMockInterface(ctrl)
|
||||
mockVMClient := mockvmclient.NewMockInterface(ctrl)
|
||||
mockVMSSVMClient := mockvmssvmclient.NewMockInterface(ctrl)
|
||||
mockAgentpoolclient := NewMockAgentPoolsClient(ctrl)
|
||||
provider.azureManager.azClient.virtualMachinesClient = mockVMClient
|
||||
provider.azureManager.azClient.virtualMachineScaleSetsClient = mockVMSSClient
|
||||
provider.azureManager.azClient.virtualMachineScaleSetVMsClient = mockVMSSVMClient
|
||||
provider.azureManager.azureCache.clusterName = "test-cluster"
|
||||
provider.azureManager.azureCache.clusterResourceGroup = "test-rg"
|
||||
provider.azureManager.azureCache.enableVMsAgentPool = true // enable VMs agent pool to support mixed node group types
|
||||
provider.azureManager.azClient.agentPoolClient = mockAgentpoolclient
|
||||
|
||||
expectedScaleSets := newTestVMSSList(3, "test-asg", "eastus", compute.Uniform)
|
||||
expectedVMsPoolVMs := newTestVMsPoolVMList(3)
|
||||
|
@ -276,6 +301,19 @@ func TestMixedNodeGroups(t *testing.T) {
|
|||
mockVMClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup).Return(expectedVMsPoolVMs, nil).AnyTimes()
|
||||
mockVMSSVMClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup, "test-asg", gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes()
|
||||
|
||||
vmssType := armcontainerservice.AgentPoolTypeVirtualMachineScaleSets
|
||||
vmssPool := armcontainerservice.AgentPool{
|
||||
Name: to.StringPtr("test-asg"),
|
||||
Properties: &armcontainerservice.ManagedClusterAgentPoolProfileProperties{
|
||||
Type: &vmssType,
|
||||
},
|
||||
}
|
||||
|
||||
vmsPool := getTestVMsAgentPool(false)
|
||||
fakeAPListPager := getFakeAgentpoolListPager(&vmssPool, &vmsPool)
|
||||
mockAgentpoolclient.EXPECT().NewListPager(provider.azureManager.azureCache.clusterResourceGroup, provider.azureManager.azureCache.clusterName, nil).
|
||||
Return(fakeAPListPager).AnyTimes()
|
||||
|
||||
assert.Equal(t, len(provider.NodeGroups()), 0)
|
||||
registered := provider.azureManager.RegisterNodeGroup(
|
||||
newTestScaleSet(provider.azureManager, "test-asg"),
|
||||
|
@ -284,9 +322,9 @@ func TestMixedNodeGroups(t *testing.T) {
|
|||
assert.True(t, registered)
|
||||
|
||||
registered = provider.azureManager.RegisterNodeGroup(
|
||||
newTestVMsPool(provider.azureManager, "test-vms-pool"),
|
||||
newTestVMsPool(provider.azureManager),
|
||||
)
|
||||
provider.azureManager.explicitlyConfigured["test-vms-pool"] = true
|
||||
provider.azureManager.explicitlyConfigured[vmsNodeGroupName] = true
|
||||
assert.True(t, registered)
|
||||
assert.Equal(t, len(provider.NodeGroups()), 2)
|
||||
|
||||
|
@ -307,7 +345,7 @@ func TestMixedNodeGroups(t *testing.T) {
|
|||
group, err = provider.NodeGroupForNode(vmsPoolNode)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, group, "Group should not be nil")
|
||||
assert.Equal(t, group.Id(), "test-vms-pool")
|
||||
assert.Equal(t, group.Id(), vmsNodeGroupName)
|
||||
assert.Equal(t, group.MinSize(), 3)
|
||||
assert.Equal(t, group.MaxSize(), 10)
|
||||
}
|
||||
|
|
|
@ -86,6 +86,9 @@ type Config struct {
|
|||
// EnableForceDelete defines whether to enable force deletion on the APIs
|
||||
EnableForceDelete bool `json:"enableForceDelete,omitempty" yaml:"enableForceDelete,omitempty"`
|
||||
|
||||
// EnableVMsAgentPool defines whether to support VMs agentpool type in addition to VMSS type
|
||||
EnableVMsAgentPool bool `json:"enableVMsAgentPool,omitempty" yaml:"enableVMsAgentPool,omitempty"`
|
||||
|
||||
// (DEPRECATED, DO NOT USE) EnableDynamicInstanceList defines whether to enable dynamic instance workflow for instance information check
|
||||
EnableDynamicInstanceList bool `json:"enableDynamicInstanceList,omitempty" yaml:"enableDynamicInstanceList,omitempty"`
|
||||
|
||||
|
@ -100,6 +103,9 @@ type Config struct {
|
|||
|
||||
// EnableFastDeleteOnFailedProvisioning defines whether to delete the experimental faster VMSS instance deletion on failed provisioning
|
||||
EnableFastDeleteOnFailedProvisioning bool `json:"enableFastDeleteOnFailedProvisioning,omitempty" yaml:"enableFastDeleteOnFailedProvisioning,omitempty"`
|
||||
|
||||
// EnableLabelPredictionsOnTemplate defines whether to enable label predictions on the template when scaling from zero
|
||||
EnableLabelPredictionsOnTemplate bool `json:"enableLabelPredictionsOnTemplate,omitempty" yaml:"enableLabelPredictionsOnTemplate,omitempty"`
|
||||
}
|
||||
|
||||
// These are only here for backward compabitility. Their equivalent exists in providerazure.Config with a different name.
|
||||
|
@ -122,6 +128,7 @@ func BuildAzureConfig(configReader io.Reader) (*Config, error) {
|
|||
// Static defaults
|
||||
cfg.EnableDynamicInstanceList = false
|
||||
cfg.EnableVmssFlexNodes = false
|
||||
cfg.EnableVMsAgentPool = false
|
||||
cfg.CloudProviderBackoffRetries = providerazureconsts.BackoffRetriesDefault
|
||||
cfg.CloudProviderBackoffExponent = providerazureconsts.BackoffExponentDefault
|
||||
cfg.CloudProviderBackoffDuration = providerazureconsts.BackoffDurationDefault
|
||||
|
@ -129,6 +136,7 @@ func BuildAzureConfig(configReader io.Reader) (*Config, error) {
|
|||
cfg.VMType = providerazureconsts.VMTypeVMSS
|
||||
cfg.MaxDeploymentsCount = int64(defaultMaxDeploymentsCount)
|
||||
cfg.StrictCacheUpdates = false
|
||||
cfg.EnableLabelPredictionsOnTemplate = true
|
||||
|
||||
// Config file overrides defaults
|
||||
if configReader != nil {
|
||||
|
@ -257,6 +265,9 @@ func BuildAzureConfig(configReader io.Reader) (*Config, error) {
|
|||
if _, err = assignBoolFromEnvIfExists(&cfg.StrictCacheUpdates, "AZURE_STRICT_CACHE_UPDATES"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, err = assignBoolFromEnvIfExists(&cfg.EnableVMsAgentPool, "AZURE_ENABLE_VMS_AGENT_POOLS"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, err = assignBoolFromEnvIfExists(&cfg.EnableDynamicInstanceList, "AZURE_ENABLE_DYNAMIC_INSTANCE_LIST"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -301,6 +312,9 @@ func BuildAzureConfig(configReader io.Reader) (*Config, error) {
|
|||
if _, err = assignBoolFromEnvIfExists(&cfg.EnableFastDeleteOnFailedProvisioning, "AZURE_ENABLE_FAST_DELETE_ON_FAILED_PROVISIONING"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, err = assignBoolFromEnvIfExists(&cfg.EnableLabelPredictionsOnTemplate, "AZURE_ENABLE_LABEL_PREDICTIONS_ON_TEMPLATE"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Nonstatic defaults
|
||||
cfg.VMType = strings.ToLower(cfg.VMType)
|
||||
|
|
|
@ -22,80 +22,79 @@ import (
|
|||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// GetVMSSTypeStatically uses static list of vmss generated at azure_instance_types.go to fetch vmss instance information.
|
||||
// GetInstanceTypeStatically uses static list of vmss generated at azure_instance_types.go to fetch vmss instance information.
|
||||
// It is declared as a variable for testing purpose.
|
||||
var GetVMSSTypeStatically = func(template compute.VirtualMachineScaleSet) (*InstanceType, error) {
|
||||
var vmssType *InstanceType
|
||||
var GetInstanceTypeStatically = func(template NodeTemplate) (*InstanceType, error) {
|
||||
var instanceType *InstanceType
|
||||
|
||||
for k := range InstanceTypes {
|
||||
if strings.EqualFold(k, *template.Sku.Name) {
|
||||
vmssType = InstanceTypes[k]
|
||||
if strings.EqualFold(k, template.SkuName) {
|
||||
instanceType = InstanceTypes[k]
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
promoRe := regexp.MustCompile(`(?i)_promo`)
|
||||
if promoRe.MatchString(*template.Sku.Name) {
|
||||
if vmssType == nil {
|
||||
if promoRe.MatchString(template.SkuName) {
|
||||
if instanceType == nil {
|
||||
// We didn't find an exact match but this is a promo type, check for matching standard
|
||||
klog.V(4).Infof("No exact match found for %s, checking standard types", *template.Sku.Name)
|
||||
skuName := promoRe.ReplaceAllString(*template.Sku.Name, "")
|
||||
klog.V(4).Infof("No exact match found for %s, checking standard types", template.SkuName)
|
||||
skuName := promoRe.ReplaceAllString(template.SkuName, "")
|
||||
for k := range InstanceTypes {
|
||||
if strings.EqualFold(k, skuName) {
|
||||
vmssType = InstanceTypes[k]
|
||||
instanceType = InstanceTypes[k]
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if vmssType == nil {
|
||||
return vmssType, fmt.Errorf("instance type %q not supported", *template.Sku.Name)
|
||||
if instanceType == nil {
|
||||
return instanceType, fmt.Errorf("instance type %q not supported", template.SkuName)
|
||||
}
|
||||
return vmssType, nil
|
||||
return instanceType, nil
|
||||
}
|
||||
|
||||
// GetVMSSTypeDynamically fetched vmss instance information using sku api calls.
|
||||
// GetInstanceTypeDynamically fetched vmss instance information using sku api calls.
|
||||
// It is declared as a variable for testing purpose.
|
||||
var GetVMSSTypeDynamically = func(template compute.VirtualMachineScaleSet, azCache *azureCache) (InstanceType, error) {
|
||||
var GetInstanceTypeDynamically = func(template NodeTemplate, azCache *azureCache) (InstanceType, error) {
|
||||
ctx := context.Background()
|
||||
var vmssType InstanceType
|
||||
var instanceType InstanceType
|
||||
|
||||
sku, err := azCache.GetSKU(ctx, *template.Sku.Name, *template.Location)
|
||||
sku, err := azCache.GetSKU(ctx, template.SkuName, template.Location)
|
||||
if err != nil {
|
||||
// We didn't find an exact match but this is a promo type, check for matching standard
|
||||
promoRe := regexp.MustCompile(`(?i)_promo`)
|
||||
skuName := promoRe.ReplaceAllString(*template.Sku.Name, "")
|
||||
if skuName != *template.Sku.Name {
|
||||
klog.V(1).Infof("No exact match found for %q, checking standard type %q. Error %v", *template.Sku.Name, skuName, err)
|
||||
sku, err = azCache.GetSKU(ctx, skuName, *template.Location)
|
||||
skuName := promoRe.ReplaceAllString(template.SkuName, "")
|
||||
if skuName != template.SkuName {
|
||||
klog.V(1).Infof("No exact match found for %q, checking standard type %q. Error %v", template.SkuName, skuName, err)
|
||||
sku, err = azCache.GetSKU(ctx, skuName, template.Location)
|
||||
}
|
||||
if err != nil {
|
||||
return vmssType, fmt.Errorf("instance type %q not supported. Error %v", *template.Sku.Name, err)
|
||||
return instanceType, fmt.Errorf("instance type %q not supported. Error %v", template.SkuName, err)
|
||||
}
|
||||
}
|
||||
|
||||
vmssType.VCPU, err = sku.VCPU()
|
||||
instanceType.VCPU, err = sku.VCPU()
|
||||
if err != nil {
|
||||
klog.V(1).Infof("Failed to parse vcpu from sku %q %v", *template.Sku.Name, err)
|
||||
return vmssType, err
|
||||
klog.V(1).Infof("Failed to parse vcpu from sku %q %v", template.SkuName, err)
|
||||
return instanceType, err
|
||||
}
|
||||
gpu, err := getGpuFromSku(sku)
|
||||
if err != nil {
|
||||
klog.V(1).Infof("Failed to parse gpu from sku %q %v", *template.Sku.Name, err)
|
||||
return vmssType, err
|
||||
klog.V(1).Infof("Failed to parse gpu from sku %q %v", template.SkuName, err)
|
||||
return instanceType, err
|
||||
}
|
||||
vmssType.GPU = gpu
|
||||
instanceType.GPU = gpu
|
||||
|
||||
memoryGb, err := sku.Memory()
|
||||
if err != nil {
|
||||
klog.V(1).Infof("Failed to parse memoryMb from sku %q %v", *template.Sku.Name, err)
|
||||
return vmssType, err
|
||||
klog.V(1).Infof("Failed to parse memoryMb from sku %q %v", template.SkuName, err)
|
||||
return instanceType, err
|
||||
}
|
||||
vmssType.MemoryMb = int64(memoryGb) * 1024
|
||||
instanceType.MemoryMb = int64(memoryGb) * 1024
|
||||
|
||||
return vmssType, nil
|
||||
return instanceType, nil
|
||||
}
|
||||
|
|
|
@ -168,6 +168,23 @@ func (m *AzureManager) fetchExplicitNodeGroups(specs []string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// parseSKUAndVMsAgentpoolNameFromSpecName parses the spec name for a mixed-SKU VMs pool.
|
||||
// The spec name should be in the format <agentpoolname>/<sku>, e.g., "mypool1/Standard_D2s_v3", if the agent pool is a VMs pool.
|
||||
// This method returns a boolean indicating if the agent pool is a VMs pool, along with the agent pool name and SKU.
|
||||
func (m *AzureManager) parseSKUAndVMsAgentpoolNameFromSpecName(name string) (bool, string, string) {
|
||||
parts := strings.Split(name, "/")
|
||||
if len(parts) == 2 {
|
||||
agentPoolName := parts[0]
|
||||
sku := parts[1]
|
||||
|
||||
vmsPoolMap := m.azureCache.getVMsPoolMap()
|
||||
if _, ok := vmsPoolMap[agentPoolName]; ok {
|
||||
return true, agentPoolName, sku
|
||||
}
|
||||
}
|
||||
return false, "", ""
|
||||
}
|
||||
|
||||
func (m *AzureManager) buildNodeGroupFromSpec(spec string) (cloudprovider.NodeGroup, error) {
|
||||
scaleToZeroSupported := scaleToZeroSupportedStandard
|
||||
if strings.EqualFold(m.config.VMType, providerazureconsts.VMTypeVMSS) {
|
||||
|
@ -177,9 +194,13 @@ func (m *AzureManager) buildNodeGroupFromSpec(spec string) (cloudprovider.NodeGr
|
|||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse node group spec: %v", err)
|
||||
}
|
||||
vmsPoolSet := m.azureCache.getVMsPoolSet()
|
||||
if _, ok := vmsPoolSet[s.Name]; ok {
|
||||
return NewVMsPool(s, m), nil
|
||||
|
||||
// Starting from release 1.30, a cluster may have both VMSS and VMs pools.
|
||||
// Therefore, we cannot solely rely on the VMType to determine the node group type.
|
||||
// Instead, we need to check the cache to determine if the agent pool is a VMs pool.
|
||||
isVMsPool, agentPoolName, sku := m.parseSKUAndVMsAgentpoolNameFromSpecName(s.Name)
|
||||
if isVMsPool {
|
||||
return NewVMPool(s, m, agentPoolName, sku)
|
||||
}
|
||||
|
||||
switch m.config.VMType {
|
||||
|
|
|
@ -18,6 +18,7 @@ package azure
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
@ -198,7 +199,26 @@ const (
|
|||
testASG = "test-asg"
|
||||
)
|
||||
|
||||
func saveAndClearEnv() []string {
|
||||
originalEnv := os.Environ()
|
||||
os.Clearenv()
|
||||
return originalEnv
|
||||
}
|
||||
|
||||
func loadEnv(originalEnv []string) {
|
||||
os.Clearenv()
|
||||
for _, e := range originalEnv {
|
||||
parts := strings.SplitN(e, "=", 2)
|
||||
os.Setenv(parts[0], parts[1])
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateAzureManagerValidConfig(t *testing.T) {
|
||||
originalEnv := saveAndClearEnv()
|
||||
t.Cleanup(func() {
|
||||
loadEnv(originalEnv)
|
||||
})
|
||||
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
mockVMClient := mockvmclient.NewMockInterface(ctrl)
|
||||
|
@ -277,6 +297,7 @@ func TestCreateAzureManagerValidConfig(t *testing.T) {
|
|||
VmssVmsCacheJitter: 120,
|
||||
MaxDeploymentsCount: 8,
|
||||
EnableFastDeleteOnFailedProvisioning: true,
|
||||
EnableVMsAgentPool: false,
|
||||
}
|
||||
|
||||
assert.NoError(t, err)
|
||||
|
@ -284,6 +305,11 @@ func TestCreateAzureManagerValidConfig(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestCreateAzureManagerLegacyConfig(t *testing.T) {
|
||||
originalEnv := saveAndClearEnv()
|
||||
t.Cleanup(func() {
|
||||
loadEnv(originalEnv)
|
||||
})
|
||||
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
mockVMClient := mockvmclient.NewMockInterface(ctrl)
|
||||
|
@ -367,6 +393,11 @@ func TestCreateAzureManagerLegacyConfig(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestCreateAzureManagerValidConfigForStandardVMType(t *testing.T) {
|
||||
originalEnv := saveAndClearEnv()
|
||||
t.Cleanup(func() {
|
||||
loadEnv(originalEnv)
|
||||
})
|
||||
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
mockVMClient := mockvmclient.NewMockInterface(ctrl)
|
||||
|
@ -474,12 +505,22 @@ func TestCreateAzureManagerValidConfigForStandardVMType(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestCreateAzureManagerValidConfigForStandardVMTypeWithoutDeploymentParameters(t *testing.T) {
|
||||
originalEnv := saveAndClearEnv()
|
||||
t.Cleanup(func() {
|
||||
loadEnv(originalEnv)
|
||||
})
|
||||
|
||||
manager, err := createAzureManagerInternal(strings.NewReader(validAzureCfgForStandardVMTypeWithoutDeploymentParameters), cloudprovider.NodeGroupDiscoveryOptions{}, &azClient{})
|
||||
expectedErr := "open /var/lib/azure/azuredeploy.parameters.json: no such file or directory"
|
||||
assert.Nil(t, manager)
|
||||
assert.Equal(t, expectedErr, err.Error(), "return error does not match, expected: %v, actual: %v", expectedErr, err.Error())
|
||||
}
|
||||
func TestCreateAzureManagerValidConfigForVMsPool(t *testing.T) {
|
||||
originalEnv := saveAndClearEnv()
|
||||
t.Cleanup(func() {
|
||||
loadEnv(originalEnv)
|
||||
})
|
||||
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
mockVMClient := mockvmclient.NewMockInterface(ctrl)
|
||||
|
@ -567,15 +608,25 @@ func TestCreateAzureManagerValidConfigForVMsPool(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestCreateAzureManagerWithNilConfig(t *testing.T) {
|
||||
originalEnv := saveAndClearEnv()
|
||||
t.Cleanup(func() {
|
||||
loadEnv(originalEnv)
|
||||
})
|
||||
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
mockVMClient := mockvmclient.NewMockInterface(ctrl)
|
||||
mockVMSSClient := mockvmssclient.NewMockInterface(ctrl)
|
||||
mockVMSSClient.EXPECT().List(gomock.Any(), "resourceGroup").Return([]compute.VirtualMachineScaleSet{}, nil).AnyTimes()
|
||||
mockVMClient.EXPECT().List(gomock.Any(), "resourceGroup").Return([]compute.VirtualMachine{}, nil).AnyTimes()
|
||||
mockAgentpoolclient := NewMockAgentPoolsClient(ctrl)
|
||||
vmspool := getTestVMsAgentPool(false)
|
||||
fakeAPListPager := getFakeAgentpoolListPager(&vmspool)
|
||||
mockAgentpoolclient.EXPECT().NewListPager(gomock.Any(), gomock.Any(), nil).Return(fakeAPListPager).AnyTimes()
|
||||
mockAzClient := &azClient{
|
||||
virtualMachinesClient: mockVMClient,
|
||||
virtualMachineScaleSetsClient: mockVMSSClient,
|
||||
agentPoolClient: mockAgentpoolclient,
|
||||
}
|
||||
|
||||
expectedConfig := &Config{
|
||||
|
@ -657,6 +708,7 @@ func TestCreateAzureManagerWithNilConfig(t *testing.T) {
|
|||
VmssVmsCacheJitter: 90,
|
||||
MaxDeploymentsCount: 8,
|
||||
EnableFastDeleteOnFailedProvisioning: true,
|
||||
EnableVMsAgentPool: true,
|
||||
}
|
||||
|
||||
t.Setenv("ARM_CLOUD", "AzurePublicCloud")
|
||||
|
@ -690,6 +742,7 @@ func TestCreateAzureManagerWithNilConfig(t *testing.T) {
|
|||
t.Setenv("ARM_CLUSTER_RESOURCE_GROUP", "myrg")
|
||||
t.Setenv("ARM_BASE_URL_FOR_AP_CLIENT", "nodeprovisioner-svc.nodeprovisioner.svc.cluster.local")
|
||||
t.Setenv("AZURE_ENABLE_FAST_DELETE_ON_FAILED_PROVISIONING", "true")
|
||||
t.Setenv("AZURE_ENABLE_VMS_AGENT_POOLS", "true")
|
||||
|
||||
t.Run("environment variables correctly set", func(t *testing.T) {
|
||||
manager, err := createAzureManagerInternal(nil, cloudprovider.NodeGroupDiscoveryOptions{}, mockAzClient)
|
||||
|
@ -814,6 +867,11 @@ func TestCreateAzureManagerWithNilConfig(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestCreateAzureManagerWithEnvOverridingConfig(t *testing.T) {
|
||||
originalEnv := saveAndClearEnv()
|
||||
t.Cleanup(func() {
|
||||
loadEnv(originalEnv)
|
||||
})
|
||||
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
mockVMClient := mockvmclient.NewMockInterface(ctrl)
|
||||
|
@ -945,11 +1003,21 @@ func TestCreateAzureManagerWithEnvOverridingConfig(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestCreateAzureManagerInvalidConfig(t *testing.T) {
|
||||
originalEnv := saveAndClearEnv()
|
||||
t.Cleanup(func() {
|
||||
loadEnv(originalEnv)
|
||||
})
|
||||
|
||||
_, err := createAzureManagerInternal(strings.NewReader(invalidAzureCfg), cloudprovider.NodeGroupDiscoveryOptions{}, &azClient{})
|
||||
assert.Error(t, err, "failed to unmarshal config body")
|
||||
}
|
||||
|
||||
func TestFetchExplicitNodeGroups(t *testing.T) {
|
||||
originalEnv := saveAndClearEnv()
|
||||
t.Cleanup(func() {
|
||||
loadEnv(originalEnv)
|
||||
})
|
||||
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
|
@ -1025,6 +1093,11 @@ func TestFetchExplicitNodeGroups(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestGetFilteredAutoscalingGroupsVmss(t *testing.T) {
|
||||
originalEnv := saveAndClearEnv()
|
||||
t.Cleanup(func() {
|
||||
loadEnv(originalEnv)
|
||||
})
|
||||
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
|
@ -1070,6 +1143,11 @@ func TestGetFilteredAutoscalingGroupsVmss(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestGetFilteredAutoscalingGroupsVmssWithConfiguredSizes(t *testing.T) {
|
||||
originalEnv := saveAndClearEnv()
|
||||
t.Cleanup(func() {
|
||||
loadEnv(originalEnv)
|
||||
})
|
||||
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
|
@ -1118,6 +1196,11 @@ func TestGetFilteredAutoscalingGroupsVmssWithConfiguredSizes(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestGetFilteredAutoscalingGroupsWithInvalidVMType(t *testing.T) {
|
||||
originalEnv := saveAndClearEnv()
|
||||
t.Cleanup(func() {
|
||||
loadEnv(originalEnv)
|
||||
})
|
||||
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
|
@ -1142,6 +1225,11 @@ func TestGetFilteredAutoscalingGroupsWithInvalidVMType(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestFetchAutoAsgsVmss(t *testing.T) {
|
||||
originalEnv := saveAndClearEnv()
|
||||
t.Cleanup(func() {
|
||||
loadEnv(originalEnv)
|
||||
})
|
||||
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
|
@ -1196,6 +1284,11 @@ func TestFetchAutoAsgsVmss(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestManagerRefreshAndCleanup(t *testing.T) {
|
||||
originalEnv := saveAndClearEnv()
|
||||
t.Cleanup(func() {
|
||||
loadEnv(originalEnv)
|
||||
})
|
||||
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
|
@ -1206,6 +1299,11 @@ func TestManagerRefreshAndCleanup(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestGetScaleSetOptions(t *testing.T) {
|
||||
originalEnv := saveAndClearEnv()
|
||||
t.Cleanup(func() {
|
||||
loadEnv(originalEnv)
|
||||
})
|
||||
|
||||
manager := &AzureManager{
|
||||
azureCache: &azureCache{
|
||||
autoscalingOptions: make(map[azureRef]map[string]string),
|
||||
|
@ -1249,6 +1347,54 @@ func TestGetScaleSetOptions(t *testing.T) {
|
|||
assert.Equal(t, *opts, defaultOptions)
|
||||
}
|
||||
|
||||
// TestVMSSNotFound ensures that AzureManager is still able to be built
|
||||
// if one nodeGroup (VMSS) is not found. Previously, we would fail on manager creation
|
||||
// if even one expected nodeGroup was not found. When manager creation errored out,
|
||||
// BuildAzure returns log.Fatalf() which caused CAS to crash.
|
||||
func TestVMSSNotFound(t *testing.T) {
|
||||
originalEnv := saveAndClearEnv()
|
||||
t.Cleanup(func() {
|
||||
loadEnv(originalEnv)
|
||||
})
|
||||
|
||||
// client setup
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
mockVMSSClient := mockvmssclient.NewMockInterface(ctrl)
|
||||
mockVMClient := mockvmclient.NewMockInterface(ctrl)
|
||||
mockVMSSVMClient := mockvmssvmclient.NewMockInterface(ctrl)
|
||||
client := azClient{}
|
||||
client.virtualMachineScaleSetsClient = mockVMSSClient
|
||||
client.virtualMachinesClient = mockVMClient
|
||||
client.virtualMachineScaleSetVMsClient = mockVMSSVMClient
|
||||
|
||||
// Expect that no vmss are present in the vmss client
|
||||
mockVMSSVMClient.EXPECT().List(gomock.Any(), "fakeId", testASG, gomock.Any()).Return([]compute.VirtualMachineScaleSetVM{}, nil).AnyTimes()
|
||||
mockVMClient.EXPECT().List(gomock.Any(), "fakeId").Return([]compute.VirtualMachine{}, nil).AnyTimes()
|
||||
mockVMSSClient.EXPECT().List(gomock.Any(), "fakeId").Return([]compute.VirtualMachineScaleSet{}, nil).AnyTimes()
|
||||
|
||||
// Add explicit node group to look for during init
|
||||
ngdo := cloudprovider.NodeGroupDiscoveryOptions{
|
||||
NodeGroupSpecs: []string{
|
||||
fmt.Sprintf("%d:%d:%s", 1, 3, testASG),
|
||||
},
|
||||
}
|
||||
|
||||
// We expect the initial BuildAzure flow to pass when a NodeGroup is detected
|
||||
// that doesn't have a corresponding VMSS in the cache.
|
||||
t.Run("should not error when VMSS not found in cache", func(t *testing.T) {
|
||||
manager, err := createAzureManagerInternal(strings.NewReader(validAzureCfg), ngdo, &client)
|
||||
assert.NoError(t, err)
|
||||
// expect one nodegroup to be present
|
||||
nodeGroups := manager.getNodeGroups()
|
||||
assert.Len(t, nodeGroups, 1)
|
||||
assert.Equal(t, nodeGroups[0].Id(), testASG)
|
||||
// expect no scale sets to be present
|
||||
scaleSets := manager.azureCache.getScaleSets()
|
||||
assert.Len(t, scaleSets, 0)
|
||||
})
|
||||
}
|
||||
|
||||
func assertStructsMinimallyEqual(t *testing.T, struct1, struct2 interface{}) bool {
|
||||
return compareStructFields(t, reflect.ValueOf(struct1), reflect.ValueOf(struct2))
|
||||
}
|
||||
|
|
|
@ -21,7 +21,7 @@ import (
|
|||
reflect "reflect"
|
||||
|
||||
runtime "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
|
||||
armcontainerservice "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4"
|
||||
armcontainerservice "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v5"
|
||||
gomock "go.uber.org/mock/gomock"
|
||||
)
|
||||
|
||||
|
@ -49,46 +49,60 @@ func (m *MockAgentPoolsClient) EXPECT() *MockAgentPoolsClientMockRecorder {
|
|||
}
|
||||
|
||||
// BeginCreateOrUpdate mocks base method.
|
||||
func (m *MockAgentPoolsClient) BeginCreateOrUpdate(arg0 context.Context, arg1, arg2, arg3 string, arg4 armcontainerservice.AgentPool, arg5 *armcontainerservice.AgentPoolsClientBeginCreateOrUpdateOptions) (*runtime.Poller[armcontainerservice.AgentPoolsClientCreateOrUpdateResponse], error) {
|
||||
func (m *MockAgentPoolsClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName, resourceName, agentPoolName string, parameters armcontainerservice.AgentPool, options *armcontainerservice.AgentPoolsClientBeginCreateOrUpdateOptions) (*runtime.Poller[armcontainerservice.AgentPoolsClientCreateOrUpdateResponse], error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "BeginCreateOrUpdate", arg0, arg1, arg2, arg3, arg4, arg5)
|
||||
ret := m.ctrl.Call(m, "BeginCreateOrUpdate", ctx, resourceGroupName, resourceName, agentPoolName, parameters, options)
|
||||
ret0, _ := ret[0].(*runtime.Poller[armcontainerservice.AgentPoolsClientCreateOrUpdateResponse])
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// BeginCreateOrUpdate indicates an expected call of BeginCreateOrUpdate.
|
||||
func (mr *MockAgentPoolsClientMockRecorder) BeginCreateOrUpdate(arg0, arg1, arg2, arg3, arg4, arg5 any) *gomock.Call {
|
||||
func (mr *MockAgentPoolsClientMockRecorder) BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, agentPoolName, parameters, options any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeginCreateOrUpdate", reflect.TypeOf((*MockAgentPoolsClient)(nil).BeginCreateOrUpdate), arg0, arg1, arg2, arg3, arg4, arg5)
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeginCreateOrUpdate", reflect.TypeOf((*MockAgentPoolsClient)(nil).BeginCreateOrUpdate), ctx, resourceGroupName, resourceName, agentPoolName, parameters, options)
|
||||
}
|
||||
|
||||
// BeginDeleteMachines mocks base method.
|
||||
func (m *MockAgentPoolsClient) BeginDeleteMachines(arg0 context.Context, arg1, arg2, arg3 string, arg4 armcontainerservice.AgentPoolDeleteMachinesParameter, arg5 *armcontainerservice.AgentPoolsClientBeginDeleteMachinesOptions) (*runtime.Poller[armcontainerservice.AgentPoolsClientDeleteMachinesResponse], error) {
|
||||
func (m *MockAgentPoolsClient) BeginDeleteMachines(ctx context.Context, resourceGroupName, resourceName, agentPoolName string, machines armcontainerservice.AgentPoolDeleteMachinesParameter, options *armcontainerservice.AgentPoolsClientBeginDeleteMachinesOptions) (*runtime.Poller[armcontainerservice.AgentPoolsClientDeleteMachinesResponse], error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "BeginDeleteMachines", arg0, arg1, arg2, arg3, arg4, arg5)
|
||||
ret := m.ctrl.Call(m, "BeginDeleteMachines", ctx, resourceGroupName, resourceName, agentPoolName, machines, options)
|
||||
ret0, _ := ret[0].(*runtime.Poller[armcontainerservice.AgentPoolsClientDeleteMachinesResponse])
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// BeginDeleteMachines indicates an expected call of BeginDeleteMachines.
|
||||
func (mr *MockAgentPoolsClientMockRecorder) BeginDeleteMachines(arg0, arg1, arg2, arg3, arg4, arg5 any) *gomock.Call {
|
||||
func (mr *MockAgentPoolsClientMockRecorder) BeginDeleteMachines(ctx, resourceGroupName, resourceName, agentPoolName, machines, options any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeginDeleteMachines", reflect.TypeOf((*MockAgentPoolsClient)(nil).BeginDeleteMachines), arg0, arg1, arg2, arg3, arg4, arg5)
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeginDeleteMachines", reflect.TypeOf((*MockAgentPoolsClient)(nil).BeginDeleteMachines), ctx, resourceGroupName, resourceName, agentPoolName, machines, options)
|
||||
}
|
||||
|
||||
// Get mocks base method.
|
||||
func (m *MockAgentPoolsClient) Get(arg0 context.Context, arg1, arg2, arg3 string, arg4 *armcontainerservice.AgentPoolsClientGetOptions) (armcontainerservice.AgentPoolsClientGetResponse, error) {
|
||||
func (m *MockAgentPoolsClient) Get(ctx context.Context, resourceGroupName, resourceName, agentPoolName string, options *armcontainerservice.AgentPoolsClientGetOptions) (armcontainerservice.AgentPoolsClientGetResponse, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "Get", arg0, arg1, arg2, arg3, arg4)
|
||||
ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, resourceName, agentPoolName, options)
|
||||
ret0, _ := ret[0].(armcontainerservice.AgentPoolsClientGetResponse)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// Get indicates an expected call of Get.
|
||||
func (mr *MockAgentPoolsClientMockRecorder) Get(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call {
|
||||
func (mr *MockAgentPoolsClientMockRecorder) Get(ctx, resourceGroupName, resourceName, agentPoolName, options any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockAgentPoolsClient)(nil).Get), arg0, arg1, arg2, arg3, arg4)
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockAgentPoolsClient)(nil).Get), ctx, resourceGroupName, resourceName, agentPoolName, options)
|
||||
}
|
||||
|
||||
// NewListPager mocks base method.
|
||||
func (m *MockAgentPoolsClient) NewListPager(resourceGroupName, resourceName string, options *armcontainerservice.AgentPoolsClientListOptions) *runtime.Pager[armcontainerservice.AgentPoolsClientListResponse] {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "NewListPager", resourceGroupName, resourceName, options)
|
||||
ret0, _ := ret[0].(*runtime.Pager[armcontainerservice.AgentPoolsClientListResponse])
|
||||
return ret0
|
||||
}
|
||||
|
||||
// NewListPager indicates an expected call of NewListPager.
|
||||
func (mr *MockAgentPoolsClientMockRecorder) NewListPager(resourceGroupName, resourceName, options any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewListPager", reflect.TypeOf((*MockAgentPoolsClient)(nil).NewListPager), resourceGroupName, resourceName, options)
|
||||
}
|
||||
|
|
|
@ -89,6 +89,8 @@ type ScaleSet struct {
|
|||
dedicatedHost bool
|
||||
|
||||
enableFastDeleteOnFailedProvisioning bool
|
||||
|
||||
enableLabelPredictionsOnTemplate bool
|
||||
}
|
||||
|
||||
// NewScaleSet creates a new NewScaleSet.
|
||||
|
@ -108,10 +110,11 @@ func NewScaleSet(spec *dynamic.NodeGroupSpec, az *AzureManager, curSize int64, d
|
|||
instancesRefreshJitter: az.config.VmssVmsCacheJitter,
|
||||
},
|
||||
|
||||
enableForceDelete: az.config.EnableForceDelete,
|
||||
enableDynamicInstanceList: az.config.EnableDynamicInstanceList,
|
||||
enableDetailedCSEMessage: az.config.EnableDetailedCSEMessage,
|
||||
dedicatedHost: dedicatedHost,
|
||||
enableForceDelete: az.config.EnableForceDelete,
|
||||
enableDynamicInstanceList: az.config.EnableDynamicInstanceList,
|
||||
enableDetailedCSEMessage: az.config.EnableDetailedCSEMessage,
|
||||
enableLabelPredictionsOnTemplate: az.config.EnableLabelPredictionsOnTemplate,
|
||||
dedicatedHost: dedicatedHost,
|
||||
}
|
||||
|
||||
if az.config.VmssVirtualMachinesCacheTTLInSeconds != 0 {
|
||||
|
@ -167,7 +170,12 @@ func (scaleSet *ScaleSet) Autoprovisioned() bool {
|
|||
func (scaleSet *ScaleSet) GetOptions(defaults config.NodeGroupAutoscalingOptions) (*config.NodeGroupAutoscalingOptions, error) {
|
||||
template, err := scaleSet.getVMSSFromCache()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
klog.Errorf("failed to get information for VMSS: %s", scaleSet.Name)
|
||||
// Note: We don't return an error here and instead accept defaults.
|
||||
// Every invocation of GetOptions() returns an error if this condition is met:
|
||||
// `if err != nil && err != cloudprovider.ErrNotImplemented`
|
||||
// The error return value is intended to only capture unimplemented.
|
||||
return nil, nil
|
||||
}
|
||||
return scaleSet.manager.GetScaleSetOptions(*template.Name, defaults), nil
|
||||
}
|
||||
|
@ -187,14 +195,14 @@ func (scaleSet *ScaleSet) getVMSSFromCache() (compute.VirtualMachineScaleSet, er
|
|||
return allVMSS[scaleSet.Name], nil
|
||||
}
|
||||
|
||||
func (scaleSet *ScaleSet) getCurSize() (int64, error) {
|
||||
func (scaleSet *ScaleSet) getCurSize() (int64, *GetVMSSFailedError) {
|
||||
scaleSet.sizeMutex.Lock()
|
||||
defer scaleSet.sizeMutex.Unlock()
|
||||
|
||||
set, err := scaleSet.getVMSSFromCache()
|
||||
if err != nil {
|
||||
klog.Errorf("failed to get information for VMSS: %s, error: %v", scaleSet.Name, err)
|
||||
return -1, err
|
||||
return -1, newGetVMSSFailedError(err, true)
|
||||
}
|
||||
|
||||
// // Remove check for returning in-memory size when VMSS is in updating state
|
||||
|
@ -231,7 +239,7 @@ func (scaleSet *ScaleSet) getCurSize() (int64, error) {
|
|||
set, rerr = scaleSet.manager.azClient.virtualMachineScaleSetsClient.Get(ctx, scaleSet.manager.config.ResourceGroup, scaleSet.Name)
|
||||
if rerr != nil {
|
||||
klog.Errorf("failed to get information for VMSS: %s, error: %v", scaleSet.Name, rerr)
|
||||
return -1, err
|
||||
return -1, newGetVMSSFailedError(rerr.Error(), rerr.IsNotFound())
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -253,12 +261,11 @@ func (scaleSet *ScaleSet) getCurSize() (int64, error) {
|
|||
|
||||
// getScaleSetSize gets Scale Set size.
|
||||
func (scaleSet *ScaleSet) getScaleSetSize() (int64, error) {
|
||||
// First, get the size of the ScaleSet reported by API
|
||||
// -1 indiciates the ScaleSet hasn't been initialized
|
||||
size, err := scaleSet.getCurSize()
|
||||
if size == -1 || err != nil {
|
||||
klog.V(3).Infof("getScaleSetSize: either size is -1 (actual: %d) or error exists (actual err:%v)", size, err)
|
||||
return size, err
|
||||
// First, get the current size of the ScaleSet
|
||||
size, getVMSSError := scaleSet.getCurSize()
|
||||
if size == -1 || getVMSSError != nil {
|
||||
klog.V(3).Infof("getScaleSetSize: either size is -1 (actual: %d) or error exists (actual err:%v)", size, getVMSSError.error)
|
||||
return size, getVMSSError.error
|
||||
}
|
||||
return size, nil
|
||||
}
|
||||
|
@ -647,15 +654,18 @@ func (scaleSet *ScaleSet) Debug() string {
|
|||
|
||||
// TemplateNodeInfo returns a node template for this scale set.
|
||||
func (scaleSet *ScaleSet) TemplateNodeInfo() (*framework.NodeInfo, error) {
|
||||
template, err := scaleSet.getVMSSFromCache()
|
||||
vmss, err := scaleSet.getVMSSFromCache()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
inputLabels := map[string]string{}
|
||||
inputTaints := ""
|
||||
node, err := buildNodeFromTemplate(scaleSet.Name, inputLabels, inputTaints, template, scaleSet.manager, scaleSet.enableDynamicInstanceList)
|
||||
|
||||
template, err := buildNodeTemplateFromVMSS(vmss, inputLabels, inputTaints)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
node, err := buildNodeFromTemplate(scaleSet.Name, template, scaleSet.manager, scaleSet.enableDynamicInstanceList, scaleSet.enableLabelPredictionsOnTemplate)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -666,10 +676,13 @@ func (scaleSet *ScaleSet) TemplateNodeInfo() (*framework.NodeInfo, error) {
|
|||
|
||||
// Nodes returns a list of all nodes that belong to this node group.
|
||||
func (scaleSet *ScaleSet) Nodes() ([]cloudprovider.Instance, error) {
|
||||
curSize, err := scaleSet.getCurSize()
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to get current size for vmss %q: %v", scaleSet.Name, err)
|
||||
return nil, err
|
||||
curSize, getVMSSError := scaleSet.getCurSize()
|
||||
if getVMSSError != nil {
|
||||
klog.Errorf("Failed to get current size for vmss %q: %v", scaleSet.Name, getVMSSError.error)
|
||||
if getVMSSError.notFound {
|
||||
return []cloudprovider.Instance{}, nil // Don't return error if VMSS not found
|
||||
}
|
||||
return nil, getVMSSError.error // We want to return error if other errors occur.
|
||||
}
|
||||
|
||||
scaleSet.instanceMutex.Lock()
|
||||
|
@ -682,7 +695,7 @@ func (scaleSet *ScaleSet) Nodes() ([]cloudprovider.Instance, error) {
|
|||
}
|
||||
|
||||
// Forcefully updating the instanceCache as the instanceCacheSize didn't match curSize or cache is invalid.
|
||||
err = scaleSet.updateInstanceCache()
|
||||
err := scaleSet.updateInstanceCache()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -802,6 +815,7 @@ func addVMToCache(instances *[]cloudprovider.Instance, id, provisioningState *st
|
|||
|
||||
// instanceStatusFromProvisioningStateAndPowerState converts the VM provisioning state to cloudprovider.InstanceStatus
|
||||
// instanceStatusFromProvisioningStateAndPowerState used by orchestrationMode == compute.Flexible
|
||||
// Suggestion: reunify this with scaleSet.instanceStatusFromVM()
|
||||
func instanceStatusFromProvisioningStateAndPowerState(resourceID string, provisioningState *string, powerState string, enableFastDeleteOnFailedProvisioning bool) *cloudprovider.InstanceStatus {
|
||||
if provisioningState == nil {
|
||||
return nil
|
||||
|
@ -816,6 +830,8 @@ func instanceStatusFromProvisioningStateAndPowerState(resourceID string, provisi
|
|||
case provisioningStateCreating:
|
||||
status.State = cloudprovider.InstanceCreating
|
||||
case provisioningStateFailed:
|
||||
status.State = cloudprovider.InstanceRunning
|
||||
|
||||
if enableFastDeleteOnFailedProvisioning {
|
||||
// Provisioning can fail both during instance creation or after the instance is running.
|
||||
// Per https://learn.microsoft.com/en-us/azure/virtual-machines/states-billing#provisioning-states,
|
||||
|
@ -901,3 +917,21 @@ func (scaleSet *ScaleSet) verifyNodeGroup(instance *azureRef, commonNgID string)
|
|||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetVMSSFailedError is used to differentiate between
|
||||
// NotFound and other errors
|
||||
type GetVMSSFailedError struct {
|
||||
notFound bool
|
||||
error error
|
||||
}
|
||||
|
||||
func newGetVMSSFailedError(error error, notFound bool) *GetVMSSFailedError {
|
||||
return &GetVMSSFailedError{
|
||||
error: error,
|
||||
notFound: notFound,
|
||||
}
|
||||
}
|
||||
|
||||
func (v *GetVMSSFailedError) Error() string {
|
||||
return v.error.Error()
|
||||
}
|
||||
|
|
|
@ -198,6 +198,7 @@ func (scaleSet *ScaleSet) setInstanceStatusByProviderID(providerID string, statu
|
|||
}
|
||||
|
||||
// instanceStatusFromVM converts the VM provisioning state to cloudprovider.InstanceStatus.
|
||||
// Suggestion: reunify this with instanceStatusFromProvisioningStateAndPowerState() in azure_scale_set.go
|
||||
func (scaleSet *ScaleSet) instanceStatusFromVM(vm *compute.VirtualMachineScaleSetVM) *cloudprovider.InstanceStatus {
|
||||
// Prefer the proactive cache view of the instance state if we aren't in a terminal state
|
||||
// This is because the power state may be taking longer to update and we don't want
|
||||
|
@ -224,6 +225,8 @@ func (scaleSet *ScaleSet) instanceStatusFromVM(vm *compute.VirtualMachineScaleSe
|
|||
case string(compute.GalleryProvisioningStateCreating):
|
||||
status.State = cloudprovider.InstanceCreating
|
||||
case string(compute.GalleryProvisioningStateFailed):
|
||||
status.State = cloudprovider.InstanceRunning
|
||||
|
||||
klog.V(3).Infof("VM %s reports failed provisioning state with power state: %s, eligible for fast delete: %s", to.String(vm.ID), powerState, strconv.FormatBool(scaleSet.enableFastDeleteOnFailedProvisioning))
|
||||
if scaleSet.enableFastDeleteOnFailedProvisioning {
|
||||
// Provisioning can fail both during instance creation or after the instance is running.
|
||||
|
@ -231,7 +234,8 @@ func (scaleSet *ScaleSet) instanceStatusFromVM(vm *compute.VirtualMachineScaleSe
|
|||
// ProvisioningState represents the most recent provisioning state, therefore only report
|
||||
// InstanceCreating errors when the power state indicates the instance has not yet started running
|
||||
if !isRunningVmPowerState(powerState) {
|
||||
klog.V(4).Infof("VM %s reports failed provisioning state with non-running power state: %s", *vm.ID, powerState)
|
||||
// This fast deletion relies on the fact that InstanceCreating + ErrorInfo will subsequently trigger a deletion.
|
||||
// Could be revisited to rely on something more stable/explicit.
|
||||
status.State = cloudprovider.InstanceCreating
|
||||
status.ErrorInfo = &cloudprovider.InstanceErrorInfo{
|
||||
ErrorClass: cloudprovider.OutOfResourcesErrorClass,
|
||||
|
@ -239,7 +243,6 @@ func (scaleSet *ScaleSet) instanceStatusFromVM(vm *compute.VirtualMachineScaleSe
|
|||
ErrorMessage: "Azure failed to provision a node for this node group",
|
||||
}
|
||||
} else {
|
||||
klog.V(5).Infof("VM %s reports a failed provisioning state but is running (%s)", *vm.ID, powerState)
|
||||
status.State = cloudprovider.InstanceRunning
|
||||
}
|
||||
}
|
||||
|
|
|
@ -19,26 +19,11 @@ package azure
|
|||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"go.uber.org/mock/gomock"
|
||||
|
||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
|
||||
"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/mockvmssvmclient"
|
||||
)
|
||||
|
||||
var (
|
||||
ctrl *gomock.Controller
|
||||
currentTime, expiredTime time.Time
|
||||
provider *AzureCloudProvider
|
||||
scaleSet *ScaleSet
|
||||
mockVMSSVMClient *mockvmssvmclient.MockInterface
|
||||
expectedVMSSVMs []compute.VirtualMachineScaleSetVM
|
||||
expectedStates []cloudprovider.InstanceState
|
||||
instanceCache, expectedInstanceCache []cloudprovider.Instance
|
||||
)
|
||||
|
||||
func testGetInstanceCacheWithStates(t *testing.T, vms []compute.VirtualMachineScaleSetVM,
|
||||
|
@ -53,3 +38,128 @@ func testGetInstanceCacheWithStates(t *testing.T, vms []compute.VirtualMachineSc
|
|||
}
|
||||
return instanceCacheTest
|
||||
}
|
||||
|
||||
// Suggestion: could populate all combinations, should reunify with TestInstanceStatusFromProvisioningStateAndPowerState
|
||||
func TestInstanceStatusFromVM(t *testing.T) {
|
||||
t.Run("fast delete enablement = false", func(t *testing.T) {
|
||||
provider := newTestProvider(t)
|
||||
scaleSet := newTestScaleSet(provider.azureManager, "testScaleSet")
|
||||
|
||||
t.Run("provisioning state = failed, power state = starting", func(t *testing.T) {
|
||||
vm := newVMObjectWithState(string(compute.GalleryProvisioningStateFailed), vmPowerStateStarting)
|
||||
|
||||
status := scaleSet.instanceStatusFromVM(vm)
|
||||
|
||||
assert.NotNil(t, status)
|
||||
assert.Equal(t, cloudprovider.InstanceRunning, status.State)
|
||||
})
|
||||
|
||||
t.Run("provisioning state = failed, power state = running", func(t *testing.T) {
|
||||
vm := newVMObjectWithState(string(compute.GalleryProvisioningStateFailed), vmPowerStateRunning)
|
||||
|
||||
status := scaleSet.instanceStatusFromVM(vm)
|
||||
|
||||
assert.NotNil(t, status)
|
||||
assert.Equal(t, cloudprovider.InstanceRunning, status.State)
|
||||
})
|
||||
|
||||
t.Run("provisioning state = failed, power state = stopping", func(t *testing.T) {
|
||||
vm := newVMObjectWithState(string(compute.GalleryProvisioningStateFailed), vmPowerStateStopping)
|
||||
|
||||
status := scaleSet.instanceStatusFromVM(vm)
|
||||
|
||||
assert.NotNil(t, status)
|
||||
assert.Equal(t, cloudprovider.InstanceRunning, status.State)
|
||||
})
|
||||
|
||||
t.Run("provisioning state = failed, power state = stopped", func(t *testing.T) {
|
||||
vm := newVMObjectWithState(string(compute.GalleryProvisioningStateFailed), vmPowerStateStopped)
|
||||
|
||||
status := scaleSet.instanceStatusFromVM(vm)
|
||||
|
||||
assert.NotNil(t, status)
|
||||
assert.Equal(t, cloudprovider.InstanceRunning, status.State)
|
||||
})
|
||||
|
||||
t.Run("provisioning state = failed, power state = deallocated", func(t *testing.T) {
|
||||
vm := newVMObjectWithState(string(compute.GalleryProvisioningStateFailed), vmPowerStateDeallocated)
|
||||
|
||||
status := scaleSet.instanceStatusFromVM(vm)
|
||||
|
||||
assert.NotNil(t, status)
|
||||
assert.Equal(t, cloudprovider.InstanceRunning, status.State)
|
||||
})
|
||||
|
||||
t.Run("provisioning state = failed, power state = unknown", func(t *testing.T) {
|
||||
vm := newVMObjectWithState(string(compute.GalleryProvisioningStateFailed), vmPowerStateUnknown)
|
||||
|
||||
status := scaleSet.instanceStatusFromVM(vm)
|
||||
|
||||
assert.NotNil(t, status)
|
||||
assert.Equal(t, cloudprovider.InstanceRunning, status.State)
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("fast delete enablement = true", func(t *testing.T) {
|
||||
provider := newTestProvider(t)
|
||||
scaleSet := newTestScaleSetWithFastDelete(provider.azureManager, "testScaleSet")
|
||||
|
||||
t.Run("provisioning state = failed, power state = starting", func(t *testing.T) {
|
||||
vm := newVMObjectWithState(string(compute.GalleryProvisioningStateFailed), vmPowerStateStarting)
|
||||
|
||||
status := scaleSet.instanceStatusFromVM(vm)
|
||||
|
||||
assert.NotNil(t, status)
|
||||
assert.Equal(t, cloudprovider.InstanceRunning, status.State)
|
||||
})
|
||||
|
||||
t.Run("provisioning state = failed, power state = running", func(t *testing.T) {
|
||||
vm := newVMObjectWithState(string(compute.GalleryProvisioningStateFailed), vmPowerStateRunning)
|
||||
|
||||
status := scaleSet.instanceStatusFromVM(vm)
|
||||
|
||||
assert.NotNil(t, status)
|
||||
assert.Equal(t, cloudprovider.InstanceRunning, status.State)
|
||||
})
|
||||
|
||||
t.Run("provisioning state = failed, power state = stopping", func(t *testing.T) {
|
||||
vm := newVMObjectWithState(string(compute.GalleryProvisioningStateFailed), vmPowerStateStopping)
|
||||
|
||||
status := scaleSet.instanceStatusFromVM(vm)
|
||||
|
||||
assert.NotNil(t, status)
|
||||
assert.Equal(t, cloudprovider.InstanceCreating, status.State)
|
||||
assert.NotNil(t, status.ErrorInfo)
|
||||
})
|
||||
|
||||
t.Run("provisioning state = failed, power state = stopped", func(t *testing.T) {
|
||||
vm := newVMObjectWithState(string(compute.GalleryProvisioningStateFailed), vmPowerStateStopped)
|
||||
|
||||
status := scaleSet.instanceStatusFromVM(vm)
|
||||
|
||||
assert.NotNil(t, status)
|
||||
assert.Equal(t, cloudprovider.InstanceCreating, status.State)
|
||||
assert.NotNil(t, status.ErrorInfo)
|
||||
})
|
||||
|
||||
t.Run("provisioning state = failed, power state = deallocated", func(t *testing.T) {
|
||||
vm := newVMObjectWithState(string(compute.GalleryProvisioningStateFailed), vmPowerStateDeallocated)
|
||||
|
||||
status := scaleSet.instanceStatusFromVM(vm)
|
||||
|
||||
assert.NotNil(t, status)
|
||||
assert.Equal(t, cloudprovider.InstanceCreating, status.State)
|
||||
assert.NotNil(t, status.ErrorInfo)
|
||||
})
|
||||
|
||||
t.Run("provisioning state = failed, power state = unknown", func(t *testing.T) {
|
||||
vm := newVMObjectWithState(string(compute.GalleryProvisioningStateFailed), vmPowerStateUnknown)
|
||||
|
||||
status := scaleSet.instanceStatusFromVM(vm)
|
||||
|
||||
assert.NotNil(t, status)
|
||||
assert.Equal(t, cloudprovider.InstanceCreating, status.State)
|
||||
assert.NotNil(t, status.ErrorInfo)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
|
|
@ -43,21 +43,32 @@ func newTestScaleSet(manager *AzureManager, name string) *ScaleSet {
|
|||
azureRef: azureRef{
|
||||
Name: name,
|
||||
},
|
||||
manager: manager,
|
||||
minSize: 1,
|
||||
maxSize: 5,
|
||||
enableForceDelete: manager.config.EnableForceDelete,
|
||||
enableFastDeleteOnFailedProvisioning: true,
|
||||
manager: manager,
|
||||
minSize: 1,
|
||||
maxSize: 5,
|
||||
enableForceDelete: manager.config.EnableForceDelete,
|
||||
}
|
||||
}
|
||||
|
||||
func newTestScaleSetMinSizeZero(manager *AzureManager, name string) *ScaleSet {
|
||||
return &ScaleSet{
|
||||
azureRef: azureRef{
|
||||
Name: name,
|
||||
},
|
||||
manager: manager,
|
||||
minSize: 0,
|
||||
maxSize: 5,
|
||||
enableForceDelete: manager.config.EnableForceDelete,
|
||||
}
|
||||
}
|
||||
|
||||
func newTestScaleSetWithFastDelete(manager *AzureManager, name string) *ScaleSet {
|
||||
return &ScaleSet{
|
||||
azureRef: azureRef{
|
||||
Name: name,
|
||||
},
|
||||
manager: manager,
|
||||
minSize: 0,
|
||||
minSize: 1,
|
||||
maxSize: 5,
|
||||
enableForceDelete: manager.config.EnableForceDelete,
|
||||
enableFastDeleteOnFailedProvisioning: true,
|
||||
|
@ -140,7 +151,7 @@ func newApiNode(orchmode compute.OrchestrationMode, vmID int64) *apiv1.Node {
|
|||
}
|
||||
return node
|
||||
}
|
||||
func TestMaxSize(t *testing.T) {
|
||||
func TestScaleSetMaxSize(t *testing.T) {
|
||||
provider := newTestProvider(t)
|
||||
registered := provider.azureManager.RegisterNodeGroup(
|
||||
newTestScaleSet(provider.azureManager, "test-asg"))
|
||||
|
@ -149,7 +160,7 @@ func TestMaxSize(t *testing.T) {
|
|||
assert.Equal(t, provider.NodeGroups()[0].MaxSize(), 5)
|
||||
}
|
||||
|
||||
func TestMinSize(t *testing.T) {
|
||||
func TestScaleSetMinSize(t *testing.T) {
|
||||
provider := newTestProvider(t)
|
||||
registered := provider.azureManager.RegisterNodeGroup(
|
||||
newTestScaleSet(provider.azureManager, "test-asg"))
|
||||
|
@ -158,7 +169,7 @@ func TestMinSize(t *testing.T) {
|
|||
assert.Equal(t, provider.NodeGroups()[0].MinSize(), 1)
|
||||
}
|
||||
|
||||
func TestMinSizeZero(t *testing.T) {
|
||||
func TestScaleSetMinSizeZero(t *testing.T) {
|
||||
provider := newTestProvider(t)
|
||||
registered := provider.azureManager.RegisterNodeGroup(
|
||||
newTestScaleSetMinSizeZero(provider.azureManager, testASG))
|
||||
|
@ -167,7 +178,7 @@ func TestMinSizeZero(t *testing.T) {
|
|||
assert.Equal(t, provider.NodeGroups()[0].MinSize(), 0)
|
||||
}
|
||||
|
||||
func TestTargetSize(t *testing.T) {
|
||||
func TestScaleSetTargetSize(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
|
@ -237,7 +248,7 @@ func TestTargetSize(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestIncreaseSize(t *testing.T) {
|
||||
func TestScaleSetIncreaseSize(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
|
@ -354,7 +365,7 @@ func TestIncreaseSize(t *testing.T) {
|
|||
|
||||
// TestIncreaseSizeOnVMProvisioningFailed has been tweeked only for Uniform Orchestration mode.
|
||||
// If ProvisioningState == failed and power state is not running, Status.State == InstanceCreating with errorInfo populated.
|
||||
func TestIncreaseSizeOnVMProvisioningFailed(t *testing.T) {
|
||||
func TestScaleSetIncreaseSizeOnVMProvisioningFailed(t *testing.T) {
|
||||
testCases := map[string]struct {
|
||||
expectInstanceRunning bool
|
||||
isMissingInstanceView bool
|
||||
|
@ -362,15 +373,15 @@ func TestIncreaseSizeOnVMProvisioningFailed(t *testing.T) {
|
|||
expectErrorInfoPopulated bool
|
||||
}{
|
||||
"out of resources when no power state exists": {
|
||||
expectErrorInfoPopulated: true,
|
||||
expectErrorInfoPopulated: false,
|
||||
},
|
||||
"out of resources when VM is stopped": {
|
||||
statuses: []compute.InstanceViewStatus{{Code: to.StringPtr(vmPowerStateStopped)}},
|
||||
expectErrorInfoPopulated: true,
|
||||
expectErrorInfoPopulated: false,
|
||||
},
|
||||
"out of resources when VM reports invalid power state": {
|
||||
statuses: []compute.InstanceViewStatus{{Code: to.StringPtr("PowerState/invalid")}},
|
||||
expectErrorInfoPopulated: true,
|
||||
expectErrorInfoPopulated: false,
|
||||
},
|
||||
"instance running when power state is running": {
|
||||
expectInstanceRunning: true,
|
||||
|
@ -443,7 +454,96 @@ func TestIncreaseSizeOnVMProvisioningFailed(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestIncreaseSizeOnVMSSUpdating(t *testing.T) {
|
||||
func TestIncreaseSizeOnVMProvisioningFailedWithFastDelete(t *testing.T) {
|
||||
testCases := map[string]struct {
|
||||
expectInstanceRunning bool
|
||||
isMissingInstanceView bool
|
||||
statuses []compute.InstanceViewStatus
|
||||
expectErrorInfoPopulated bool
|
||||
}{
|
||||
"out of resources when no power state exists": {
|
||||
expectErrorInfoPopulated: true,
|
||||
},
|
||||
"out of resources when VM is stopped": {
|
||||
statuses: []compute.InstanceViewStatus{{Code: to.StringPtr(vmPowerStateStopped)}},
|
||||
expectErrorInfoPopulated: true,
|
||||
},
|
||||
"out of resources when VM reports invalid power state": {
|
||||
statuses: []compute.InstanceViewStatus{{Code: to.StringPtr("PowerState/invalid")}},
|
||||
expectErrorInfoPopulated: true,
|
||||
},
|
||||
"instance running when power state is running": {
|
||||
expectInstanceRunning: true,
|
||||
statuses: []compute.InstanceViewStatus{{Code: to.StringPtr(vmPowerStateRunning)}},
|
||||
expectErrorInfoPopulated: false,
|
||||
},
|
||||
"instance running if instance view cannot be retrieved": {
|
||||
expectInstanceRunning: true,
|
||||
isMissingInstanceView: true,
|
||||
expectErrorInfoPopulated: false,
|
||||
},
|
||||
}
|
||||
for testName, testCase := range testCases {
|
||||
t.Run(testName, func(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
manager := newTestAzureManager(t)
|
||||
vmssName := "vmss-failed-upscale"
|
||||
|
||||
expectedScaleSets := newTestVMSSList(3, "vmss-failed-upscale", "eastus", compute.Uniform)
|
||||
expectedVMSSVMs := newTestVMSSVMList(3)
|
||||
// The failed state is important line of code here
|
||||
expectedVMs := newTestVMList(3)
|
||||
expectedVMSSVMs[2].ProvisioningState = to.StringPtr(provisioningStateFailed)
|
||||
if !testCase.isMissingInstanceView {
|
||||
expectedVMSSVMs[2].InstanceView = &compute.VirtualMachineScaleSetVMInstanceView{Statuses: &testCase.statuses}
|
||||
}
|
||||
|
||||
mockVMSSClient := mockvmssclient.NewMockInterface(ctrl)
|
||||
mockVMSSClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup).Return(expectedScaleSets, nil)
|
||||
mockVMSSClient.EXPECT().CreateOrUpdateAsync(gomock.Any(), manager.config.ResourceGroup, vmssName, gomock.Any()).Return(nil, nil)
|
||||
mockVMSSClient.EXPECT().WaitForCreateOrUpdateResult(gomock.Any(), gomock.Any(), manager.config.ResourceGroup).Return(&http.Response{StatusCode: http.StatusOK}, nil).AnyTimes()
|
||||
manager.azClient.virtualMachineScaleSetsClient = mockVMSSClient
|
||||
mockVMSSVMClient := mockvmssvmclient.NewMockInterface(ctrl)
|
||||
mockVMSSVMClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup, "vmss-failed-upscale", gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes()
|
||||
manager.azClient.virtualMachineScaleSetVMsClient = mockVMSSVMClient
|
||||
|
||||
mockVMClient := mockvmclient.NewMockInterface(ctrl)
|
||||
mockVMClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup).Return(expectedVMs, nil).AnyTimes()
|
||||
manager.azClient.virtualMachinesClient = mockVMClient
|
||||
|
||||
manager.explicitlyConfigured["vmss-failed-upscale"] = true
|
||||
registered := manager.RegisterNodeGroup(newTestScaleSetWithFastDelete(manager, vmssName))
|
||||
assert.True(t, registered)
|
||||
manager.Refresh()
|
||||
|
||||
provider, err := BuildAzureCloudProvider(manager, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
scaleSet, ok := provider.NodeGroups()[0].(*ScaleSet)
|
||||
assert.True(t, ok)
|
||||
|
||||
// Increase size by one, but the new node fails provisioning
|
||||
err = scaleSet.IncreaseSize(1)
|
||||
assert.NoError(t, err)
|
||||
|
||||
nodes, err := scaleSet.Nodes()
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, 3, len(nodes))
|
||||
|
||||
assert.Equal(t, testCase.expectErrorInfoPopulated, nodes[2].Status.ErrorInfo != nil)
|
||||
if testCase.expectErrorInfoPopulated {
|
||||
assert.Equal(t, cloudprovider.InstanceCreating, nodes[2].Status.State)
|
||||
} else {
|
||||
assert.Equal(t, cloudprovider.InstanceRunning, nodes[2].Status.State)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestScaleSetIncreaseSizeOnVMSSUpdating(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
|
@ -493,7 +593,7 @@ func TestIncreaseSizeOnVMSSUpdating(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestBelongs(t *testing.T) {
|
||||
func TestScaleSetBelongs(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
|
@ -545,7 +645,7 @@ func TestBelongs(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestDeleteNodes(t *testing.T) {
|
||||
func TestScaleSetDeleteNodes(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
|
@ -683,7 +783,7 @@ func TestDeleteNodes(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestDeleteNodeUnregistered(t *testing.T) {
|
||||
func TestScaleSetDeleteNodeUnregistered(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
|
@ -796,7 +896,7 @@ func TestDeleteNodeUnregistered(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestDeleteInstancesWithForceDeleteEnabled(t *testing.T) {
|
||||
func TestScaleSetDeleteInstancesWithForceDeleteEnabled(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
manager := newTestAzureManager(t)
|
||||
|
@ -910,7 +1010,7 @@ func TestDeleteInstancesWithForceDeleteEnabled(t *testing.T) {
|
|||
|
||||
}
|
||||
|
||||
func TestDeleteNoConflictRequest(t *testing.T) {
|
||||
func TestScaleSetDeleteNoConflictRequest(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
|
@ -971,7 +1071,7 @@ func TestDeleteNoConflictRequest(t *testing.T) {
|
|||
err = scaleSet.DeleteNodes([]*apiv1.Node{node})
|
||||
}
|
||||
|
||||
func TestId(t *testing.T) {
|
||||
func TestScaleSetId(t *testing.T) {
|
||||
provider := newTestProvider(t)
|
||||
registered := provider.azureManager.RegisterNodeGroup(
|
||||
newTestScaleSet(provider.azureManager, "test-asg"))
|
||||
|
@ -980,7 +1080,7 @@ func TestId(t *testing.T) {
|
|||
assert.Equal(t, provider.NodeGroups()[0].Id(), "test-asg")
|
||||
}
|
||||
|
||||
func TestDebug(t *testing.T) {
|
||||
func TestAgentPoolDebug(t *testing.T) {
|
||||
asg := ScaleSet{
|
||||
manager: newTestAzureManager(t),
|
||||
minSize: 5,
|
||||
|
@ -1059,7 +1159,7 @@ func TestScaleSetNodes(t *testing.T) {
|
|||
|
||||
}
|
||||
|
||||
func TestEnableVmssFlexNodesFlag(t *testing.T) {
|
||||
func TestScaleSetEnableVmssFlexNodesFlag(t *testing.T) {
|
||||
|
||||
// flag set to false
|
||||
ctrl := gomock.NewController(t)
|
||||
|
@ -1091,7 +1191,7 @@ func TestEnableVmssFlexNodesFlag(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestTemplateNodeInfo(t *testing.T) {
|
||||
func TestScaleSetTemplateNodeInfo(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
|
@ -1116,6 +1216,10 @@ func TestTemplateNodeInfo(t *testing.T) {
|
|||
}
|
||||
asg.Name = "test-asg"
|
||||
|
||||
// The dynamic SKU list ("cache") in the test provider is empty
|
||||
// (initialized with cfg.EnableDynamicInstanceList = false).
|
||||
assert.False(t, provider.azureManager.azureCache.HasVMSKUs())
|
||||
|
||||
t.Run("Checking fallback to static because dynamic list is empty", func(t *testing.T) {
|
||||
asg.enableDynamicInstanceList = true
|
||||
|
||||
|
@ -1128,12 +1232,12 @@ func TestTemplateNodeInfo(t *testing.T) {
|
|||
// Properly testing dynamic SKU list through skewer is not possible,
|
||||
// because there are no Resource API mocks included yet.
|
||||
// Instead, the rest of the (consumer side) tests here
|
||||
// override GetVMSSTypeDynamically and GetVMSSTypeStatically functions.
|
||||
// override GetInstanceTypeDynamically and GetInstanceTypeStatically functions.
|
||||
|
||||
t.Run("Checking dynamic workflow", func(t *testing.T) {
|
||||
asg.enableDynamicInstanceList = true
|
||||
|
||||
GetVMSSTypeDynamically = func(template compute.VirtualMachineScaleSet, azCache *azureCache) (InstanceType, error) {
|
||||
GetInstanceTypeDynamically = func(template NodeTemplate, azCache *azureCache) (InstanceType, error) {
|
||||
vmssType := InstanceType{}
|
||||
vmssType.VCPU = 1
|
||||
vmssType.GPU = 2
|
||||
|
@ -1151,10 +1255,10 @@ func TestTemplateNodeInfo(t *testing.T) {
|
|||
t.Run("Checking static workflow if dynamic fails", func(t *testing.T) {
|
||||
asg.enableDynamicInstanceList = true
|
||||
|
||||
GetVMSSTypeDynamically = func(template compute.VirtualMachineScaleSet, azCache *azureCache) (InstanceType, error) {
|
||||
GetInstanceTypeDynamically = func(template NodeTemplate, azCache *azureCache) (InstanceType, error) {
|
||||
return InstanceType{}, fmt.Errorf("dynamic error exists")
|
||||
}
|
||||
GetVMSSTypeStatically = func(template compute.VirtualMachineScaleSet) (*InstanceType, error) {
|
||||
GetInstanceTypeStatically = func(template NodeTemplate) (*InstanceType, error) {
|
||||
vmssType := InstanceType{}
|
||||
vmssType.VCPU = 1
|
||||
vmssType.GPU = 2
|
||||
|
@ -1172,10 +1276,10 @@ func TestTemplateNodeInfo(t *testing.T) {
|
|||
t.Run("Fails to find vmss instance information using static and dynamic workflow, instance not supported", func(t *testing.T) {
|
||||
asg.enableDynamicInstanceList = true
|
||||
|
||||
GetVMSSTypeDynamically = func(template compute.VirtualMachineScaleSet, azCache *azureCache) (InstanceType, error) {
|
||||
GetInstanceTypeDynamically = func(template NodeTemplate, azCache *azureCache) (InstanceType, error) {
|
||||
return InstanceType{}, fmt.Errorf("dynamic error exists")
|
||||
}
|
||||
GetVMSSTypeStatically = func(template compute.VirtualMachineScaleSet) (*InstanceType, error) {
|
||||
GetInstanceTypeStatically = func(template NodeTemplate) (*InstanceType, error) {
|
||||
return &InstanceType{}, fmt.Errorf("static error exists")
|
||||
}
|
||||
nodeInfo, err := asg.TemplateNodeInfo()
|
||||
|
@ -1188,7 +1292,7 @@ func TestTemplateNodeInfo(t *testing.T) {
|
|||
t.Run("Checking static-only workflow", func(t *testing.T) {
|
||||
asg.enableDynamicInstanceList = false
|
||||
|
||||
GetVMSSTypeStatically = func(template compute.VirtualMachineScaleSet) (*InstanceType, error) {
|
||||
GetInstanceTypeStatically = func(template NodeTemplate) (*InstanceType, error) {
|
||||
vmssType := InstanceType{}
|
||||
vmssType.VCPU = 1
|
||||
vmssType.GPU = 2
|
||||
|
@ -1213,7 +1317,7 @@ func TestTemplateNodeInfo(t *testing.T) {
|
|||
})
|
||||
|
||||
}
|
||||
func TestCseErrors(t *testing.T) {
|
||||
func TestScaleSetCseErrors(t *testing.T) {
|
||||
errorMessage := to.StringPtr("Error Message Test")
|
||||
vmssVMs := compute.VirtualMachineScaleSetVM{
|
||||
Name: to.StringPtr("vmTest"),
|
||||
|
@ -1261,3 +1365,113 @@ func TestCseErrors(t *testing.T) {
|
|||
assert.Equal(t, []string(nil), actualCSEErrorMessage)
|
||||
})
|
||||
}
|
||||
|
||||
func newVMObjectWithState(provisioningState string, powerState string) *compute.VirtualMachineScaleSetVM {
|
||||
return &compute.VirtualMachineScaleSetVM{
|
||||
ID: to.StringPtr("1"), // Beware; refactor if needed
|
||||
VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{
|
||||
ProvisioningState: to.StringPtr(provisioningState),
|
||||
InstanceView: &compute.VirtualMachineScaleSetVMInstanceView{
|
||||
Statuses: &[]compute.InstanceViewStatus{
|
||||
{Code: to.StringPtr(powerState)},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Suggestion: could populate all combinations, should reunify with TestInstanceStatusFromVM
|
||||
func TestInstanceStatusFromProvisioningStateAndPowerState(t *testing.T) {
|
||||
t.Run("fast delete enablement = false", func(t *testing.T) {
|
||||
t.Run("provisioning state = failed, power state = starting", func(t *testing.T) {
|
||||
status := instanceStatusFromProvisioningStateAndPowerState("1", to.StringPtr(string(compute.GalleryProvisioningStateFailed)), vmPowerStateStarting, false)
|
||||
|
||||
assert.NotNil(t, status)
|
||||
assert.Equal(t, cloudprovider.InstanceRunning, status.State)
|
||||
})
|
||||
|
||||
t.Run("provisioning state = failed, power state = running", func(t *testing.T) {
|
||||
status := instanceStatusFromProvisioningStateAndPowerState("1", to.StringPtr(string(compute.GalleryProvisioningStateFailed)), vmPowerStateRunning, false)
|
||||
|
||||
assert.NotNil(t, status)
|
||||
assert.Equal(t, cloudprovider.InstanceRunning, status.State)
|
||||
})
|
||||
|
||||
t.Run("provisioning state = failed, power state = stopping", func(t *testing.T) {
|
||||
status := instanceStatusFromProvisioningStateAndPowerState("1", to.StringPtr(string(compute.GalleryProvisioningStateFailed)), vmPowerStateStopping, false)
|
||||
|
||||
assert.NotNil(t, status)
|
||||
assert.Equal(t, cloudprovider.InstanceRunning, status.State)
|
||||
})
|
||||
|
||||
t.Run("provisioning state = failed, power state = stopped", func(t *testing.T) {
|
||||
|
||||
status := instanceStatusFromProvisioningStateAndPowerState("1", to.StringPtr(string(compute.GalleryProvisioningStateFailed)), vmPowerStateStopped, false)
|
||||
|
||||
assert.NotNil(t, status)
|
||||
assert.Equal(t, cloudprovider.InstanceRunning, status.State)
|
||||
})
|
||||
|
||||
t.Run("provisioning state = failed, power state = deallocated", func(t *testing.T) {
|
||||
status := instanceStatusFromProvisioningStateAndPowerState("1", to.StringPtr(string(compute.GalleryProvisioningStateFailed)), vmPowerStateDeallocated, false)
|
||||
|
||||
assert.NotNil(t, status)
|
||||
assert.Equal(t, cloudprovider.InstanceRunning, status.State)
|
||||
})
|
||||
|
||||
t.Run("provisioning state = failed, power state = unknown", func(t *testing.T) {
|
||||
status := instanceStatusFromProvisioningStateAndPowerState("1", to.StringPtr(string(compute.GalleryProvisioningStateFailed)), vmPowerStateUnknown, false)
|
||||
|
||||
assert.NotNil(t, status)
|
||||
assert.Equal(t, cloudprovider.InstanceRunning, status.State)
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("fast delete enablement = true", func(t *testing.T) {
|
||||
t.Run("provisioning state = failed, power state = starting", func(t *testing.T) {
|
||||
status := instanceStatusFromProvisioningStateAndPowerState("1", to.StringPtr(string(compute.GalleryProvisioningStateFailed)), vmPowerStateStarting, true)
|
||||
|
||||
assert.NotNil(t, status)
|
||||
assert.Equal(t, cloudprovider.InstanceRunning, status.State)
|
||||
})
|
||||
|
||||
t.Run("provisioning state = failed, power state = running", func(t *testing.T) {
|
||||
status := instanceStatusFromProvisioningStateAndPowerState("1", to.StringPtr(string(compute.GalleryProvisioningStateFailed)), vmPowerStateRunning, true)
|
||||
|
||||
assert.NotNil(t, status)
|
||||
assert.Equal(t, cloudprovider.InstanceRunning, status.State)
|
||||
})
|
||||
|
||||
t.Run("provisioning state = failed, power state = stopping", func(t *testing.T) {
|
||||
status := instanceStatusFromProvisioningStateAndPowerState("1", to.StringPtr(string(compute.GalleryProvisioningStateFailed)), vmPowerStateStopping, true)
|
||||
|
||||
assert.NotNil(t, status)
|
||||
assert.Equal(t, cloudprovider.InstanceCreating, status.State)
|
||||
assert.NotNil(t, status.ErrorInfo)
|
||||
})
|
||||
|
||||
t.Run("provisioning state = failed, power state = stopped", func(t *testing.T) {
|
||||
status := instanceStatusFromProvisioningStateAndPowerState("1", to.StringPtr(string(compute.GalleryProvisioningStateFailed)), vmPowerStateStopped, true)
|
||||
|
||||
assert.NotNil(t, status)
|
||||
assert.Equal(t, cloudprovider.InstanceCreating, status.State)
|
||||
assert.NotNil(t, status.ErrorInfo)
|
||||
})
|
||||
|
||||
t.Run("provisioning state = failed, power state = deallocated", func(t *testing.T) {
|
||||
status := instanceStatusFromProvisioningStateAndPowerState("1", to.StringPtr(string(compute.GalleryProvisioningStateFailed)), vmPowerStateDeallocated, true)
|
||||
|
||||
assert.NotNil(t, status)
|
||||
assert.Equal(t, cloudprovider.InstanceCreating, status.State)
|
||||
assert.NotNil(t, status.ErrorInfo)
|
||||
})
|
||||
|
||||
t.Run("provisioning state = failed, power state = unknown", func(t *testing.T) {
|
||||
status := instanceStatusFromProvisioningStateAndPowerState("1", to.StringPtr(string(compute.GalleryProvisioningStateFailed)), vmPowerStateUnknown, true)
|
||||
|
||||
assert.NotNil(t, status)
|
||||
assert.Equal(t, cloudprovider.InstanceCreating, status.State)
|
||||
assert.NotNil(t, status.ErrorInfo)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
|
|
@ -24,7 +24,9 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v5"
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
@ -84,8 +86,132 @@ const (
|
|||
clusterLabelKey = AKSLabelKeyPrefixValue + "cluster"
|
||||
)
|
||||
|
||||
func buildNodeFromTemplate(nodeGroupName string, inputLabels map[string]string, inputTaints string,
|
||||
template compute.VirtualMachineScaleSet, manager *AzureManager, enableDynamicInstanceList bool) (*apiv1.Node, error) {
|
||||
// VMPoolNodeTemplate holds properties for node from VMPool
|
||||
type VMPoolNodeTemplate struct {
|
||||
AgentPoolName string
|
||||
Taints []apiv1.Taint
|
||||
Labels map[string]*string
|
||||
OSDiskType *armcontainerservice.OSDiskType
|
||||
}
|
||||
|
||||
// VMSSNodeTemplate holds properties for node from VMSS
|
||||
type VMSSNodeTemplate struct {
|
||||
InputLabels map[string]string
|
||||
InputTaints string
|
||||
Tags map[string]*string
|
||||
OSDisk *compute.VirtualMachineScaleSetOSDisk
|
||||
}
|
||||
|
||||
// NodeTemplate represents a template for an Azure node
|
||||
type NodeTemplate struct {
|
||||
SkuName string
|
||||
InstanceOS string
|
||||
Location string
|
||||
Zones []string
|
||||
VMPoolNodeTemplate *VMPoolNodeTemplate
|
||||
VMSSNodeTemplate *VMSSNodeTemplate
|
||||
}
|
||||
|
||||
func buildNodeTemplateFromVMSS(vmss compute.VirtualMachineScaleSet, inputLabels map[string]string, inputTaints string) (NodeTemplate, error) {
|
||||
instanceOS := cloudprovider.DefaultOS
|
||||
if vmss.VirtualMachineProfile != nil &&
|
||||
vmss.VirtualMachineProfile.OsProfile != nil &&
|
||||
vmss.VirtualMachineProfile.OsProfile.WindowsConfiguration != nil {
|
||||
instanceOS = "windows"
|
||||
}
|
||||
|
||||
var osDisk *compute.VirtualMachineScaleSetOSDisk
|
||||
if vmss.VirtualMachineProfile != nil &&
|
||||
vmss.VirtualMachineProfile.StorageProfile != nil &&
|
||||
vmss.VirtualMachineProfile.StorageProfile.OsDisk != nil {
|
||||
osDisk = vmss.VirtualMachineProfile.StorageProfile.OsDisk
|
||||
}
|
||||
|
||||
if vmss.Sku == nil || vmss.Sku.Name == nil {
|
||||
return NodeTemplate{}, fmt.Errorf("VMSS %s has no SKU", to.String(vmss.Name))
|
||||
}
|
||||
|
||||
if vmss.Location == nil {
|
||||
return NodeTemplate{}, fmt.Errorf("VMSS %s has no location", to.String(vmss.Name))
|
||||
}
|
||||
|
||||
zones := []string{}
|
||||
if vmss.Zones != nil {
|
||||
zones = *vmss.Zones
|
||||
}
|
||||
|
||||
return NodeTemplate{
|
||||
SkuName: *vmss.Sku.Name,
|
||||
|
||||
Location: *vmss.Location,
|
||||
Zones: zones,
|
||||
InstanceOS: instanceOS,
|
||||
VMSSNodeTemplate: &VMSSNodeTemplate{
|
||||
InputLabels: inputLabels,
|
||||
InputTaints: inputTaints,
|
||||
OSDisk: osDisk,
|
||||
Tags: vmss.Tags,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func buildNodeTemplateFromVMPool(vmsPool armcontainerservice.AgentPool, location string, skuName string, labelsFromSpec map[string]string, taintsFromSpec string) (NodeTemplate, error) {
|
||||
if vmsPool.Properties == nil {
|
||||
return NodeTemplate{}, fmt.Errorf("vmsPool %s has nil properties", to.String(vmsPool.Name))
|
||||
}
|
||||
// labels from the agentpool
|
||||
labels := vmsPool.Properties.NodeLabels
|
||||
// labels from spec
|
||||
for k, v := range labelsFromSpec {
|
||||
if labels == nil {
|
||||
labels = make(map[string]*string)
|
||||
}
|
||||
labels[k] = to.StringPtr(v)
|
||||
}
|
||||
|
||||
// taints from the agentpool
|
||||
taintsList := []string{}
|
||||
for _, taint := range vmsPool.Properties.NodeTaints {
|
||||
if to.String(taint) != "" {
|
||||
taintsList = append(taintsList, to.String(taint))
|
||||
}
|
||||
}
|
||||
// taints from spec
|
||||
if taintsFromSpec != "" {
|
||||
taintsList = append(taintsList, taintsFromSpec)
|
||||
}
|
||||
taintsStr := strings.Join(taintsList, ",")
|
||||
taints := extractTaintsFromSpecString(taintsStr)
|
||||
|
||||
var zones []string
|
||||
if vmsPool.Properties.AvailabilityZones != nil {
|
||||
for _, zone := range vmsPool.Properties.AvailabilityZones {
|
||||
if zone != nil {
|
||||
zones = append(zones, *zone)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var instanceOS string
|
||||
if vmsPool.Properties.OSType != nil {
|
||||
instanceOS = strings.ToLower(string(*vmsPool.Properties.OSType))
|
||||
}
|
||||
|
||||
return NodeTemplate{
|
||||
SkuName: skuName,
|
||||
Zones: zones,
|
||||
InstanceOS: instanceOS,
|
||||
Location: location,
|
||||
VMPoolNodeTemplate: &VMPoolNodeTemplate{
|
||||
AgentPoolName: to.String(vmsPool.Name),
|
||||
OSDiskType: vmsPool.Properties.OSDiskType,
|
||||
Taints: taints,
|
||||
Labels: labels,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func buildNodeFromTemplate(nodeGroupName string, template NodeTemplate, manager *AzureManager, enableDynamicInstanceList bool, enableLabelPrediction bool) (*apiv1.Node, error) {
|
||||
node := apiv1.Node{}
|
||||
nodeName := fmt.Sprintf("%s-asg-%d", nodeGroupName, rand.Int63())
|
||||
|
||||
|
@ -104,28 +230,28 @@ func buildNodeFromTemplate(nodeGroupName string, inputLabels map[string]string,
|
|||
// Fetching SKU information from SKU API if enableDynamicInstanceList is true.
|
||||
var dynamicErr error
|
||||
if enableDynamicInstanceList {
|
||||
var vmssTypeDynamic InstanceType
|
||||
klog.V(1).Infof("Fetching instance information for SKU: %s from SKU API", *template.Sku.Name)
|
||||
vmssTypeDynamic, dynamicErr = GetVMSSTypeDynamically(template, manager.azureCache)
|
||||
var instanceTypeDynamic InstanceType
|
||||
klog.V(1).Infof("Fetching instance information for SKU: %s from SKU API", template.SkuName)
|
||||
instanceTypeDynamic, dynamicErr = GetInstanceTypeDynamically(template, manager.azureCache)
|
||||
if dynamicErr == nil {
|
||||
vcpu = vmssTypeDynamic.VCPU
|
||||
gpuCount = vmssTypeDynamic.GPU
|
||||
memoryMb = vmssTypeDynamic.MemoryMb
|
||||
vcpu = instanceTypeDynamic.VCPU
|
||||
gpuCount = instanceTypeDynamic.GPU
|
||||
memoryMb = instanceTypeDynamic.MemoryMb
|
||||
} else {
|
||||
klog.Errorf("Dynamically fetching of instance information from SKU api failed with error: %v", dynamicErr)
|
||||
}
|
||||
}
|
||||
if !enableDynamicInstanceList || dynamicErr != nil {
|
||||
klog.V(1).Infof("Falling back to static SKU list for SKU: %s", *template.Sku.Name)
|
||||
klog.V(1).Infof("Falling back to static SKU list for SKU: %s", template.SkuName)
|
||||
// fall-back on static list of vmss if dynamic workflow fails.
|
||||
vmssTypeStatic, staticErr := GetVMSSTypeStatically(template)
|
||||
instanceTypeStatic, staticErr := GetInstanceTypeStatically(template)
|
||||
if staticErr == nil {
|
||||
vcpu = vmssTypeStatic.VCPU
|
||||
gpuCount = vmssTypeStatic.GPU
|
||||
memoryMb = vmssTypeStatic.MemoryMb
|
||||
vcpu = instanceTypeStatic.VCPU
|
||||
gpuCount = instanceTypeStatic.GPU
|
||||
memoryMb = instanceTypeStatic.MemoryMb
|
||||
} else {
|
||||
// return error if neither of the workflows results with vmss data.
|
||||
klog.V(1).Infof("Instance type %q not supported, err: %v", *template.Sku.Name, staticErr)
|
||||
klog.V(1).Infof("Instance type %q not supported, err: %v", template.SkuName, staticErr)
|
||||
return nil, staticErr
|
||||
}
|
||||
}
|
||||
|
@ -134,7 +260,7 @@ func buildNodeFromTemplate(nodeGroupName string, inputLabels map[string]string,
|
|||
node.Status.Capacity[apiv1.ResourceCPU] = *resource.NewQuantity(vcpu, resource.DecimalSI)
|
||||
// isNPSeries returns if a SKU is an NP-series SKU
|
||||
// SKU API reports GPUs for NP-series but it's actually FPGAs
|
||||
if isNPSeries(*template.Sku.Name) {
|
||||
if isNPSeries(template.SkuName) {
|
||||
node.Status.Capacity[xilinxFpgaResourceName] = *resource.NewQuantity(gpuCount, resource.DecimalSI)
|
||||
} else {
|
||||
node.Status.Capacity[gpu.ResourceNvidiaGPU] = *resource.NewQuantity(gpuCount, resource.DecimalSI)
|
||||
|
@ -145,9 +271,37 @@ func buildNodeFromTemplate(nodeGroupName string, inputLabels map[string]string,
|
|||
// TODO: set real allocatable.
|
||||
node.Status.Allocatable = node.Status.Capacity
|
||||
|
||||
if template.VMSSNodeTemplate != nil {
|
||||
node = processVMSSTemplate(template, nodeName, node, enableLabelPrediction)
|
||||
} else if template.VMPoolNodeTemplate != nil {
|
||||
node = processVMPoolTemplate(template, nodeName, node)
|
||||
} else {
|
||||
return nil, fmt.Errorf("invalid node template: missing both VMSS and VMPool templates")
|
||||
}
|
||||
|
||||
klog.V(4).Infof("Setting node %s labels to: %s", nodeName, node.Labels)
|
||||
klog.V(4).Infof("Setting node %s taints to: %s", nodeName, node.Spec.Taints)
|
||||
node.Status.Conditions = cloudprovider.BuildReadyConditions()
|
||||
return &node, nil
|
||||
}
|
||||
|
||||
func processVMPoolTemplate(template NodeTemplate, nodeName string, node apiv1.Node) apiv1.Node {
|
||||
labels := buildGenericLabels(template, nodeName)
|
||||
labels[agentPoolNodeLabelKey] = template.VMPoolNodeTemplate.AgentPoolName
|
||||
if template.VMPoolNodeTemplate.Labels != nil {
|
||||
for k, v := range template.VMPoolNodeTemplate.Labels {
|
||||
labels[k] = to.String(v)
|
||||
}
|
||||
}
|
||||
node.Labels = cloudprovider.JoinStringMaps(node.Labels, labels)
|
||||
node.Spec.Taints = template.VMPoolNodeTemplate.Taints
|
||||
return node
|
||||
}
|
||||
|
||||
func processVMSSTemplate(template NodeTemplate, nodeName string, node apiv1.Node, enableLabelPrediction bool) apiv1.Node {
|
||||
// NodeLabels
|
||||
if template.Tags != nil {
|
||||
for k, v := range template.Tags {
|
||||
if template.VMSSNodeTemplate.Tags != nil {
|
||||
for k, v := range template.VMSSNodeTemplate.Tags {
|
||||
if v != nil {
|
||||
node.Labels[k] = *v
|
||||
} else {
|
||||
|
@ -164,105 +318,97 @@ func buildNodeFromTemplate(nodeGroupName string, inputLabels map[string]string,
|
|||
labels := make(map[string]string)
|
||||
|
||||
// Prefer the explicit labels in spec coming from RP over the VMSS template
|
||||
if len(inputLabels) > 0 {
|
||||
labels = inputLabels
|
||||
if len(template.VMSSNodeTemplate.InputLabels) > 0 {
|
||||
labels = template.VMSSNodeTemplate.InputLabels
|
||||
} else {
|
||||
labels = extractLabelsFromScaleSet(template.Tags)
|
||||
labels = extractLabelsFromTags(template.VMSSNodeTemplate.Tags)
|
||||
}
|
||||
|
||||
// Add the agentpool label, its value should come from the VMSS poolName tag
|
||||
// NOTE: The plan is for agentpool label to be deprecated in favor of the aks-prefixed one
|
||||
// We will have to live with both labels for a while
|
||||
if node.Labels[legacyPoolNameTag] != "" {
|
||||
labels[legacyAgentPoolNodeLabelKey] = node.Labels[legacyPoolNameTag]
|
||||
labels[agentPoolNodeLabelKey] = node.Labels[legacyPoolNameTag]
|
||||
}
|
||||
if node.Labels[poolNameTag] != "" {
|
||||
labels[legacyAgentPoolNodeLabelKey] = node.Labels[poolNameTag]
|
||||
labels[agentPoolNodeLabelKey] = node.Labels[poolNameTag]
|
||||
}
|
||||
// This is the best-effort to match AKS system labels,
|
||||
// this prediction needs to be constantly worked on and maintained to keep up with the changes in AKS
|
||||
if enableLabelPrediction {
|
||||
// Add the agentpool label, its value should come from the VMSS poolName tag
|
||||
// NOTE: The plan is for agentpool label to be deprecated in favor of the aks-prefixed one
|
||||
// We will have to live with both labels for a while
|
||||
if node.Labels[legacyPoolNameTag] != "" {
|
||||
labels[legacyAgentPoolNodeLabelKey] = node.Labels[legacyPoolNameTag]
|
||||
labels[agentPoolNodeLabelKey] = node.Labels[legacyPoolNameTag]
|
||||
}
|
||||
if node.Labels[poolNameTag] != "" {
|
||||
labels[legacyAgentPoolNodeLabelKey] = node.Labels[poolNameTag]
|
||||
labels[agentPoolNodeLabelKey] = node.Labels[poolNameTag]
|
||||
}
|
||||
|
||||
// Add the storage profile and storage tier labels
|
||||
if template.VirtualMachineProfile != nil && template.VirtualMachineProfile.StorageProfile != nil && template.VirtualMachineProfile.StorageProfile.OsDisk != nil {
|
||||
// ephemeral
|
||||
if template.VirtualMachineProfile.StorageProfile.OsDisk.DiffDiskSettings != nil && template.VirtualMachineProfile.StorageProfile.OsDisk.DiffDiskSettings.Option == compute.Local {
|
||||
labels[legacyStorageProfileNodeLabelKey] = "ephemeral"
|
||||
labels[storageProfileNodeLabelKey] = "ephemeral"
|
||||
} else {
|
||||
labels[legacyStorageProfileNodeLabelKey] = "managed"
|
||||
labels[storageProfileNodeLabelKey] = "managed"
|
||||
// Add the storage profile and storage tier labels for vmss node
|
||||
if template.VMSSNodeTemplate.OSDisk != nil {
|
||||
// ephemeral
|
||||
if template.VMSSNodeTemplate.OSDisk.DiffDiskSettings != nil && template.VMSSNodeTemplate.OSDisk.DiffDiskSettings.Option == compute.Local {
|
||||
labels[legacyStorageProfileNodeLabelKey] = "ephemeral"
|
||||
labels[storageProfileNodeLabelKey] = "ephemeral"
|
||||
} else {
|
||||
labels[legacyStorageProfileNodeLabelKey] = "managed"
|
||||
labels[storageProfileNodeLabelKey] = "managed"
|
||||
}
|
||||
if template.VMSSNodeTemplate.OSDisk.ManagedDisk != nil {
|
||||
labels[legacyStorageTierNodeLabelKey] = string(template.VMSSNodeTemplate.OSDisk.ManagedDisk.StorageAccountType)
|
||||
labels[storageTierNodeLabelKey] = string(template.VMSSNodeTemplate.OSDisk.ManagedDisk.StorageAccountType)
|
||||
}
|
||||
}
|
||||
if template.VirtualMachineProfile.StorageProfile.OsDisk.ManagedDisk != nil {
|
||||
labels[legacyStorageTierNodeLabelKey] = string(template.VirtualMachineProfile.StorageProfile.OsDisk.ManagedDisk.StorageAccountType)
|
||||
labels[storageTierNodeLabelKey] = string(template.VirtualMachineProfile.StorageProfile.OsDisk.ManagedDisk.StorageAccountType)
|
||||
}
|
||||
// Add ephemeral-storage value
|
||||
if template.VirtualMachineProfile.StorageProfile.OsDisk.DiskSizeGB != nil {
|
||||
node.Status.Capacity[apiv1.ResourceEphemeralStorage] = *resource.NewQuantity(int64(int(*template.VirtualMachineProfile.StorageProfile.OsDisk.DiskSizeGB)*1024*1024*1024), resource.DecimalSI)
|
||||
klog.V(4).Infof("OS Disk Size from template is: %d", *template.VirtualMachineProfile.StorageProfile.OsDisk.DiskSizeGB)
|
||||
klog.V(4).Infof("Setting ephemeral storage to: %v", node.Status.Capacity[apiv1.ResourceEphemeralStorage])
|
||||
|
||||
// If we are on GPU-enabled SKUs, append the accelerator
|
||||
// label so that CA makes better decision when scaling from zero for GPU pools
|
||||
if isNvidiaEnabledSKU(template.SkuName) {
|
||||
labels[GPULabel] = "nvidia"
|
||||
labels[legacyGPULabel] = "nvidia"
|
||||
}
|
||||
}
|
||||
|
||||
// If we are on GPU-enabled SKUs, append the accelerator
|
||||
// label so that CA makes better decision when scaling from zero for GPU pools
|
||||
if isNvidiaEnabledSKU(*template.Sku.Name) {
|
||||
labels[GPULabel] = "nvidia"
|
||||
labels[legacyGPULabel] = "nvidia"
|
||||
// Add ephemeral-storage value
|
||||
if template.VMSSNodeTemplate.OSDisk != nil && template.VMSSNodeTemplate.OSDisk.DiskSizeGB != nil {
|
||||
node.Status.Capacity[apiv1.ResourceEphemeralStorage] = *resource.NewQuantity(int64(int(*template.VMSSNodeTemplate.OSDisk.DiskSizeGB)*1024*1024*1024), resource.DecimalSI)
|
||||
klog.V(4).Infof("OS Disk Size from template is: %d", *template.VMSSNodeTemplate.OSDisk.DiskSizeGB)
|
||||
klog.V(4).Infof("Setting ephemeral storage to: %v", node.Status.Capacity[apiv1.ResourceEphemeralStorage])
|
||||
}
|
||||
|
||||
// Extract allocatables from tags
|
||||
resourcesFromTags := extractAllocatableResourcesFromScaleSet(template.Tags)
|
||||
resourcesFromTags := extractAllocatableResourcesFromScaleSet(template.VMSSNodeTemplate.Tags)
|
||||
for resourceName, val := range resourcesFromTags {
|
||||
node.Status.Capacity[apiv1.ResourceName(resourceName)] = *val
|
||||
}
|
||||
|
||||
node.Labels = cloudprovider.JoinStringMaps(node.Labels, labels)
|
||||
klog.V(4).Infof("Setting node %s labels to: %s", nodeName, node.Labels)
|
||||
|
||||
var taints []apiv1.Taint
|
||||
// Prefer the explicit taints in spec over the VMSS template
|
||||
if inputTaints != "" {
|
||||
taints = extractTaintsFromSpecString(inputTaints)
|
||||
// Prefer the explicit taints in spec over the tags from vmss or vm
|
||||
if template.VMSSNodeTemplate.InputTaints != "" {
|
||||
taints = extractTaintsFromSpecString(template.VMSSNodeTemplate.InputTaints)
|
||||
} else {
|
||||
taints = extractTaintsFromScaleSet(template.Tags)
|
||||
taints = extractTaintsFromTags(template.VMSSNodeTemplate.Tags)
|
||||
}
|
||||
|
||||
// Taints from the Scale Set's Tags
|
||||
node.Spec.Taints = taints
|
||||
klog.V(4).Infof("Setting node %s taints to: %s", nodeName, node.Spec.Taints)
|
||||
|
||||
node.Status.Conditions = cloudprovider.BuildReadyConditions()
|
||||
return &node, nil
|
||||
return node
|
||||
}
|
||||
|
||||
func buildInstanceOS(template compute.VirtualMachineScaleSet) string {
|
||||
instanceOS := cloudprovider.DefaultOS
|
||||
if template.VirtualMachineProfile != nil && template.VirtualMachineProfile.OsProfile != nil && template.VirtualMachineProfile.OsProfile.WindowsConfiguration != nil {
|
||||
instanceOS = "windows"
|
||||
}
|
||||
|
||||
return instanceOS
|
||||
}
|
||||
|
||||
func buildGenericLabels(template compute.VirtualMachineScaleSet, nodeName string) map[string]string {
|
||||
func buildGenericLabels(template NodeTemplate, nodeName string) map[string]string {
|
||||
result := make(map[string]string)
|
||||
|
||||
result[kubeletapis.LabelArch] = cloudprovider.DefaultArch
|
||||
result[apiv1.LabelArchStable] = cloudprovider.DefaultArch
|
||||
|
||||
result[kubeletapis.LabelOS] = buildInstanceOS(template)
|
||||
result[apiv1.LabelOSStable] = buildInstanceOS(template)
|
||||
result[kubeletapis.LabelOS] = template.InstanceOS
|
||||
result[apiv1.LabelOSStable] = template.InstanceOS
|
||||
|
||||
result[apiv1.LabelInstanceType] = *template.Sku.Name
|
||||
result[apiv1.LabelInstanceTypeStable] = *template.Sku.Name
|
||||
result[apiv1.LabelZoneRegion] = strings.ToLower(*template.Location)
|
||||
result[apiv1.LabelTopologyRegion] = strings.ToLower(*template.Location)
|
||||
result[apiv1.LabelInstanceType] = template.SkuName
|
||||
result[apiv1.LabelInstanceTypeStable] = template.SkuName
|
||||
result[apiv1.LabelZoneRegion] = strings.ToLower(template.Location)
|
||||
result[apiv1.LabelTopologyRegion] = strings.ToLower(template.Location)
|
||||
|
||||
if template.Zones != nil && len(*template.Zones) > 0 {
|
||||
failureDomains := make([]string, len(*template.Zones))
|
||||
for k, v := range *template.Zones {
|
||||
failureDomains[k] = strings.ToLower(*template.Location) + "-" + v
|
||||
if len(template.Zones) > 0 {
|
||||
failureDomains := make([]string, len(template.Zones))
|
||||
for k, v := range template.Zones {
|
||||
failureDomains[k] = strings.ToLower(template.Location) + "-" + v
|
||||
}
|
||||
//Picks random zones for Multi-zone nodepool when scaling from zero.
|
||||
//This random zone will not be the same as the zone of the VMSS that is being created, the purpose of creating
|
||||
|
@ -283,7 +429,7 @@ func buildGenericLabels(template compute.VirtualMachineScaleSet, nodeName string
|
|||
return result
|
||||
}
|
||||
|
||||
func extractLabelsFromScaleSet(tags map[string]*string) map[string]string {
|
||||
func extractLabelsFromTags(tags map[string]*string) map[string]string {
|
||||
result := make(map[string]string)
|
||||
|
||||
for tagName, tagValue := range tags {
|
||||
|
@ -300,7 +446,7 @@ func extractLabelsFromScaleSet(tags map[string]*string) map[string]string {
|
|||
return result
|
||||
}
|
||||
|
||||
func extractTaintsFromScaleSet(tags map[string]*string) []apiv1.Taint {
|
||||
func extractTaintsFromTags(tags map[string]*string) []apiv1.Taint {
|
||||
taints := make([]apiv1.Taint, 0)
|
||||
|
||||
for tagName, tagValue := range tags {
|
||||
|
@ -327,35 +473,61 @@ func extractTaintsFromScaleSet(tags map[string]*string) []apiv1.Taint {
|
|||
return taints
|
||||
}
|
||||
|
||||
// extractTaintsFromSpecString is for nodepool taints
|
||||
// Example of a valid taints string, is the same argument to kubelet's `--register-with-taints`
|
||||
// "dedicated=foo:NoSchedule,group=bar:NoExecute,app=fizz:PreferNoSchedule"
|
||||
func extractTaintsFromSpecString(taintsString string) []apiv1.Taint {
|
||||
taints := make([]apiv1.Taint, 0)
|
||||
dedupMap := make(map[string]interface{})
|
||||
// First split the taints at the separator
|
||||
splits := strings.Split(taintsString, ",")
|
||||
for _, split := range splits {
|
||||
taintSplit := strings.Split(split, "=")
|
||||
if len(taintSplit) != 2 {
|
||||
if dedupMap[split] != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
taintKey := taintSplit[0]
|
||||
taintValue := taintSplit[1]
|
||||
|
||||
r, _ := regexp.Compile("(.*):(?:NoSchedule|NoExecute|PreferNoSchedule)")
|
||||
if !r.MatchString(taintValue) {
|
||||
continue
|
||||
dedupMap[split] = struct{}{}
|
||||
valid, taint := constructTaintFromString(split)
|
||||
if valid {
|
||||
taints = append(taints, taint)
|
||||
}
|
||||
}
|
||||
return taints
|
||||
}
|
||||
|
||||
values := strings.SplitN(taintValue, ":", 2)
|
||||
taints = append(taints, apiv1.Taint{
|
||||
Key: taintKey,
|
||||
Value: values[0],
|
||||
Effect: apiv1.TaintEffect(values[1]),
|
||||
})
|
||||
// buildNodeTaintsForVMPool is for VMPool taints, it looks for the taints in the format
|
||||
// []string{zone=dmz:NoSchedule, usage=monitoring:NoSchedule}
|
||||
func buildNodeTaintsForVMPool(taintStrs []string) []apiv1.Taint {
|
||||
taints := make([]apiv1.Taint, 0)
|
||||
for _, taintStr := range taintStrs {
|
||||
valid, taint := constructTaintFromString(taintStr)
|
||||
if valid {
|
||||
taints = append(taints, taint)
|
||||
}
|
||||
}
|
||||
return taints
|
||||
}
|
||||
|
||||
// constructTaintFromString constructs a taint from a string in the format <key>=<value>:<effect>
|
||||
// if the input string is not in the correct format, it returns false and an empty taint
|
||||
func constructTaintFromString(taintString string) (bool, apiv1.Taint) {
|
||||
taintSplit := strings.Split(taintString, "=")
|
||||
if len(taintSplit) != 2 {
|
||||
return false, apiv1.Taint{}
|
||||
}
|
||||
taintKey := taintSplit[0]
|
||||
taintValue := taintSplit[1]
|
||||
|
||||
r, _ := regexp.Compile("(.*):(?:NoSchedule|NoExecute|PreferNoSchedule)")
|
||||
if !r.MatchString(taintValue) {
|
||||
return false, apiv1.Taint{}
|
||||
}
|
||||
|
||||
return taints
|
||||
values := strings.SplitN(taintValue, ":", 2)
|
||||
return true, apiv1.Taint{
|
||||
Key: taintKey,
|
||||
Value: values[0],
|
||||
Effect: apiv1.TaintEffect(values[1]),
|
||||
}
|
||||
}
|
||||
|
||||
func extractAutoscalingOptionsFromScaleSetTags(tags map[string]*string) map[string]string {
|
||||
|
|
|
@ -21,6 +21,7 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v5"
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
|
@ -30,7 +31,7 @@ import (
|
|||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
)
|
||||
|
||||
func TestExtractLabelsFromScaleSet(t *testing.T) {
|
||||
func TestExtractLabelsFromTags(t *testing.T) {
|
||||
expectedNodeLabelKey := "zip"
|
||||
expectedNodeLabelValue := "zap"
|
||||
extraNodeLabelValue := "buzz"
|
||||
|
@ -52,14 +53,14 @@ func TestExtractLabelsFromScaleSet(t *testing.T) {
|
|||
fmt.Sprintf("%s%s", nodeLabelTagName, escapedUnderscoreNodeLabelKey): &escapedUnderscoreNodeLabelValue,
|
||||
}
|
||||
|
||||
labels := extractLabelsFromScaleSet(tags)
|
||||
labels := extractLabelsFromTags(tags)
|
||||
assert.Len(t, labels, 3)
|
||||
assert.Equal(t, expectedNodeLabelValue, labels[expectedNodeLabelKey])
|
||||
assert.Equal(t, escapedSlashNodeLabelValue, labels[expectedSlashEscapedNodeLabelKey])
|
||||
assert.Equal(t, escapedUnderscoreNodeLabelValue, labels[expectedUnderscoreEscapedNodeLabelKey])
|
||||
}
|
||||
|
||||
func TestExtractTaintsFromScaleSet(t *testing.T) {
|
||||
func TestExtractTaintsFromTags(t *testing.T) {
|
||||
noScheduleTaintValue := "foo:NoSchedule"
|
||||
noExecuteTaintValue := "bar:NoExecute"
|
||||
preferNoScheduleTaintValue := "fizz:PreferNoSchedule"
|
||||
|
@ -100,7 +101,7 @@ func TestExtractTaintsFromScaleSet(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
taints := extractTaintsFromScaleSet(tags)
|
||||
taints := extractTaintsFromTags(tags)
|
||||
assert.Len(t, taints, 4)
|
||||
assert.Equal(t, makeTaintSet(expectedTaints), makeTaintSet(taints))
|
||||
}
|
||||
|
@ -137,6 +138,11 @@ func TestExtractTaintsFromSpecString(t *testing.T) {
|
|||
Value: "fizz",
|
||||
Effect: apiv1.TaintEffectPreferNoSchedule,
|
||||
},
|
||||
{
|
||||
Key: "dedicated", // duplicate key, should be ignored
|
||||
Value: "foo",
|
||||
Effect: apiv1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
|
||||
taints := extractTaintsFromSpecString(strings.Join(taintsString, ","))
|
||||
|
@ -176,8 +182,9 @@ func TestTopologyFromScaleSet(t *testing.T) {
|
|||
Location: to.StringPtr("westus"),
|
||||
}
|
||||
expectedZoneValues := []string{"westus-1", "westus-2", "westus-3"}
|
||||
|
||||
labels := buildGenericLabels(testVmss, testNodeName)
|
||||
template, err := buildNodeTemplateFromVMSS(testVmss, map[string]string{}, "")
|
||||
assert.NoError(t, err)
|
||||
labels := buildGenericLabels(template, testNodeName)
|
||||
failureDomain, ok := labels[apiv1.LabelZoneFailureDomain]
|
||||
assert.True(t, ok)
|
||||
topologyZone, ok := labels[apiv1.LabelTopologyZone]
|
||||
|
@ -205,7 +212,9 @@ func TestEmptyTopologyFromScaleSet(t *testing.T) {
|
|||
expectedFailureDomain := "0"
|
||||
expectedTopologyZone := "0"
|
||||
expectedAzureDiskTopology := ""
|
||||
labels := buildGenericLabels(testVmss, testNodeName)
|
||||
template, err := buildNodeTemplateFromVMSS(testVmss, map[string]string{}, "")
|
||||
assert.NoError(t, err)
|
||||
labels := buildGenericLabels(template, testNodeName)
|
||||
|
||||
failureDomain, ok := labels[apiv1.LabelZoneFailureDomain]
|
||||
assert.True(t, ok)
|
||||
|
@ -219,6 +228,61 @@ func TestEmptyTopologyFromScaleSet(t *testing.T) {
|
|||
assert.True(t, ok)
|
||||
assert.Equal(t, expectedAzureDiskTopology, azureDiskTopology)
|
||||
}
|
||||
func TestBuildNodeTemplateFromVMPool(t *testing.T) {
|
||||
agentPoolName := "testpool"
|
||||
location := "eastus"
|
||||
skuName := "Standard_DS2_v2"
|
||||
labelKey := "foo"
|
||||
labelVal := "bar"
|
||||
taintStr := "dedicated=foo:NoSchedule,boo=fizz:PreferNoSchedule,group=bar:NoExecute"
|
||||
|
||||
osType := armcontainerservice.OSTypeLinux
|
||||
osDiskType := armcontainerservice.OSDiskTypeEphemeral
|
||||
zone1 := "1"
|
||||
zone2 := "2"
|
||||
|
||||
vmpool := armcontainerservice.AgentPool{
|
||||
Name: to.StringPtr(agentPoolName),
|
||||
Properties: &armcontainerservice.ManagedClusterAgentPoolProfileProperties{
|
||||
NodeLabels: map[string]*string{
|
||||
"existing": to.StringPtr("label"),
|
||||
"department": to.StringPtr("engineering"),
|
||||
},
|
||||
NodeTaints: []*string{to.StringPtr("group=bar:NoExecute")},
|
||||
OSType: &osType,
|
||||
OSDiskType: &osDiskType,
|
||||
AvailabilityZones: []*string{&zone1, &zone2},
|
||||
},
|
||||
}
|
||||
|
||||
labelsFromSpec := map[string]string{labelKey: labelVal}
|
||||
taintsFromSpec := taintStr
|
||||
|
||||
template, err := buildNodeTemplateFromVMPool(vmpool, location, skuName, labelsFromSpec, taintsFromSpec)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, skuName, template.SkuName)
|
||||
assert.Equal(t, location, template.Location)
|
||||
assert.ElementsMatch(t, []string{zone1, zone2}, template.Zones)
|
||||
assert.Equal(t, "linux", template.InstanceOS)
|
||||
assert.NotNil(t, template.VMPoolNodeTemplate)
|
||||
assert.Equal(t, agentPoolName, template.VMPoolNodeTemplate.AgentPoolName)
|
||||
assert.Equal(t, &osDiskType, template.VMPoolNodeTemplate.OSDiskType)
|
||||
// Labels: should include both from NodeLabels and labelsFromSpec
|
||||
assert.Contains(t, template.VMPoolNodeTemplate.Labels, "existing")
|
||||
assert.Equal(t, "label", *template.VMPoolNodeTemplate.Labels["existing"])
|
||||
assert.Contains(t, template.VMPoolNodeTemplate.Labels, "department")
|
||||
assert.Equal(t, "engineering", *template.VMPoolNodeTemplate.Labels["department"])
|
||||
assert.Contains(t, template.VMPoolNodeTemplate.Labels, labelKey)
|
||||
assert.Equal(t, labelVal, *template.VMPoolNodeTemplate.Labels[labelKey])
|
||||
// Taints: should include both from NodeTaints and taintsFromSpec
|
||||
taintSet := makeTaintSet(template.VMPoolNodeTemplate.Taints)
|
||||
expectedTaints := []apiv1.Taint{
|
||||
{Key: "group", Value: "bar", Effect: apiv1.TaintEffectNoExecute},
|
||||
{Key: "dedicated", Value: "foo", Effect: apiv1.TaintEffectNoSchedule},
|
||||
{Key: "boo", Value: "fizz", Effect: apiv1.TaintEffectPreferNoSchedule},
|
||||
}
|
||||
assert.Equal(t, makeTaintSet(expectedTaints), taintSet)
|
||||
}
|
||||
|
||||
func makeTaintSet(taints []apiv1.Taint) map[apiv1.Taint]bool {
|
||||
set := make(map[apiv1.Taint]bool)
|
||||
|
@ -227,3 +291,91 @@ func makeTaintSet(taints []apiv1.Taint) map[apiv1.Taint]bool {
|
|||
}
|
||||
return set
|
||||
}
|
||||
|
||||
func TestBuildNodeFromTemplateWithLabelPrediction(t *testing.T) {
|
||||
poolName := "testpool"
|
||||
testSkuName := "Standard_DS2_v2"
|
||||
testNodeName := "test-node"
|
||||
|
||||
vmss := compute.VirtualMachineScaleSet{
|
||||
Response: autorest.Response{},
|
||||
Sku: &compute.Sku{Name: &testSkuName},
|
||||
Plan: nil,
|
||||
VirtualMachineScaleSetProperties: &compute.VirtualMachineScaleSetProperties{
|
||||
VirtualMachineProfile: &compute.VirtualMachineScaleSetVMProfile{
|
||||
StorageProfile: &compute.VirtualMachineScaleSetStorageProfile{
|
||||
OsDisk: &compute.VirtualMachineScaleSetOSDisk{
|
||||
DiffDiskSettings: nil, // This makes it managed
|
||||
ManagedDisk: &compute.VirtualMachineScaleSetManagedDiskParameters{
|
||||
StorageAccountType: compute.StorageAccountTypesPremiumLRS,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Tags: map[string]*string{
|
||||
"poolName": &poolName,
|
||||
},
|
||||
Zones: &[]string{"1", "2"},
|
||||
Location: to.StringPtr("westus"),
|
||||
}
|
||||
|
||||
template, err := buildNodeTemplateFromVMSS(vmss, map[string]string{}, "")
|
||||
assert.NoError(t, err)
|
||||
|
||||
manager := &AzureManager{}
|
||||
node, err := buildNodeFromTemplate(testNodeName, template, manager, false, true)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, node)
|
||||
|
||||
// Verify label prediction labels are added
|
||||
assert.Equal(t, poolName, node.Labels["agentpool"])
|
||||
assert.Equal(t, poolName, node.Labels["kubernetes.azure.com/agentpool"])
|
||||
assert.Equal(t, "managed", node.Labels["storageprofile"])
|
||||
assert.Equal(t, "managed", node.Labels["kubernetes.azure.com/storageprofile"])
|
||||
}
|
||||
|
||||
func TestBuildNodeFromTemplateWithEphemeralStorage(t *testing.T) {
|
||||
poolName := "testpool"
|
||||
testSkuName := "Standard_DS2_v2"
|
||||
testNodeName := "test-node"
|
||||
diskSizeGB := int32(128)
|
||||
|
||||
vmss := compute.VirtualMachineScaleSet{
|
||||
Response: autorest.Response{},
|
||||
Sku: &compute.Sku{Name: &testSkuName},
|
||||
Plan: nil,
|
||||
VirtualMachineScaleSetProperties: &compute.VirtualMachineScaleSetProperties{
|
||||
VirtualMachineProfile: &compute.VirtualMachineScaleSetVMProfile{
|
||||
StorageProfile: &compute.VirtualMachineScaleSetStorageProfile{
|
||||
OsDisk: &compute.VirtualMachineScaleSetOSDisk{
|
||||
DiskSizeGB: &diskSizeGB,
|
||||
DiffDiskSettings: nil, // This makes it managed
|
||||
ManagedDisk: &compute.VirtualMachineScaleSetManagedDiskParameters{
|
||||
StorageAccountType: compute.StorageAccountTypesPremiumLRS,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Tags: map[string]*string{
|
||||
"poolName": &poolName,
|
||||
},
|
||||
Zones: &[]string{"1", "2"},
|
||||
Location: to.StringPtr("westus"),
|
||||
}
|
||||
|
||||
template, err := buildNodeTemplateFromVMSS(vmss, map[string]string{}, "")
|
||||
assert.NoError(t, err)
|
||||
|
||||
manager := &AzureManager{}
|
||||
node, err := buildNodeFromTemplate(testNodeName, template, manager, false, false)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, node)
|
||||
|
||||
// Verify ephemeral storage is set correctly
|
||||
expectedEphemeralStorage := resource.NewQuantity(int64(diskSizeGB)*1024*1024*1024, resource.DecimalSI)
|
||||
ephemeralStorage, exists := node.Status.Capacity[apiv1.ResourceEphemeralStorage]
|
||||
assert.True(t, exists)
|
||||
assert.Equal(t, expectedEphemeralStorage.String(), ephemeralStorage.String())
|
||||
}
|
||||
|
|
|
@ -18,152 +18,436 @@ package azure
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v5"
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config/dynamic"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
|
||||
klog "k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// VMsPool is single instance VM pool
|
||||
// this is a placeholder for now, no real implementation
|
||||
type VMsPool struct {
|
||||
// VMPool represents a group of standalone virtual machines (VMs) with a single SKU.
|
||||
// It is part of a mixed-SKU agent pool (an agent pool with type `VirtualMachines`).
|
||||
// Terminology:
|
||||
// - Agent pool: A node pool in an AKS cluster.
|
||||
// - VMs pool: An agent pool of type `VirtualMachines`, which can contain mixed SKUs.
|
||||
// - VMPool: A subset of VMs within a VMs pool that share the same SKU.
|
||||
type VMPool struct {
|
||||
azureRef
|
||||
manager *AzureManager
|
||||
resourceGroup string
|
||||
agentPoolName string // the virtual machines agentpool that this VMPool belongs to
|
||||
sku string // sku of the VM in the pool
|
||||
|
||||
minSize int
|
||||
maxSize int
|
||||
|
||||
curSize int64
|
||||
// sizeMutex sync.Mutex
|
||||
// lastSizeRefresh time.Time
|
||||
}
|
||||
|
||||
// NewVMsPool creates a new VMsPool
|
||||
func NewVMsPool(spec *dynamic.NodeGroupSpec, am *AzureManager) *VMsPool {
|
||||
nodepool := &VMsPool{
|
||||
azureRef: azureRef{
|
||||
Name: spec.Name,
|
||||
},
|
||||
|
||||
manager: am,
|
||||
resourceGroup: am.config.ResourceGroup,
|
||||
|
||||
curSize: -1,
|
||||
minSize: spec.MinSize,
|
||||
maxSize: spec.MaxSize,
|
||||
// NewVMPool creates a new VMPool - a pool of standalone VMs of a single size.
|
||||
func NewVMPool(spec *dynamic.NodeGroupSpec, am *AzureManager, agentPoolName string, sku string) (*VMPool, error) {
|
||||
if am.azClient.agentPoolClient == nil {
|
||||
return nil, fmt.Errorf("agentPoolClient is nil")
|
||||
}
|
||||
|
||||
return nodepool
|
||||
nodepool := &VMPool{
|
||||
azureRef: azureRef{
|
||||
Name: spec.Name, // in format "<agentPoolName>/<sku>"
|
||||
},
|
||||
manager: am,
|
||||
sku: sku,
|
||||
agentPoolName: agentPoolName,
|
||||
minSize: spec.MinSize,
|
||||
maxSize: spec.MaxSize,
|
||||
}
|
||||
return nodepool, nil
|
||||
}
|
||||
|
||||
// MinSize returns the minimum size the cluster is allowed to scaled down
|
||||
// MinSize returns the minimum size the vmPool is allowed to scaled down
|
||||
// to as provided by the node spec in --node parameter.
|
||||
func (agentPool *VMsPool) MinSize() int {
|
||||
return agentPool.minSize
|
||||
func (vmPool *VMPool) MinSize() int {
|
||||
return vmPool.minSize
|
||||
}
|
||||
|
||||
// Exist is always true since we are initialized with an existing agentpool
|
||||
func (agentPool *VMsPool) Exist() bool {
|
||||
// Exist is always true since we are initialized with an existing vmPool
|
||||
func (vmPool *VMPool) Exist() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Create creates the node group on the cloud provider side.
|
||||
func (agentPool *VMsPool) Create() (cloudprovider.NodeGroup, error) {
|
||||
func (vmPool *VMPool) Create() (cloudprovider.NodeGroup, error) {
|
||||
return nil, cloudprovider.ErrAlreadyExist
|
||||
}
|
||||
|
||||
// Delete deletes the node group on the cloud provider side.
|
||||
func (agentPool *VMsPool) Delete() error {
|
||||
func (vmPool *VMPool) Delete() error {
|
||||
return cloudprovider.ErrNotImplemented
|
||||
}
|
||||
|
||||
// ForceDeleteNodes deletes nodes from the group regardless of constraints.
|
||||
func (vmPool *VMPool) ForceDeleteNodes(nodes []*apiv1.Node) error {
|
||||
return cloudprovider.ErrNotImplemented
|
||||
}
|
||||
|
||||
// Autoprovisioned is always false since we are initialized with an existing agentpool
|
||||
func (agentPool *VMsPool) Autoprovisioned() bool {
|
||||
func (vmPool *VMPool) Autoprovisioned() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// GetOptions returns NodeGroupAutoscalingOptions that should be used for this particular
|
||||
// NodeGroup. Returning a nil will result in using default options.
|
||||
func (agentPool *VMsPool) GetOptions(defaults config.NodeGroupAutoscalingOptions) (*config.NodeGroupAutoscalingOptions, error) {
|
||||
// TODO(wenxuan): Implement this method
|
||||
return nil, cloudprovider.ErrNotImplemented
|
||||
func (vmPool *VMPool) GetOptions(defaults config.NodeGroupAutoscalingOptions) (*config.NodeGroupAutoscalingOptions, error) {
|
||||
// TODO(wenxuan): implement this method when vmPool can fully support GPU nodepool
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// MaxSize returns the maximum size scale limit provided by --node
|
||||
// parameter to the autoscaler main
|
||||
func (agentPool *VMsPool) MaxSize() int {
|
||||
return agentPool.maxSize
|
||||
func (vmPool *VMPool) MaxSize() int {
|
||||
return vmPool.maxSize
|
||||
}
|
||||
|
||||
// TargetSize returns the current TARGET size of the node group. It is possible that the
|
||||
// number is different from the number of nodes registered in Kubernetes.
|
||||
func (agentPool *VMsPool) TargetSize() (int, error) {
|
||||
// TODO(wenxuan): Implement this method
|
||||
return -1, cloudprovider.ErrNotImplemented
|
||||
// TargetSize returns the current target size of the node group. This value represents
|
||||
// the desired number of nodes in the VMPool, which may differ from the actual number
|
||||
// of nodes currently present.
|
||||
func (vmPool *VMPool) TargetSize() (int, error) {
|
||||
// VMs in the "Deleting" state are not counted towards the target size.
|
||||
size, err := vmPool.getCurSize(skipOption{skipDeleting: true, skipFailed: false})
|
||||
return int(size), err
|
||||
}
|
||||
|
||||
// IncreaseSize increase the size through a PUT AP call. It calculates the expected size
|
||||
// based on a delta provided as parameter
|
||||
func (agentPool *VMsPool) IncreaseSize(delta int) error {
|
||||
// TODO(wenxuan): Implement this method
|
||||
return cloudprovider.ErrNotImplemented
|
||||
// IncreaseSize increases the size of the VMPool by sending a PUT request to update the agent pool.
|
||||
// This method waits until the asynchronous PUT operation completes or the client-side timeout is reached.
|
||||
func (vmPool *VMPool) IncreaseSize(delta int) error {
|
||||
if delta <= 0 {
|
||||
return fmt.Errorf("size increase must be positive, current delta: %d", delta)
|
||||
}
|
||||
|
||||
// Skip VMs in the failed state so that a PUT AP will be triggered to fix the failed VMs.
|
||||
currentSize, err := vmPool.getCurSize(skipOption{skipDeleting: true, skipFailed: true})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if int(currentSize)+delta > vmPool.MaxSize() {
|
||||
return fmt.Errorf("size-increasing request of %d is bigger than max size %d", int(currentSize)+delta, vmPool.MaxSize())
|
||||
}
|
||||
|
||||
updateCtx, cancel := getContextWithTimeout(vmsAsyncContextTimeout)
|
||||
defer cancel()
|
||||
|
||||
versionedAP, err := vmPool.getAgentpoolFromCache()
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to get vmPool %s, error: %s", vmPool.agentPoolName, err)
|
||||
return err
|
||||
}
|
||||
|
||||
count := currentSize + int32(delta)
|
||||
requestBody := armcontainerservice.AgentPool{}
|
||||
// self-hosted CAS will be using Manual scale profile
|
||||
if len(versionedAP.Properties.VirtualMachinesProfile.Scale.Manual) > 0 {
|
||||
requestBody = buildRequestBodyForScaleUp(versionedAP, count, vmPool.sku)
|
||||
|
||||
} else { // AKS-managed CAS will use custom header for setting the target count
|
||||
header := make(http.Header)
|
||||
header.Set("Target-Count", fmt.Sprintf("%d", count))
|
||||
updateCtx = policy.WithHTTPHeader(updateCtx, header)
|
||||
}
|
||||
|
||||
defer vmPool.manager.invalidateCache()
|
||||
poller, err := vmPool.manager.azClient.agentPoolClient.BeginCreateOrUpdate(
|
||||
updateCtx,
|
||||
vmPool.manager.config.ClusterResourceGroup,
|
||||
vmPool.manager.config.ClusterName,
|
||||
vmPool.agentPoolName,
|
||||
requestBody, nil)
|
||||
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to scale up agentpool %s in cluster %s for vmPool %s with error: %v",
|
||||
vmPool.agentPoolName, vmPool.manager.config.ClusterName, vmPool.Name, err)
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := poller.PollUntilDone(updateCtx, nil /*default polling interval is 30s*/); err != nil {
|
||||
klog.Errorf("agentPoolClient.BeginCreateOrUpdate for aks cluster %s agentpool %s for scaling up vmPool %s failed with error %s",
|
||||
vmPool.manager.config.ClusterName, vmPool.agentPoolName, vmPool.Name, err)
|
||||
return err
|
||||
}
|
||||
|
||||
klog.Infof("Successfully scaled up agentpool %s in cluster %s for vmPool %s to size %d",
|
||||
vmPool.agentPoolName, vmPool.manager.config.ClusterName, vmPool.Name, count)
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteNodes extracts the providerIDs from the node spec and
|
||||
// delete or deallocate the nodes from the agent pool based on the scale down policy.
|
||||
func (agentPool *VMsPool) DeleteNodes(nodes []*apiv1.Node) error {
|
||||
// TODO(wenxuan): Implement this method
|
||||
return cloudprovider.ErrNotImplemented
|
||||
// buildRequestBodyForScaleUp builds the request body for scale up for self-hosted CAS
|
||||
func buildRequestBodyForScaleUp(agentpool armcontainerservice.AgentPool, count int32, vmSku string) armcontainerservice.AgentPool {
|
||||
requestBody := armcontainerservice.AgentPool{
|
||||
Properties: &armcontainerservice.ManagedClusterAgentPoolProfileProperties{
|
||||
Type: agentpool.Properties.Type,
|
||||
},
|
||||
}
|
||||
|
||||
// the request body must have the same mode as the original agentpool
|
||||
// otherwise the PUT request will fail
|
||||
if agentpool.Properties.Mode != nil &&
|
||||
*agentpool.Properties.Mode == armcontainerservice.AgentPoolModeSystem {
|
||||
systemMode := armcontainerservice.AgentPoolModeSystem
|
||||
requestBody.Properties.Mode = &systemMode
|
||||
}
|
||||
|
||||
// set the count of the matching manual scale profile to the new target value
|
||||
for _, manualProfile := range agentpool.Properties.VirtualMachinesProfile.Scale.Manual {
|
||||
if manualProfile != nil && len(manualProfile.Sizes) == 1 &&
|
||||
strings.EqualFold(to.String(manualProfile.Sizes[0]), vmSku) {
|
||||
klog.V(5).Infof("Found matching manual profile for VM SKU: %s, updating count to: %d", vmSku, count)
|
||||
manualProfile.Count = to.Int32Ptr(count)
|
||||
requestBody.Properties.VirtualMachinesProfile = agentpool.Properties.VirtualMachinesProfile
|
||||
break
|
||||
}
|
||||
}
|
||||
return requestBody
|
||||
}
|
||||
|
||||
// ForceDeleteNodes deletes nodes from the group regardless of constraints.
|
||||
func (agentPool *VMsPool) ForceDeleteNodes(nodes []*apiv1.Node) error {
|
||||
return cloudprovider.ErrNotImplemented
|
||||
// DeleteNodes removes the specified nodes from the VMPool by extracting their providerIDs
|
||||
// and performing the appropriate delete or deallocate operation based on the agent pool's
|
||||
// scale-down policy. This method waits for the asynchronous delete operation to complete,
|
||||
// with a client-side timeout.
|
||||
func (vmPool *VMPool) DeleteNodes(nodes []*apiv1.Node) error {
|
||||
// Ensure we don't scale below the minimum size by excluding VMs in the "Deleting" state.
|
||||
currentSize, err := vmPool.getCurSize(skipOption{skipDeleting: true, skipFailed: false})
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to retrieve current size: %w", err)
|
||||
}
|
||||
|
||||
if int(currentSize) <= vmPool.MinSize() {
|
||||
return fmt.Errorf("cannot delete nodes as minimum size of %d has been reached", vmPool.MinSize())
|
||||
}
|
||||
|
||||
providerIDs, err := vmPool.getProviderIDsForNodes(nodes)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to retrieve provider IDs for nodes: %w", err)
|
||||
}
|
||||
|
||||
if len(providerIDs) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
klog.V(3).Infof("Deleting nodes from vmPool %s: %v", vmPool.Name, providerIDs)
|
||||
|
||||
machineNames := make([]*string, len(providerIDs))
|
||||
for i, providerID := range providerIDs {
|
||||
// extract the machine name from the providerID by splitting the providerID by '/' and get the last element
|
||||
// The providerID look like this:
|
||||
// "azure:///subscriptions/0000000-0000-0000-0000-00000000000/resourceGroups/mc_myrg_mycluster_eastus/providers/Microsoft.Compute/virtualMachines/aks-mypool-12345678-vms0"
|
||||
machineName, err := resourceName(providerID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
machineNames[i] = &machineName
|
||||
}
|
||||
|
||||
requestBody := armcontainerservice.AgentPoolDeleteMachinesParameter{
|
||||
MachineNames: machineNames,
|
||||
}
|
||||
|
||||
deleteCtx, cancel := getContextWithTimeout(vmsAsyncContextTimeout)
|
||||
defer cancel()
|
||||
defer vmPool.manager.invalidateCache()
|
||||
|
||||
poller, err := vmPool.manager.azClient.agentPoolClient.BeginDeleteMachines(
|
||||
deleteCtx,
|
||||
vmPool.manager.config.ClusterResourceGroup,
|
||||
vmPool.manager.config.ClusterName,
|
||||
vmPool.agentPoolName,
|
||||
requestBody, nil)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to delete nodes from agentpool %s in cluster %s with error: %v",
|
||||
vmPool.agentPoolName, vmPool.manager.config.ClusterName, err)
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := poller.PollUntilDone(deleteCtx, nil); err != nil {
|
||||
klog.Errorf("agentPoolClient.BeginDeleteMachines for aks cluster %s for scaling down vmPool %s failed with error %s",
|
||||
vmPool.manager.config.ClusterName, vmPool.agentPoolName, err)
|
||||
return err
|
||||
}
|
||||
klog.Infof("Successfully deleted %d nodes from vmPool %s", len(providerIDs), vmPool.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vmPool *VMPool) getProviderIDsForNodes(nodes []*apiv1.Node) ([]string, error) {
|
||||
var providerIDs []string
|
||||
for _, node := range nodes {
|
||||
belongs, err := vmPool.Belongs(node)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to check if node %s belongs to vmPool %s: %w", node.Name, vmPool.Name, err)
|
||||
}
|
||||
if !belongs {
|
||||
return nil, fmt.Errorf("node %s does not belong to vmPool %s", node.Name, vmPool.Name)
|
||||
}
|
||||
providerIDs = append(providerIDs, node.Spec.ProviderID)
|
||||
}
|
||||
return providerIDs, nil
|
||||
}
|
||||
|
||||
// Belongs returns true if the given k8s node belongs to this vms nodepool.
|
||||
func (vmPool *VMPool) Belongs(node *apiv1.Node) (bool, error) {
|
||||
klog.V(6).Infof("Check if node belongs to this vmPool:%s, node:%v\n", vmPool, node)
|
||||
|
||||
ref := &azureRef{
|
||||
Name: node.Spec.ProviderID,
|
||||
}
|
||||
|
||||
nodeGroup, err := vmPool.manager.GetNodeGroupForInstance(ref)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if nodeGroup == nil {
|
||||
return false, fmt.Errorf("%s doesn't belong to a known node group", node.Name)
|
||||
}
|
||||
if !strings.EqualFold(nodeGroup.Id(), vmPool.Id()) {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// DecreaseTargetSize decreases the target size of the node group.
|
||||
func (agentPool *VMsPool) DecreaseTargetSize(delta int) error {
|
||||
// TODO(wenxuan): Implement this method
|
||||
return cloudprovider.ErrNotImplemented
|
||||
func (vmPool *VMPool) DecreaseTargetSize(delta int) error {
|
||||
// The TargetSize of a VMPool is automatically adjusted after node deletions.
|
||||
// This method is invoked in scenarios such as (see details in clusterstate.go):
|
||||
// - len(readiness.Registered) > acceptableRange.CurrentTarget
|
||||
// - len(readiness.Registered) < acceptableRange.CurrentTarget - unregisteredNodes
|
||||
|
||||
// For VMPool, this method should not be called because:
|
||||
// CurrentTarget = len(readiness.Registered) + unregisteredNodes - len(nodesInDeletingState)
|
||||
// Here, nodesInDeletingState is a subset of unregisteredNodes,
|
||||
// ensuring len(readiness.Registered) is always within the acceptable range.
|
||||
|
||||
// here we just invalidate the cache to avoid any potential bugs
|
||||
vmPool.manager.invalidateCache()
|
||||
klog.Warningf("DecreaseTargetSize called for VMPool %s, but it should not be used, invalidating cache", vmPool.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Id returns the name of the agentPool
|
||||
func (agentPool *VMsPool) Id() string {
|
||||
return agentPool.azureRef.Name
|
||||
// Id returns the name of the agentPool, it is in the format of <agentpoolname>/<sku>
|
||||
// e.g. mypool1/Standard_D2s_v3
|
||||
func (vmPool *VMPool) Id() string {
|
||||
return vmPool.azureRef.Name
|
||||
}
|
||||
|
||||
// Debug returns a string with basic details of the agentPool
|
||||
func (agentPool *VMsPool) Debug() string {
|
||||
return fmt.Sprintf("%s (%d:%d)", agentPool.Id(), agentPool.MinSize(), agentPool.MaxSize())
|
||||
func (vmPool *VMPool) Debug() string {
|
||||
return fmt.Sprintf("%s (%d:%d)", vmPool.Id(), vmPool.MinSize(), vmPool.MaxSize())
|
||||
}
|
||||
|
||||
func (agentPool *VMsPool) getVMsFromCache() ([]compute.VirtualMachine, error) {
|
||||
// vmsPoolMap is a map of agent pool name to the list of virtual machines
|
||||
vmsPoolMap := agentPool.manager.azureCache.getVirtualMachines()
|
||||
if _, ok := vmsPoolMap[agentPool.Name]; !ok {
|
||||
return []compute.VirtualMachine{}, fmt.Errorf("vms pool %s not found in the cache", agentPool.Name)
|
||||
func isSpotAgentPool(ap armcontainerservice.AgentPool) bool {
|
||||
if ap.Properties != nil && ap.Properties.ScaleSetPriority != nil {
|
||||
return strings.EqualFold(string(*ap.Properties.ScaleSetPriority), "Spot")
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// skipOption is used to determine whether to skip VMs in certain states when calculating the current size of the vmPool.
|
||||
type skipOption struct {
|
||||
// skipDeleting indicates whether to skip VMs in the "Deleting" state.
|
||||
skipDeleting bool
|
||||
// skipFailed indicates whether to skip VMs in the "Failed" state.
|
||||
skipFailed bool
|
||||
}
|
||||
|
||||
// getCurSize determines the current count of VMs in the vmPool, including unregistered ones.
|
||||
// The source of truth depends on the pool type (spot or non-spot).
|
||||
func (vmPool *VMPool) getCurSize(op skipOption) (int32, error) {
|
||||
agentPool, err := vmPool.getAgentpoolFromCache()
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to retrieve agent pool %s from cache: %v", vmPool.agentPoolName, err)
|
||||
return -1, err
|
||||
}
|
||||
|
||||
return vmsPoolMap[agentPool.Name], nil
|
||||
// spot pool size is retrieved directly from Azure instead of the cache
|
||||
if isSpotAgentPool(agentPool) {
|
||||
return vmPool.getSpotPoolSize()
|
||||
}
|
||||
|
||||
// non-spot pool size is retrieved from the cache
|
||||
vms, err := vmPool.getVMsFromCache(op)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to get VMs from cache for agentpool %s with error: %v", vmPool.agentPoolName, err)
|
||||
return -1, err
|
||||
}
|
||||
return int32(len(vms)), nil
|
||||
}
|
||||
|
||||
// getSpotPoolSize retrieves the current size of a spot agent pool directly from Azure.
|
||||
func (vmPool *VMPool) getSpotPoolSize() (int32, error) {
|
||||
ap, err := vmPool.getAgentpoolFromAzure()
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to get agentpool %s from Azure with error: %v", vmPool.agentPoolName, err)
|
||||
return -1, err
|
||||
}
|
||||
|
||||
if ap.Properties != nil {
|
||||
// the VirtualMachineNodesStatus returned by AKS-RP is constructed from the vm list returned from CRP.
|
||||
// it only contains VMs in the running state.
|
||||
for _, status := range ap.Properties.VirtualMachineNodesStatus {
|
||||
if status != nil {
|
||||
if strings.EqualFold(to.String(status.Size), vmPool.sku) {
|
||||
return to.Int32(status.Count), nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return -1, fmt.Errorf("failed to get the size of spot agentpool %s", vmPool.agentPoolName)
|
||||
}
|
||||
|
||||
// getVMsFromCache retrieves the list of virtual machines in this VMPool.
|
||||
// If excludeDeleting is true, it skips VMs in the "Deleting" state.
|
||||
// https://learn.microsoft.com/en-us/azure/virtual-machines/states-billing#provisioning-states
|
||||
func (vmPool *VMPool) getVMsFromCache(op skipOption) ([]compute.VirtualMachine, error) {
|
||||
vmsMap := vmPool.manager.azureCache.getVirtualMachines()
|
||||
var filteredVMs []compute.VirtualMachine
|
||||
|
||||
for _, vm := range vmsMap[vmPool.agentPoolName] {
|
||||
if vm.VirtualMachineProperties == nil ||
|
||||
vm.VirtualMachineProperties.HardwareProfile == nil ||
|
||||
!strings.EqualFold(string(vm.HardwareProfile.VMSize), vmPool.sku) {
|
||||
continue
|
||||
}
|
||||
|
||||
if op.skipDeleting && strings.Contains(to.String(vm.VirtualMachineProperties.ProvisioningState), "Deleting") {
|
||||
klog.V(4).Infof("Skipping VM %s in deleting state", to.String(vm.ID))
|
||||
continue
|
||||
}
|
||||
|
||||
if op.skipFailed && strings.Contains(to.String(vm.VirtualMachineProperties.ProvisioningState), "Failed") {
|
||||
klog.V(4).Infof("Skipping VM %s in failed state", to.String(vm.ID))
|
||||
continue
|
||||
}
|
||||
|
||||
filteredVMs = append(filteredVMs, vm)
|
||||
}
|
||||
return filteredVMs, nil
|
||||
}
|
||||
|
||||
// Nodes returns the list of nodes in the vms agentPool.
|
||||
func (agentPool *VMsPool) Nodes() ([]cloudprovider.Instance, error) {
|
||||
vms, err := agentPool.getVMsFromCache()
|
||||
func (vmPool *VMPool) Nodes() ([]cloudprovider.Instance, error) {
|
||||
vms, err := vmPool.getVMsFromCache(skipOption{}) // no skip option, get all VMs
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
nodes := make([]cloudprovider.Instance, 0, len(vms))
|
||||
for _, vm := range vms {
|
||||
if len(*vm.ID) == 0 {
|
||||
if vm.ID == nil || len(*vm.ID) == 0 {
|
||||
continue
|
||||
}
|
||||
resourceID, err := convertResourceGroupNameToLower("azure://" + *vm.ID)
|
||||
resourceID, err := convertResourceGroupNameToLower("azure://" + to.String(vm.ID))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -173,12 +457,53 @@ func (agentPool *VMsPool) Nodes() ([]cloudprovider.Instance, error) {
|
|||
return nodes, nil
|
||||
}
|
||||
|
||||
// TemplateNodeInfo is not implemented.
|
||||
func (agentPool *VMsPool) TemplateNodeInfo() (*framework.NodeInfo, error) {
|
||||
return nil, cloudprovider.ErrNotImplemented
|
||||
// TemplateNodeInfo returns a NodeInfo object that can be used to create a new node in the vmPool.
|
||||
func (vmPool *VMPool) TemplateNodeInfo() (*framework.NodeInfo, error) {
|
||||
ap, err := vmPool.getAgentpoolFromCache()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
inputLabels := map[string]string{}
|
||||
inputTaints := ""
|
||||
template, err := buildNodeTemplateFromVMPool(ap, vmPool.manager.config.Location, vmPool.sku, inputLabels, inputTaints)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
node, err := buildNodeFromTemplate(vmPool.agentPoolName, template, vmPool.manager, vmPool.manager.config.EnableDynamicInstanceList, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
nodeInfo := framework.NewNodeInfo(node, nil, &framework.PodInfo{Pod: cloudprovider.BuildKubeProxy(vmPool.agentPoolName)})
|
||||
|
||||
return nodeInfo, nil
|
||||
}
|
||||
|
||||
func (vmPool *VMPool) getAgentpoolFromCache() (armcontainerservice.AgentPool, error) {
|
||||
vmsPoolMap := vmPool.manager.azureCache.getVMsPoolMap()
|
||||
if _, exists := vmsPoolMap[vmPool.agentPoolName]; !exists {
|
||||
return armcontainerservice.AgentPool{}, fmt.Errorf("VMs agent pool %s not found in cache", vmPool.agentPoolName)
|
||||
}
|
||||
return vmsPoolMap[vmPool.agentPoolName], nil
|
||||
}
|
||||
|
||||
// getAgentpoolFromAzure returns the AKS agentpool from Azure
|
||||
func (vmPool *VMPool) getAgentpoolFromAzure() (armcontainerservice.AgentPool, error) {
|
||||
ctx, cancel := getContextWithTimeout(vmsContextTimeout)
|
||||
defer cancel()
|
||||
resp, err := vmPool.manager.azClient.agentPoolClient.Get(
|
||||
ctx,
|
||||
vmPool.manager.config.ClusterResourceGroup,
|
||||
vmPool.manager.config.ClusterName,
|
||||
vmPool.agentPoolName, nil)
|
||||
if err != nil {
|
||||
return resp.AgentPool, fmt.Errorf("failed to get agentpool %s in cluster %s with error: %v",
|
||||
vmPool.agentPoolName, vmPool.manager.config.ClusterName, err)
|
||||
}
|
||||
return resp.AgentPool, nil
|
||||
}
|
||||
|
||||
// AtomicIncreaseSize is not implemented.
|
||||
func (agentPool *VMsPool) AtomicIncreaseSize(delta int) error {
|
||||
func (vmPool *VMPool) AtomicIncreaseSize(delta int) error {
|
||||
return cloudprovider.ErrNotImplemented
|
||||
}
|
||||
|
|
|
@ -17,39 +17,64 @@ limitations under the License.
|
|||
package azure
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v5"
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"go.uber.org/mock/gomock"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config"
|
||||
|
||||
"k8s.io/autoscaler/cluster-autoscaler/config/dynamic"
|
||||
"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/mockvmclient"
|
||||
)
|
||||
|
||||
func newTestVMsPool(manager *AzureManager, name string) *VMsPool {
|
||||
return &VMsPool{
|
||||
const (
|
||||
vmSku = "Standard_D2_v2"
|
||||
vmsAgentPoolName = "test-vms-pool"
|
||||
vmsNodeGroupName = vmsAgentPoolName + "/" + vmSku
|
||||
fakeVMsNodeName = "aks-" + vmsAgentPoolName + "-13222729-vms%d"
|
||||
fakeVMsPoolVMID = "/subscriptions/test-subscription-id/resourceGroups/test-rg/providers/Microsoft.Compute/virtualMachines/" + fakeVMsNodeName
|
||||
)
|
||||
|
||||
func newTestVMsPool(manager *AzureManager) *VMPool {
|
||||
return &VMPool{
|
||||
azureRef: azureRef{
|
||||
Name: name,
|
||||
Name: vmsNodeGroupName,
|
||||
},
|
||||
manager: manager,
|
||||
minSize: 3,
|
||||
maxSize: 10,
|
||||
manager: manager,
|
||||
minSize: 3,
|
||||
maxSize: 10,
|
||||
agentPoolName: vmsAgentPoolName,
|
||||
sku: vmSku,
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
fakeVMsPoolVMID = "/subscriptions/test-subscription-id/resourceGroups/test-rg/providers/Microsoft.Compute/virtualMachines/%d"
|
||||
)
|
||||
|
||||
func newTestVMsPoolVMList(count int) []compute.VirtualMachine {
|
||||
var vmList []compute.VirtualMachine
|
||||
|
||||
for i := 0; i < count; i++ {
|
||||
vm := compute.VirtualMachine{
|
||||
ID: to.StringPtr(fmt.Sprintf(fakeVMsPoolVMID, i)),
|
||||
VirtualMachineProperties: &compute.VirtualMachineProperties{
|
||||
VMID: to.StringPtr(fmt.Sprintf("123E4567-E89B-12D3-A456-426655440000-%d", i)),
|
||||
HardwareProfile: &compute.HardwareProfile{
|
||||
VMSize: compute.VirtualMachineSizeTypes(vmSku),
|
||||
},
|
||||
ProvisioningState: to.StringPtr("Succeeded"),
|
||||
},
|
||||
Tags: map[string]*string{
|
||||
agentpoolTypeTag: to.StringPtr("VirtualMachines"),
|
||||
agentpoolNameTag: to.StringPtr("test-vms-pool"),
|
||||
agentpoolNameTag: to.StringPtr(vmsAgentPoolName),
|
||||
},
|
||||
}
|
||||
vmList = append(vmList, vm)
|
||||
|
@ -57,11 +82,486 @@ func newTestVMsPoolVMList(count int) []compute.VirtualMachine {
|
|||
return vmList
|
||||
}
|
||||
|
||||
func newVMsNode(vmID int64) *apiv1.Node {
|
||||
node := &apiv1.Node{
|
||||
func newVMsNode(vmIdx int64) *apiv1.Node {
|
||||
return &apiv1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf(fakeVMsNodeName, vmIdx),
|
||||
},
|
||||
Spec: apiv1.NodeSpec{
|
||||
ProviderID: "azure://" + fmt.Sprintf(fakeVMsPoolVMID, vmID),
|
||||
ProviderID: "azure://" + fmt.Sprintf(fakeVMsPoolVMID, vmIdx),
|
||||
},
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
func getTestVMsAgentPool(isSystemPool bool) armcontainerservice.AgentPool {
|
||||
mode := armcontainerservice.AgentPoolModeUser
|
||||
if isSystemPool {
|
||||
mode = armcontainerservice.AgentPoolModeSystem
|
||||
}
|
||||
vmsPoolType := armcontainerservice.AgentPoolTypeVirtualMachines
|
||||
return armcontainerservice.AgentPool{
|
||||
Name: to.StringPtr(vmsAgentPoolName),
|
||||
Properties: &armcontainerservice.ManagedClusterAgentPoolProfileProperties{
|
||||
Type: &vmsPoolType,
|
||||
Mode: &mode,
|
||||
VirtualMachinesProfile: &armcontainerservice.VirtualMachinesProfile{
|
||||
Scale: &armcontainerservice.ScaleProfile{
|
||||
Manual: []*armcontainerservice.ManualScaleProfile{
|
||||
{
|
||||
Count: to.Int32Ptr(3),
|
||||
Sizes: []*string{to.StringPtr(vmSku)},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
VirtualMachineNodesStatus: []*armcontainerservice.VirtualMachineNodes{
|
||||
{
|
||||
Count: to.Int32Ptr(3),
|
||||
Size: to.StringPtr(vmSku),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewVMsPool(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
mockAgentpoolclient := NewMockAgentPoolsClient(ctrl)
|
||||
manager := newTestAzureManager(t)
|
||||
manager.azClient.agentPoolClient = mockAgentpoolclient
|
||||
manager.config.ResourceGroup = "MC_rg"
|
||||
manager.config.ClusterResourceGroup = "rg"
|
||||
manager.config.ClusterName = "mycluster"
|
||||
|
||||
spec := &dynamic.NodeGroupSpec{
|
||||
Name: vmsAgentPoolName,
|
||||
MinSize: 1,
|
||||
MaxSize: 10,
|
||||
}
|
||||
|
||||
ap, err := NewVMPool(spec, manager, vmsAgentPoolName, vmSku)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, vmsAgentPoolName, ap.azureRef.Name)
|
||||
assert.Equal(t, 1, ap.minSize)
|
||||
assert.Equal(t, 10, ap.maxSize)
|
||||
}
|
||||
|
||||
func TestMinSize(t *testing.T) {
|
||||
agentPool := &VMPool{
|
||||
minSize: 1,
|
||||
}
|
||||
|
||||
assert.Equal(t, 1, agentPool.MinSize())
|
||||
}
|
||||
|
||||
func TestExist(t *testing.T) {
|
||||
agentPool := &VMPool{}
|
||||
|
||||
assert.True(t, agentPool.Exist())
|
||||
}
|
||||
func TestCreate(t *testing.T) {
|
||||
agentPool := &VMPool{}
|
||||
|
||||
nodeGroup, err := agentPool.Create()
|
||||
assert.Nil(t, nodeGroup)
|
||||
assert.Equal(t, cloudprovider.ErrAlreadyExist, err)
|
||||
}
|
||||
|
||||
func TestDelete(t *testing.T) {
|
||||
agentPool := &VMPool{}
|
||||
|
||||
err := agentPool.Delete()
|
||||
assert.Equal(t, cloudprovider.ErrNotImplemented, err)
|
||||
}
|
||||
|
||||
func TestAutoprovisioned(t *testing.T) {
|
||||
agentPool := &VMPool{}
|
||||
|
||||
assert.False(t, agentPool.Autoprovisioned())
|
||||
}
|
||||
|
||||
func TestGetOptions(t *testing.T) {
|
||||
agentPool := &VMPool{}
|
||||
defaults := config.NodeGroupAutoscalingOptions{}
|
||||
|
||||
options, err := agentPool.GetOptions(defaults)
|
||||
assert.Nil(t, options)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
func TestMaxSize(t *testing.T) {
|
||||
agentPool := &VMPool{
|
||||
maxSize: 10,
|
||||
}
|
||||
|
||||
assert.Equal(t, 10, agentPool.MaxSize())
|
||||
}
|
||||
|
||||
func TestDecreaseTargetSize(t *testing.T) {
|
||||
agentPool := newTestVMsPool(newTestAzureManager(t))
|
||||
|
||||
err := agentPool.DecreaseTargetSize(1)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
func TestId(t *testing.T) {
|
||||
agentPool := &VMPool{
|
||||
azureRef: azureRef{
|
||||
Name: "test-id",
|
||||
},
|
||||
}
|
||||
|
||||
assert.Equal(t, "test-id", agentPool.Id())
|
||||
}
|
||||
|
||||
func TestDebug(t *testing.T) {
|
||||
agentPool := &VMPool{
|
||||
azureRef: azureRef{
|
||||
Name: "test-debug",
|
||||
},
|
||||
minSize: 1,
|
||||
maxSize: 5,
|
||||
}
|
||||
|
||||
expectedDebugString := "test-debug (1:5)"
|
||||
assert.Equal(t, expectedDebugString, agentPool.Debug())
|
||||
}
|
||||
func TestTemplateNodeInfo(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
ap := newTestVMsPool(newTestAzureManager(t))
|
||||
ap.manager.config.EnableVMsAgentPool = true
|
||||
mockAgentpoolclient := NewMockAgentPoolsClient(ctrl)
|
||||
ap.manager.azClient.agentPoolClient = mockAgentpoolclient
|
||||
agentpool := getTestVMsAgentPool(false)
|
||||
fakeAPListPager := getFakeAgentpoolListPager(&agentpool)
|
||||
mockAgentpoolclient.EXPECT().NewListPager(gomock.Any(), gomock.Any(), nil).
|
||||
Return(fakeAPListPager)
|
||||
|
||||
ac, err := newAzureCache(ap.manager.azClient, refreshInterval, *ap.manager.config)
|
||||
assert.NoError(t, err)
|
||||
ap.manager.azureCache = ac
|
||||
|
||||
nodeInfo, err := ap.TemplateNodeInfo()
|
||||
assert.NotNil(t, nodeInfo)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
func TestAtomicIncreaseSize(t *testing.T) {
|
||||
agentPool := &VMPool{}
|
||||
|
||||
err := agentPool.AtomicIncreaseSize(1)
|
||||
assert.Equal(t, cloudprovider.ErrNotImplemented, err)
|
||||
}
|
||||
|
||||
func TestGetVMsFromCache(t *testing.T) {
|
||||
manager := &AzureManager{
|
||||
azureCache: &azureCache{
|
||||
virtualMachines: make(map[string][]compute.VirtualMachine),
|
||||
vmsPoolMap: make(map[string]armcontainerservice.AgentPool),
|
||||
},
|
||||
}
|
||||
agentPool := &VMPool{
|
||||
manager: manager,
|
||||
agentPoolName: vmsAgentPoolName,
|
||||
sku: vmSku,
|
||||
}
|
||||
|
||||
// Test case 1 - when the vms pool is not found in the cache
|
||||
vms, err := agentPool.getVMsFromCache(skipOption{})
|
||||
assert.Nil(t, err)
|
||||
assert.Len(t, vms, 0)
|
||||
|
||||
// Test case 2 - when the vms pool is found in the cache but has no VMs
|
||||
manager.azureCache.virtualMachines[vmsAgentPoolName] = []compute.VirtualMachine{}
|
||||
vms, err = agentPool.getVMsFromCache(skipOption{})
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, vms, 0)
|
||||
|
||||
// Test case 3 - when the vms pool is found in the cache and has VMs
|
||||
manager.azureCache.virtualMachines[vmsAgentPoolName] = newTestVMsPoolVMList(3)
|
||||
vms, err = agentPool.getVMsFromCache(skipOption{})
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, vms, 3)
|
||||
|
||||
// Test case 4 - should skip failed VMs
|
||||
vmList := newTestVMsPoolVMList(3)
|
||||
vmList[0].VirtualMachineProperties.ProvisioningState = to.StringPtr("Failed")
|
||||
manager.azureCache.virtualMachines[vmsAgentPoolName] = vmList
|
||||
vms, err = agentPool.getVMsFromCache(skipOption{skipFailed: true})
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, vms, 2)
|
||||
|
||||
// Test case 5 - should skip deleting VMs
|
||||
vmList = newTestVMsPoolVMList(3)
|
||||
vmList[0].VirtualMachineProperties.ProvisioningState = to.StringPtr("Deleting")
|
||||
manager.azureCache.virtualMachines[vmsAgentPoolName] = vmList
|
||||
vms, err = agentPool.getVMsFromCache(skipOption{skipDeleting: true})
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, vms, 2)
|
||||
|
||||
// Test case 6 - should not skip deleting VMs
|
||||
vmList = newTestVMsPoolVMList(3)
|
||||
vmList[0].VirtualMachineProperties.ProvisioningState = to.StringPtr("Deleting")
|
||||
manager.azureCache.virtualMachines[vmsAgentPoolName] = vmList
|
||||
vms, err = agentPool.getVMsFromCache(skipOption{skipFailed: true})
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, vms, 3)
|
||||
|
||||
// Test case 7 - when the vms pool is found in the cache and has VMs with no name
|
||||
manager.azureCache.virtualMachines[vmsAgentPoolName] = newTestVMsPoolVMList(3)
|
||||
agentPool.agentPoolName = ""
|
||||
vms, err = agentPool.getVMsFromCache(skipOption{})
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, vms, 0)
|
||||
}
|
||||
|
||||
func TestGetVMsFromCacheForVMsPool(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
ap := newTestVMsPool(newTestAzureManager(t))
|
||||
|
||||
expectedVMs := newTestVMsPoolVMList(2)
|
||||
mockVMClient := mockvmclient.NewMockInterface(ctrl)
|
||||
ap.manager.azClient.virtualMachinesClient = mockVMClient
|
||||
ap.manager.config.EnableVMsAgentPool = true
|
||||
mockAgentpoolclient := NewMockAgentPoolsClient(ctrl)
|
||||
ap.manager.azClient.agentPoolClient = mockAgentpoolclient
|
||||
mockVMClient.EXPECT().List(gomock.Any(), ap.manager.config.ResourceGroup).Return(expectedVMs, nil)
|
||||
|
||||
agentpool := getTestVMsAgentPool(false)
|
||||
fakeAPListPager := getFakeAgentpoolListPager(&agentpool)
|
||||
mockAgentpoolclient.EXPECT().NewListPager(gomock.Any(), gomock.Any(), nil).
|
||||
Return(fakeAPListPager)
|
||||
|
||||
ac, err := newAzureCache(ap.manager.azClient, refreshInterval, *ap.manager.config)
|
||||
assert.NoError(t, err)
|
||||
ac.enableVMsAgentPool = true
|
||||
ap.manager.azureCache = ac
|
||||
|
||||
vms, err := ap.getVMsFromCache(skipOption{})
|
||||
assert.Equal(t, 2, len(vms))
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestNodes(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
ap := newTestVMsPool(newTestAzureManager(t))
|
||||
expectedVMs := newTestVMsPoolVMList(2)
|
||||
|
||||
mockVMClient := mockvmclient.NewMockInterface(ctrl)
|
||||
ap.manager.azClient.virtualMachinesClient = mockVMClient
|
||||
mockVMClient.EXPECT().List(gomock.Any(), ap.manager.config.ResourceGroup).Return(expectedVMs, nil)
|
||||
|
||||
ap.manager.config.EnableVMsAgentPool = true
|
||||
mockAgentpoolclient := NewMockAgentPoolsClient(ctrl)
|
||||
ap.manager.azClient.agentPoolClient = mockAgentpoolclient
|
||||
agentpool := getTestVMsAgentPool(false)
|
||||
fakeAPListPager := getFakeAgentpoolListPager(&agentpool)
|
||||
mockAgentpoolclient.EXPECT().NewListPager(gomock.Any(), gomock.Any(), nil).
|
||||
Return(fakeAPListPager)
|
||||
|
||||
ac, err := newAzureCache(ap.manager.azClient, refreshInterval, *ap.manager.config)
|
||||
assert.NoError(t, err)
|
||||
ap.manager.azureCache = ac
|
||||
|
||||
vms, err := ap.Nodes()
|
||||
assert.Equal(t, 2, len(vms))
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestGetCurSizeForVMsPool(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
ap := newTestVMsPool(newTestAzureManager(t))
|
||||
expectedVMs := newTestVMsPoolVMList(3)
|
||||
|
||||
mockVMClient := mockvmclient.NewMockInterface(ctrl)
|
||||
ap.manager.azClient.virtualMachinesClient = mockVMClient
|
||||
mockVMClient.EXPECT().List(gomock.Any(), ap.manager.config.ResourceGroup).Return(expectedVMs, nil)
|
||||
|
||||
ap.manager.config.EnableVMsAgentPool = true
|
||||
mockAgentpoolclient := NewMockAgentPoolsClient(ctrl)
|
||||
ap.manager.azClient.agentPoolClient = mockAgentpoolclient
|
||||
agentpool := getTestVMsAgentPool(false)
|
||||
fakeAPListPager := getFakeAgentpoolListPager(&agentpool)
|
||||
mockAgentpoolclient.EXPECT().NewListPager(gomock.Any(), gomock.Any(), nil).
|
||||
Return(fakeAPListPager)
|
||||
|
||||
ac, err := newAzureCache(ap.manager.azClient, refreshInterval, *ap.manager.config)
|
||||
assert.NoError(t, err)
|
||||
ap.manager.azureCache = ac
|
||||
|
||||
curSize, err := ap.getCurSize(skipOption{})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, int32(3), curSize)
|
||||
}
|
||||
|
||||
func TestVMsPoolIncreaseSize(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
manager := newTestAzureManager(t)
|
||||
|
||||
ap := newTestVMsPool(manager)
|
||||
expectedVMs := newTestVMsPoolVMList(3)
|
||||
|
||||
mockVMClient := mockvmclient.NewMockInterface(ctrl)
|
||||
ap.manager.azClient.virtualMachinesClient = mockVMClient
|
||||
mockVMClient.EXPECT().List(gomock.Any(), ap.manager.config.ResourceGroup).Return(expectedVMs, nil)
|
||||
|
||||
ap.manager.config.EnableVMsAgentPool = true
|
||||
mockAgentpoolclient := NewMockAgentPoolsClient(ctrl)
|
||||
ap.manager.azClient.agentPoolClient = mockAgentpoolclient
|
||||
agentpool := getTestVMsAgentPool(false)
|
||||
fakeAPListPager := getFakeAgentpoolListPager(&agentpool)
|
||||
mockAgentpoolclient.EXPECT().NewListPager(gomock.Any(), gomock.Any(), nil).
|
||||
Return(fakeAPListPager)
|
||||
|
||||
ac, err := newAzureCache(ap.manager.azClient, refreshInterval, *ap.manager.config)
|
||||
assert.NoError(t, err)
|
||||
ap.manager.azureCache = ac
|
||||
|
||||
// failure case 1
|
||||
err1 := ap.IncreaseSize(-1)
|
||||
expectedErr := fmt.Errorf("size increase must be positive, current delta: -1")
|
||||
assert.Equal(t, expectedErr, err1)
|
||||
|
||||
// failure case 2
|
||||
err2 := ap.IncreaseSize(8)
|
||||
expectedErr = fmt.Errorf("size-increasing request of 11 is bigger than max size 10")
|
||||
assert.Equal(t, expectedErr, err2)
|
||||
|
||||
// success case 3
|
||||
resp := &http.Response{
|
||||
Header: map[string][]string{
|
||||
"Fake-Poller-Status": {"Done"},
|
||||
},
|
||||
}
|
||||
|
||||
fakePoller, pollerErr := runtime.NewPoller(resp, runtime.Pipeline{},
|
||||
&runtime.NewPollerOptions[armcontainerservice.AgentPoolsClientCreateOrUpdateResponse]{
|
||||
Handler: &fakehandler[armcontainerservice.AgentPoolsClientCreateOrUpdateResponse]{},
|
||||
})
|
||||
|
||||
assert.NoError(t, pollerErr)
|
||||
|
||||
mockAgentpoolclient.EXPECT().BeginCreateOrUpdate(
|
||||
gomock.Any(), manager.config.ClusterResourceGroup,
|
||||
manager.config.ClusterName,
|
||||
vmsAgentPoolName,
|
||||
gomock.Any(), gomock.Any()).Return(fakePoller, nil)
|
||||
|
||||
err3 := ap.IncreaseSize(1)
|
||||
assert.NoError(t, err3)
|
||||
}
|
||||
|
||||
func TestDeleteVMsPoolNodes_Failed(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
ap := newTestVMsPool(newTestAzureManager(t))
|
||||
node := newVMsNode(0)
|
||||
|
||||
expectedVMs := newTestVMsPoolVMList(3)
|
||||
mockVMClient := mockvmclient.NewMockInterface(ctrl)
|
||||
ap.manager.azClient.virtualMachinesClient = mockVMClient
|
||||
ap.manager.config.EnableVMsAgentPool = true
|
||||
mockAgentpoolclient := NewMockAgentPoolsClient(ctrl)
|
||||
agentpool := getTestVMsAgentPool(false)
|
||||
ap.manager.azClient.agentPoolClient = mockAgentpoolclient
|
||||
fakeAPListPager := getFakeAgentpoolListPager(&agentpool)
|
||||
mockAgentpoolclient.EXPECT().NewListPager(gomock.Any(), gomock.Any(), nil).Return(fakeAPListPager)
|
||||
mockVMClient.EXPECT().List(gomock.Any(), ap.manager.config.ResourceGroup).Return(expectedVMs, nil)
|
||||
|
||||
ap.manager.azureCache.enableVMsAgentPool = true
|
||||
registered := ap.manager.RegisterNodeGroup(ap)
|
||||
assert.True(t, registered)
|
||||
|
||||
ap.manager.explicitlyConfigured[vmsNodeGroupName] = true
|
||||
ap.manager.forceRefresh()
|
||||
|
||||
// failure case
|
||||
deleteErr := ap.DeleteNodes([]*apiv1.Node{node})
|
||||
assert.Error(t, deleteErr)
|
||||
assert.Contains(t, deleteErr.Error(), "cannot delete nodes as minimum size of 3 has been reached")
|
||||
}
|
||||
|
||||
func TestDeleteVMsPoolNodes_Success(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
ap := newTestVMsPool(newTestAzureManager(t))
|
||||
|
||||
expectedVMs := newTestVMsPoolVMList(5)
|
||||
mockVMClient := mockvmclient.NewMockInterface(ctrl)
|
||||
ap.manager.azClient.virtualMachinesClient = mockVMClient
|
||||
ap.manager.config.EnableVMsAgentPool = true
|
||||
mockAgentpoolclient := NewMockAgentPoolsClient(ctrl)
|
||||
agentpool := getTestVMsAgentPool(false)
|
||||
ap.manager.azClient.agentPoolClient = mockAgentpoolclient
|
||||
fakeAPListPager := getFakeAgentpoolListPager(&agentpool)
|
||||
mockAgentpoolclient.EXPECT().NewListPager(gomock.Any(), gomock.Any(), nil).Return(fakeAPListPager)
|
||||
mockVMClient.EXPECT().List(gomock.Any(), ap.manager.config.ResourceGroup).Return(expectedVMs, nil)
|
||||
|
||||
ap.manager.azureCache.enableVMsAgentPool = true
|
||||
registered := ap.manager.RegisterNodeGroup(ap)
|
||||
assert.True(t, registered)
|
||||
|
||||
ap.manager.explicitlyConfigured[vmsNodeGroupName] = true
|
||||
ap.manager.forceRefresh()
|
||||
|
||||
// success case
|
||||
resp := &http.Response{
|
||||
Header: map[string][]string{
|
||||
"Fake-Poller-Status": {"Done"},
|
||||
},
|
||||
}
|
||||
fakePoller, err := runtime.NewPoller(resp, runtime.Pipeline{},
|
||||
&runtime.NewPollerOptions[armcontainerservice.AgentPoolsClientDeleteMachinesResponse]{
|
||||
Handler: &fakehandler[armcontainerservice.AgentPoolsClientDeleteMachinesResponse]{},
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
mockAgentpoolclient.EXPECT().BeginDeleteMachines(
|
||||
gomock.Any(), ap.manager.config.ClusterResourceGroup,
|
||||
ap.manager.config.ClusterName,
|
||||
vmsAgentPoolName,
|
||||
gomock.Any(), gomock.Any()).Return(fakePoller, nil)
|
||||
node := newVMsNode(0)
|
||||
derr := ap.DeleteNodes([]*apiv1.Node{node})
|
||||
assert.NoError(t, derr)
|
||||
}
|
||||
|
||||
type fakehandler[T any] struct{}
|
||||
|
||||
func (f *fakehandler[T]) Done() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (f *fakehandler[T]) Poll(ctx context.Context) (*http.Response, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (f *fakehandler[T]) Result(ctx context.Context, out *T) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func getFakeAgentpoolListPager(agentpool ...*armcontainerservice.AgentPool) *runtime.Pager[armcontainerservice.AgentPoolsClientListResponse] {
|
||||
fakeFetcher := func(ctx context.Context, response *armcontainerservice.AgentPoolsClientListResponse) (armcontainerservice.AgentPoolsClientListResponse, error) {
|
||||
return armcontainerservice.AgentPoolsClientListResponse{
|
||||
AgentPoolListResult: armcontainerservice.AgentPoolListResult{
|
||||
Value: agentpool,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
return runtime.NewPager(runtime.PagingHandler[armcontainerservice.AgentPoolsClientListResponse]{
|
||||
More: func(response armcontainerservice.AgentPoolsClientListResponse) bool {
|
||||
return false
|
||||
},
|
||||
Fetcher: fakeFetcher,
|
||||
})
|
||||
}
|
||||
|
|
|
@ -51,7 +51,7 @@ rules:
|
|||
resources: ["statefulsets", "replicasets", "daemonsets"]
|
||||
verbs: ["watch", "list", "get"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["storageclasses", "csinodes", "csidrivers", "csistoragecapacities"]
|
||||
resources: ["storageclasses", "csinodes", "csidrivers", "csistoragecapacities", "volumeattachments"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["batch"]
|
||||
resources: ["jobs", "cronjobs"]
|
||||
|
@ -125,7 +125,7 @@ data:
|
|||
ClientID: <base64-encoded-client-id>
|
||||
ClientSecret: <base64-encoded-client-secret>
|
||||
ResourceGroup: <base64-encoded-resource-group>
|
||||
SubscriptionID: <base64-encode-subscription-id>
|
||||
SubscriptionID: <base64-encoded-subscription-id>
|
||||
TenantID: <base64-encoded-tenant-id>
|
||||
VMType: QUtTCg==
|
||||
kind: Secret
|
||||
|
@ -152,17 +152,7 @@ spec:
|
|||
spec:
|
||||
serviceAccountName: cluster-autoscaler
|
||||
containers:
|
||||
- image: registry.k8s.io/autoscaling/cluster-autoscaler:{{ ca_version }}
|
||||
imagePullPolicy: Always
|
||||
name: cluster-autoscaler
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 300Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 300Mi
|
||||
command:
|
||||
- command:
|
||||
- ./cluster-autoscaler
|
||||
- --v=3
|
||||
- --logtostderr=true
|
||||
|
@ -200,4 +190,14 @@ spec:
|
|||
secretKeyRef:
|
||||
key: VMType
|
||||
name: cluster-autoscaler-azure
|
||||
image: registry.k8s.io/autoscaling/cluster-autoscaler:{{ ca_version }}
|
||||
imagePullPolicy: Always
|
||||
name: cluster-autoscaler
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 300Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 300Mi
|
||||
restartPolicy: Always
|
||||
|
|
|
@ -51,7 +51,7 @@ rules:
|
|||
resources: ["statefulsets", "replicasets", "daemonsets"]
|
||||
verbs: ["watch", "list", "get"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["storageclasses", "csinodes", "csidrivers", "csistoragecapacities"]
|
||||
resources: ["storageclasses", "csinodes", "csidrivers", "csistoragecapacities", "volumeattachments"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["batch"]
|
||||
resources: ["jobs", "cronjobs"]
|
||||
|
@ -123,7 +123,7 @@ subjects:
|
|||
apiVersion: v1
|
||||
data:
|
||||
ResourceGroup: <base64-encoded-resource-group>
|
||||
SubscriptionID: <base64-encode-subscription-id>
|
||||
SubscriptionID: <base64-encoded-subscription-id>
|
||||
Deployment: <base64-encoded-azure-initial-deploy-name>
|
||||
VMType: c3RhbmRhcmQ=
|
||||
kind: Secret
|
||||
|
|
|
@ -51,7 +51,7 @@ rules:
|
|||
resources: ["statefulsets", "replicasets", "daemonsets"]
|
||||
verbs: ["watch", "list", "get"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["storageclasses", "csinodes", "csidrivers", "csistoragecapacities"]
|
||||
resources: ["storageclasses", "csinodes", "csidrivers", "csistoragecapacities", "volumeattachments"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["batch"]
|
||||
resources: ["jobs", "cronjobs"]
|
||||
|
|
|
@ -51,7 +51,7 @@ rules:
|
|||
resources: ["statefulsets", "replicasets", "daemonsets"]
|
||||
verbs: ["watch", "list", "get"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["storageclasses", "csinodes", "csidrivers", "csistoragecapacities"]
|
||||
resources: ["storageclasses", "csinodes", "csidrivers", "csistoragecapacities", "volumeattachments"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["batch"]
|
||||
resources: ["jobs", "cronjobs"]
|
||||
|
@ -123,7 +123,7 @@ subjects:
|
|||
apiVersion: v1
|
||||
data:
|
||||
ResourceGroup: <base64-encoded-resource-group>
|
||||
SubscriptionID: <base64-encode-subscription-id>
|
||||
SubscriptionID: <base64-encoded-subscription-id>
|
||||
Deployment: <base64-encoded-azure-initial-deploy-name>
|
||||
VMType: c3RhbmRhcmQ=
|
||||
kind: Secret
|
||||
|
|
|
@ -51,7 +51,7 @@ rules:
|
|||
resources: ["statefulsets", "replicasets", "daemonsets"]
|
||||
verbs: ["watch", "list", "get"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["storageclasses", "csinodes", "csidrivers", "csistoragecapacities"]
|
||||
resources: ["storageclasses", "csinodes", "csidrivers", "csistoragecapacities", "volumeattachments"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["batch"]
|
||||
resources: ["jobs", "cronjobs"]
|
||||
|
@ -125,7 +125,7 @@ data:
|
|||
ClientID: <base64-encoded-client-id>
|
||||
ClientSecret: <base64-encoded-client-secret>
|
||||
ResourceGroup: <base64-encoded-resource-group>
|
||||
SubscriptionID: <base64-encode-subscription-id>
|
||||
SubscriptionID: <base64-encoded-subscription-id>
|
||||
TenantID: <base64-encoded-tenant-id>
|
||||
Deployment: <base64-encoded-azure-initial-deploy-name>
|
||||
VMType: c3RhbmRhcmQ=
|
||||
|
|
|
@ -51,7 +51,7 @@ rules:
|
|||
resources: ["statefulsets", "replicasets", "daemonsets"]
|
||||
verbs: ["watch", "list", "get"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["storageclasses", "csinodes", "csidrivers", "csistoragecapacities"]
|
||||
resources: ["storageclasses", "csinodes", "csidrivers", "csistoragecapacities", "volumeattachments"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["batch"]
|
||||
resources: ["jobs", "cronjobs"]
|
||||
|
@ -125,7 +125,7 @@ data:
|
|||
ClientID: <base64-encoded-client-id>
|
||||
ClientSecret: <base64-encoded-client-secret>
|
||||
ResourceGroup: <base64-encoded-resource-group>
|
||||
SubscriptionID: <base64-encode-subscription-id>
|
||||
SubscriptionID: <base64-encoded-subscription-id>
|
||||
TenantID: <base64-encoded-tenant-id>
|
||||
VMType: dm1zcw==
|
||||
kind: Secret
|
||||
|
@ -159,10 +159,7 @@ spec:
|
|||
nodeSelector:
|
||||
kubernetes.io/role: control-plane
|
||||
containers:
|
||||
- image: registry.k8s.io/autoscaling/cluster-autoscaler:{{ ca_version }}
|
||||
imagePullPolicy: Always
|
||||
name: cluster-autoscaler
|
||||
command:
|
||||
- command:
|
||||
- ./cluster-autoscaler
|
||||
- --v=3
|
||||
- --logtostderr=true
|
||||
|
@ -201,6 +198,9 @@ spec:
|
|||
secretKeyRef:
|
||||
key: VMType
|
||||
name: cluster-autoscaler-azure
|
||||
image: registry.k8s.io/autoscaling/cluster-autoscaler:{{ ca_version }}
|
||||
imagePullPolicy: Always
|
||||
name: cluster-autoscaler
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
|
|
|
@ -51,7 +51,7 @@ rules:
|
|||
resources: ["statefulsets", "replicasets", "daemonsets"]
|
||||
verbs: ["watch", "list", "get"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["storageclasses", "csinodes", "csidrivers", "csistoragecapacities"]
|
||||
resources: ["storageclasses", "csinodes", "csidrivers", "csistoragecapacities", "volumeattachments"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["batch"]
|
||||
resources: ["jobs", "cronjobs"]
|
||||
|
@ -123,7 +123,7 @@ subjects:
|
|||
apiVersion: v1
|
||||
data:
|
||||
ResourceGroup: <base64-encoded-resource-group>
|
||||
SubscriptionID: <base64-encode-subscription-id>
|
||||
SubscriptionID: <base64-encoded-subscription-id>
|
||||
VMType: dm1zcw==
|
||||
kind: Secret
|
||||
metadata:
|
||||
|
@ -157,10 +157,7 @@ spec:
|
|||
nodeSelector:
|
||||
kubernetes.io/role: control-plane
|
||||
containers:
|
||||
- image: registry.k8s.io/autoscaling/cluster-autoscaler:{{ ca_version }}
|
||||
imagePullPolicy: Always
|
||||
name: cluster-autoscaler
|
||||
command:
|
||||
- command:
|
||||
- ./cluster-autoscaler
|
||||
- --v=3
|
||||
- --logtostderr=true
|
||||
|
@ -186,6 +183,9 @@ spec:
|
|||
secretKeyRef:
|
||||
key: VMType
|
||||
name: cluster-autoscaler-azure
|
||||
image: registry.k8s.io/autoscaling/cluster-autoscaler:{{ ca_version }}
|
||||
imagePullPolicy: Always
|
||||
name: cluster-autoscaler
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
|
|
|
@ -51,7 +51,7 @@ rules:
|
|||
resources: ["statefulsets", "replicasets", "daemonsets"]
|
||||
verbs: ["watch", "list", "get"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["storageclasses", "csinodes", "csidrivers", "csistoragecapacities"]
|
||||
resources: ["storageclasses", "csinodes", "csidrivers", "csistoragecapacities", "volumeattachments"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["batch"]
|
||||
resources: ["jobs", "cronjobs"]
|
||||
|
@ -125,7 +125,7 @@ data:
|
|||
ClientID: <base64-encoded-client-id>
|
||||
ClientSecret: <base64-encoded-client-secret>
|
||||
ResourceGroup: <base64-encoded-resource-group>
|
||||
SubscriptionID: <base64-encode-subscription-id>
|
||||
SubscriptionID: <base64-encoded-subscription-id>
|
||||
TenantID: <base64-encoded-tenant-id>
|
||||
VMType: dm1zcw==
|
||||
kind: Secret
|
||||
|
@ -152,17 +152,7 @@ spec:
|
|||
spec:
|
||||
serviceAccountName: cluster-autoscaler
|
||||
containers:
|
||||
- image: registry.k8s.io/autoscaling/cluster-autoscaler:{{ ca_version }}
|
||||
imagePullPolicy: Always
|
||||
name: cluster-autoscaler
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 300Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 300Mi
|
||||
command:
|
||||
- command:
|
||||
- ./cluster-autoscaler
|
||||
- --v=3
|
||||
- --logtostderr=true
|
||||
|
@ -201,6 +191,16 @@ spec:
|
|||
secretKeyRef:
|
||||
key: VMType
|
||||
name: cluster-autoscaler-azure
|
||||
image: registry.k8s.io/autoscaling/cluster-autoscaler:{{ ca_version }}
|
||||
imagePullPolicy: Always
|
||||
name: cluster-autoscaler
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 300Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 300Mi
|
||||
volumeMounts:
|
||||
- mountPath: /etc/ssl/certs/ca-certificates.crt
|
||||
name: ssl-certs
|
||||
|
|
|
@ -1,85 +1,75 @@
|
|||
module k8s.io/autoscaler/cluster-autoscaler/cloudprovider/azure/test
|
||||
|
||||
go 1.22.3
|
||||
go 1.24.0
|
||||
|
||||
toolchain go1.24.4
|
||||
|
||||
require (
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0
|
||||
github.com/onsi/ginkgo/v2 v2.19.0
|
||||
github.com/onsi/gomega v1.33.1
|
||||
helm.sh/helm/v3 v3.15.2
|
||||
k8s.io/api v0.30.2
|
||||
k8s.io/apimachinery v0.30.2
|
||||
k8s.io/utils v0.0.0-20230726121419-3b25d923346b
|
||||
sigs.k8s.io/controller-runtime v0.18.4
|
||||
github.com/onsi/ginkgo/v2 v2.23.4
|
||||
github.com/onsi/gomega v1.37.0
|
||||
helm.sh/helm/v3 v3.18.3
|
||||
k8s.io/api v0.34.0-alpha.1
|
||||
k8s.io/apimachinery v0.34.0-alpha.1
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397
|
||||
sigs.k8s.io/controller-runtime v0.21.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
|
||||
dario.cat/mergo v1.0.1 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
|
||||
github.com/BurntSushi/toml v1.3.2 // indirect
|
||||
github.com/BurntSushi/toml v1.5.0 // indirect
|
||||
github.com/MakeNowJust/heredoc v1.0.0 // indirect
|
||||
github.com/Masterminds/goutils v1.1.1 // indirect
|
||||
github.com/Masterminds/semver/v3 v3.2.1 // indirect
|
||||
github.com/Masterminds/sprig/v3 v3.2.3 // indirect
|
||||
github.com/Masterminds/semver/v3 v3.3.0 // indirect
|
||||
github.com/Masterminds/sprig/v3 v3.3.0 // indirect
|
||||
github.com/Masterminds/squirrel v1.5.4 // indirect
|
||||
github.com/Microsoft/hcsshim v0.11.4 // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
github.com/chai2010/gettext-go v1.0.2 // indirect
|
||||
github.com/containerd/containerd v1.7.12 // indirect
|
||||
github.com/containerd/containerd v1.7.27 // indirect
|
||||
github.com/containerd/errdefs v0.3.0 // indirect
|
||||
github.com/containerd/log v0.1.0 // indirect
|
||||
github.com/cyphar/filepath-securejoin v0.2.4 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/distribution/reference v0.5.0 // indirect
|
||||
github.com/docker/cli v25.0.1+incompatible // indirect
|
||||
github.com/docker/distribution v2.8.3+incompatible // indirect
|
||||
github.com/docker/docker v25.0.5+incompatible // indirect
|
||||
github.com/docker/docker-credential-helpers v0.7.0 // indirect
|
||||
github.com/docker/go-connections v0.5.0 // indirect
|
||||
github.com/docker/go-metrics v0.0.1 // indirect
|
||||
github.com/containerd/platforms v0.2.1 // indirect
|
||||
github.com/cyphar/filepath-securejoin v0.4.1 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
|
||||
github.com/evanphx/json-patch v5.7.0+incompatible // indirect
|
||||
github.com/evanphx/json-patch/v5 v5.9.0 // indirect
|
||||
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect
|
||||
github.com/evanphx/json-patch v5.9.11+incompatible // indirect
|
||||
github.com/evanphx/json-patch/v5 v5.9.11 // indirect
|
||||
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
|
||||
github.com/fatih/color v1.13.0 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.3 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.8.0 // indirect
|
||||
github.com/go-errors/errors v1.4.2 // indirect
|
||||
github.com/go-gorp/gorp/v3 v3.1.0 // indirect
|
||||
github.com/go-logr/logr v1.4.1 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.6 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
||||
github.com/go-openapi/swag v0.22.3 // indirect
|
||||
github.com/go-openapi/swag v0.23.0 // indirect
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
|
||||
github.com/gobwas/glob v0.2.3 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/btree v1.0.1 // indirect
|
||||
github.com/google/gnostic-models v0.6.8 // indirect
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 // indirect
|
||||
github.com/google/btree v1.1.3 // indirect
|
||||
github.com/google/gnostic-models v0.6.9 // indirect
|
||||
github.com/google/go-cmp v0.7.0 // indirect
|
||||
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/gorilla/mux v1.8.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
|
||||
github.com/gosuri/uitable v0.0.4 // indirect
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect
|
||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||
github.com/huandu/xstrings v1.4.0 // indirect
|
||||
github.com/imdario/mergo v0.3.13 // indirect
|
||||
github.com/huandu/xstrings v1.5.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/jmoiron/sqlx v1.3.5 // indirect
|
||||
github.com/jmoiron/sqlx v1.4.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/compress v1.16.0 // indirect
|
||||
github.com/klauspost/compress v1.18.0 // indirect
|
||||
github.com/kylelemons/godebug v1.1.0 // indirect
|
||||
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
|
||||
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
|
||||
|
@ -89,71 +79,62 @@ require (
|
|||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-isatty v0.0.17 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.9 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||
github.com/mitchellh/copystructure v1.2.0 // indirect
|
||||
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
|
||||
github.com/mitchellh/reflectwalk v1.0.2 // indirect
|
||||
github.com/moby/locker v1.0.1 // indirect
|
||||
github.com/moby/spdystream v0.2.0 // indirect
|
||||
github.com/moby/term v0.5.0 // indirect
|
||||
github.com/moby/spdystream v0.5.0 // indirect
|
||||
github.com/moby/term v0.5.2 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
|
||||
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.0-rc6 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.1 // indirect
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/prometheus/client_golang v1.16.0 // indirect
|
||||
github.com/prometheus/client_model v0.4.0 // indirect
|
||||
github.com/prometheus/common v0.44.0 // indirect
|
||||
github.com/prometheus/procfs v0.12.0 // indirect
|
||||
github.com/rubenv/sql-migrate v1.5.2 // indirect
|
||||
github.com/rubenv/sql-migrate v1.8.0 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/shopspring/decimal v1.3.1 // indirect
|
||||
github.com/shopspring/decimal v1.4.0 // indirect
|
||||
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||
github.com/spf13/cast v1.5.0 // indirect
|
||||
github.com/spf13/cobra v1.8.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/spf13/cast v1.7.0 // indirect
|
||||
github.com/spf13/cobra v1.9.1 // indirect
|
||||
github.com/spf13/pflag v1.0.6 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
|
||||
github.com/xlab/treeprint v1.2.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect
|
||||
go.opentelemetry.io/otel v1.19.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.19.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.19.0 // indirect
|
||||
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
|
||||
golang.org/x/crypto v0.24.0 // indirect
|
||||
golang.org/x/net v0.26.0 // indirect
|
||||
golang.org/x/oauth2 v0.12.0 // indirect
|
||||
golang.org/x/sync v0.7.0 // indirect
|
||||
golang.org/x/sys v0.21.0 // indirect
|
||||
golang.org/x/term v0.21.0 // indirect
|
||||
golang.org/x/text v0.16.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
|
||||
google.golang.org/grpc v1.58.3 // indirect
|
||||
google.golang.org/protobuf v1.33.0 // indirect
|
||||
go.uber.org/automaxprocs v1.6.0 // indirect
|
||||
golang.org/x/crypto v0.39.0 // indirect
|
||||
golang.org/x/net v0.40.0 // indirect
|
||||
golang.org/x/oauth2 v0.28.0 // indirect
|
||||
golang.org/x/sync v0.15.0 // indirect
|
||||
golang.org/x/sys v0.33.0 // indirect
|
||||
golang.org/x/term v0.32.0 // indirect
|
||||
golang.org/x/text v0.26.0 // indirect
|
||||
golang.org/x/time v0.9.0 // indirect
|
||||
golang.org/x/tools v0.33.0 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect
|
||||
google.golang.org/grpc v1.68.1 // indirect
|
||||
google.golang.org/protobuf v1.36.5 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.30.1 // indirect
|
||||
k8s.io/apiserver v0.30.1 // indirect
|
||||
k8s.io/cli-runtime v0.30.2 // indirect
|
||||
k8s.io/client-go v0.30.2 // indirect
|
||||
k8s.io/component-base v0.30.1 // indirect
|
||||
k8s.io/klog/v2 v2.120.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
|
||||
k8s.io/kubectl v0.30.0 // indirect
|
||||
oras.land/oras-go v1.2.5 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
|
||||
sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.33.1 // indirect
|
||||
k8s.io/apiserver v0.33.1 // indirect
|
||||
k8s.io/cli-runtime v0.33.1 // indirect
|
||||
k8s.io/client-go v0.34.0-alpha.1 // indirect
|
||||
k8s.io/component-base v0.33.1 // indirect
|
||||
k8s.io/klog/v2 v2.130.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
|
||||
k8s.io/kubectl v0.33.1 // indirect
|
||||
oras.land/oras-go/v2 v2.6.0 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
|
||||
sigs.k8s.io/kustomize/api v0.19.0 // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.19.0 // indirect
|
||||
sigs.k8s.io/randfill v1.0.0 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.7.0 // indirect
|
||||
sigs.k8s.io/yaml v1.4.0 // indirect
|
||||
)
|
||||
|
|
|
@ -1,4 +1,7 @@
|
|||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
|
||||
dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
|
||||
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
|
||||
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM=
|
||||
|
@ -13,235 +16,170 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFG
|
|||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0/go.mod h1:LRr2FzBTQlONPPa5HREE5+RjSCTXl7BwOvYOaWTqCaI=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1 h1:7CBQ+Ei8SP2c6ydQTGCCrS35bDxgTMfoP2miAwK++OU=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1/go.mod h1:c/wcGeGx5FUPbM/JltUYHZcKmigwyVLJlDq+4HdtXaw=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
|
||||
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
|
||||
github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
|
||||
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
|
||||
github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
|
||||
github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
|
||||
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
|
||||
github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
|
||||
github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
|
||||
github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0=
|
||||
github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
|
||||
github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA=
|
||||
github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM=
|
||||
github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0=
|
||||
github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
|
||||
github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs=
|
||||
github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0=
|
||||
github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM=
|
||||
github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10=
|
||||
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
|
||||
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
|
||||
github.com/Microsoft/hcsshim v0.11.4 h1:68vKo2VN8DE9AdN4tnkWnmdhqdbpUFM8OF3Airm7fz8=
|
||||
github.com/Microsoft/hcsshim v0.11.4/go.mod h1:smjE4dvqPX9Zldna+t5FG3rnoHhaB7QYxPRqGcpAD9w=
|
||||
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs=
|
||||
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||
github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY=
|
||||
github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
|
||||
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
|
||||
github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70=
|
||||
github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
|
||||
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd h1:rFt+Y/IK1aEZkEHchZRSq9OQbsSzIT/OrI8YFFmRIng=
|
||||
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
|
||||
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ=
|
||||
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
|
||||
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o=
|
||||
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk=
|
||||
github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
|
||||
github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
|
||||
github.com/containerd/containerd v1.7.12 h1:+KQsnv4VnzyxWcfO9mlxxELaoztsDEjOuCMPAuPqgU0=
|
||||
github.com/containerd/containerd v1.7.12/go.mod h1:/5OMpE1p0ylxtEUGY8kuCYkDRzJm9NO1TFMWjUpdevk=
|
||||
github.com/containerd/continuity v0.4.2 h1:v3y/4Yz5jwnvqPKJJ+7Wf93fyWoCB3F5EclWG023MDM=
|
||||
github.com/containerd/continuity v0.4.2/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
|
||||
github.com/containerd/containerd v1.7.27 h1:yFyEyojddO3MIGVER2xJLWoCIn+Up4GaHFquP7hsFII=
|
||||
github.com/containerd/containerd v1.7.27/go.mod h1:xZmPnl75Vc+BLGt4MIfu6bp+fy03gdHAn9bz+FreFR0=
|
||||
github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4=
|
||||
github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
|
||||
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
|
||||
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
|
||||
github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
|
||||
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
|
||||
github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
|
||||
github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
|
||||
github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s=
|
||||
github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2 h1:aBfCb7iqHmDEIp6fBvC/hQUddQfg+3qdYjwzaiP9Hnc=
|
||||
github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2/go.mod h1:WHNsWjnIn2V1LYOrME7e8KxSeKunYHsxEm4am0BUtcI=
|
||||
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
|
||||
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/docker/cli v25.0.1+incompatible h1:mFpqnrS6Hsm3v1k7Wa/BO23oz0k121MTbTO1lpcGSkU=
|
||||
github.com/docker/cli v25.0.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
|
||||
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE=
|
||||
github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A=
|
||||
github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
|
||||
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
|
||||
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
|
||||
github.com/distribution/distribution/v3 v3.0.0 h1:q4R8wemdRQDClzoNNStftB2ZAfqOiN6UX90KJc4HjyM=
|
||||
github.com/distribution/distribution/v3 v3.0.0/go.mod h1:tRNuFoZsUdyRVegq8xGNeds4KLjwLCRin/tTo6i1DhU=
|
||||
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
|
||||
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo=
|
||||
github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
|
||||
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8=
|
||||
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
|
||||
github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
|
||||
github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
|
||||
github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1 h1:ZClxb8laGDf5arXfYcAtECDFgAgHklGI8CxgjHnXKJ4=
|
||||
github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI=
|
||||
github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
|
||||
github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
|
||||
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM=
|
||||
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
|
||||
github.com/evanphx/json-patch v5.9.11+incompatible h1:ixHHqfcGvxhWkniF1tWxBHA0yb4Z+d1UQi45df52xW8=
|
||||
github.com/evanphx/json-patch v5.9.11+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU=
|
||||
github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
|
||||
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4=
|
||||
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc=
|
||||
github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
|
||||
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
|
||||
github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
|
||||
github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/foxcpp/go-mockdns v1.0.0 h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6FI=
|
||||
github.com/foxcpp/go-mockdns v1.0.0/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4=
|
||||
github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
|
||||
github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI=
|
||||
github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk=
|
||||
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
||||
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||
github.com/fxamacker/cbor/v2 v2.8.0 h1:fFtUGXUzXPHTIUdne5+zzMPTfffl3RD5qYnkY40vtxU=
|
||||
github.com/fxamacker/cbor/v2 v2.8.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
|
||||
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
|
||||
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
|
||||
github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs=
|
||||
github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
|
||||
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
|
||||
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
|
||||
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
|
||||
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
|
||||
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
|
||||
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
|
||||
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
|
||||
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
|
||||
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
|
||||
github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
|
||||
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
|
||||
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
|
||||
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
|
||||
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
|
||||
github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
|
||||
github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
||||
github.com/gobuffalo/logger v1.0.6 h1:nnZNpxYo0zx+Aj9RfMPBm+x9zAU2OayFh/xrAWi34HU=
|
||||
github.com/gobuffalo/logger v1.0.6/go.mod h1:J31TBEHR1QLV2683OXTAItYIg8pv2JMHnF/quuAbMjs=
|
||||
github.com/gobuffalo/packd v1.0.1 h1:U2wXfRr4E9DH8IdsDLlRFwTZTK7hLfq9qT/QHXGVe/0=
|
||||
github.com/gobuffalo/packd v1.0.1/go.mod h1:PP2POP3p3RXGz7Jh6eYEf93S7vA2za6xM7QT85L4+VY=
|
||||
github.com/gobuffalo/packr/v2 v2.8.3 h1:xE1yzvnO56cUC0sTpKR3DIbxZgB54AftTFMhB2XEWlY=
|
||||
github.com/gobuffalo/packr/v2 v2.8.3/go.mod h1:0SahksCVcx4IMnigTjiFuyldmTrdTctXsOdiU5KwbKc=
|
||||
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
|
||||
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k=
|
||||
github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0=
|
||||
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
|
||||
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
|
||||
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
|
||||
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
|
||||
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
|
||||
github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
|
||||
github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg=
|
||||
github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
|
||||
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8=
|
||||
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4=
|
||||
github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q=
|
||||
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
|
||||
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE=
|
||||
github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w=
|
||||
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
|
||||
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
|
||||
github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY=
|
||||
github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo=
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM=
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA=
|
||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
|
||||
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
|
||||
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
|
||||
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
|
||||
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
|
||||
github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU=
|
||||
github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
|
||||
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
|
||||
github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
|
||||
github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
|
||||
github.com/hashicorp/golang-lru/arc/v2 v2.0.5 h1:l2zaLDubNhW4XO3LnliVj0GXO3+/CGNJAg1dcN2Fpfw=
|
||||
github.com/hashicorp/golang-lru/arc/v2 v2.0.5/go.mod h1:ny6zBSQZi2JxIeYcv7kt2sH2PXJtirBN7RDhRpxPkxU=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI=
|
||||
github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
|
||||
github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
|
||||
github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o=
|
||||
github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/karrick/godirwalk v1.16.1 h1:DynhcF+bztK8gooS0+NDJFrdNZjJ3gzVzC545UNA9iw=
|
||||
github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4=
|
||||
github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
|
@ -255,19 +193,12 @@ github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq
|
|||
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o=
|
||||
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk=
|
||||
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw=
|
||||
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
|
||||
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
|
||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/markbates/errx v1.1.0 h1:QDFeR+UP95dO12JgW+tgi2UVfo0V8YBHiUIOaeBPiEI=
|
||||
github.com/markbates/errx v1.1.0/go.mod h1:PLa46Oex9KNbVDZhKel8v1OT7hD5JZ2eI7AHhA0wswc=
|
||||
github.com/markbates/oncer v1.0.0 h1:E83IaVAHygyndzPimgUYJjbshhDTALZyXxvk9FOlQRY=
|
||||
github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI=
|
||||
github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI=
|
||||
github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
|
||||
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||
|
@ -278,119 +209,103 @@ github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPn
|
|||
github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
|
||||
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
|
||||
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
|
||||
github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI=
|
||||
github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
github.com/miekg/dns v1.1.25 h1:dFwPR6SfLtrSwgDcIq2bcU/gVutB4sNApq2HBdqcakg=
|
||||
github.com/miekg/dns v1.1.25/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
|
||||
github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
|
||||
github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
|
||||
github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
|
||||
github.com/miekg/dns v1.1.57 h1:Jzi7ApEIzwEPLHWRcafCN9LZSBbqQpxjt/wpgvg7wcM=
|
||||
github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk=
|
||||
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
|
||||
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
|
||||
github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
|
||||
github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
|
||||
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
|
||||
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
|
||||
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
|
||||
github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
|
||||
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
|
||||
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
|
||||
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
|
||||
github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78=
|
||||
github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
|
||||
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
|
||||
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
|
||||
github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU=
|
||||
github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
|
||||
github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
|
||||
github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0=
|
||||
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
|
||||
github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
|
||||
github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
|
||||
github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
|
||||
github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
|
||||
github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus=
|
||||
github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8=
|
||||
github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y=
|
||||
github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.1.0-rc6 h1:XDqvyKsJEbRtATzkgItUqBA7QHk58yxX1Ov9HERHNqU=
|
||||
github.com/opencontainers/image-spec v1.1.0-rc6/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
|
||||
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
|
||||
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
|
||||
github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI=
|
||||
github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE=
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY=
|
||||
github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
|
||||
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
|
||||
github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
|
||||
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
|
||||
github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
|
||||
github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
|
||||
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
|
||||
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
|
||||
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
|
||||
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
|
||||
github.com/rubenv/sql-migrate v1.5.2 h1:bMDqOnrJVV/6JQgQ/MxOpU+AdO8uzYYA/TxFUBzFtS0=
|
||||
github.com/rubenv/sql-migrate v1.5.2/go.mod h1:H38GW8Vqf8F0Su5XignRyaRcbXbJunSWxs+kmzlg0Is=
|
||||
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
|
||||
github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
|
||||
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
|
||||
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
|
||||
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
||||
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
|
||||
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
|
||||
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
||||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||
github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 h1:EaDatTxkdHG+U3Bk4EUr+DZ7fOGwTfezUiUJMaIcaho=
|
||||
github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5/go.mod h1:fyalQWdtzDBECAQFBJuQe5bzQ02jGd5Qcbgb97Flm7U=
|
||||
github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 h1:EfpWLLCyXw8PSM2/XNJLjI3Pb27yVE+gIAfeqp8LUCc=
|
||||
github.com/redis/go-redis/extra/redisotel/v9 v9.0.5/go.mod h1:WZjPDy7VNzn77AAfnAfVjZNvfJTYfPetfZk5yoSTLaQ=
|
||||
github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0wM=
|
||||
github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA=
|
||||
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
|
||||
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
|
||||
github.com/rubenv/sql-migrate v1.8.0 h1:dXnYiJk9k3wetp7GfQbKJcPHjVJL6YK19tKj8t2Ns0o=
|
||||
github.com/rubenv/sql-migrate v1.8.0/go.mod h1:F2bGFBwCU+pnmbtNYDeKvSuvL6lBVtXDXUUv5t+u1qw=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
|
||||
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
|
||||
github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
|
||||
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
|
||||
github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
|
||||
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
|
||||
github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k=
|
||||
github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
|
||||
github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
|
||||
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
|
||||
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w=
|
||||
github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
|
||||
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
|
||||
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
|
||||
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
|
||||
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
|
@ -402,206 +317,171 @@ github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ=
|
|||
github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 h1:+lm10QQTNSBd8DVTNGHx7o/IKu9HYDvLMffDhbyLccI=
|
||||
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
|
||||
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 h1:hlE8//ciYMztlGpl/VA+Zm1AcTPHYkHJPbHqE6WJUXE=
|
||||
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
|
||||
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1:ERexzlUfuTvpE74urLSbIQW0Z/6hF9t8U4NsJLaioAY=
|
||||
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
|
||||
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
||||
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q=
|
||||
go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs=
|
||||
go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY=
|
||||
go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE=
|
||||
go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8=
|
||||
go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg=
|
||||
go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo=
|
||||
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY=
|
||||
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||
go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 h1:UW0+QyeyBVhn+COBec3nGhfnFe5lwB0ic1JBVjzhk0w=
|
||||
go.opentelemetry.io/contrib/bridges/prometheus v0.57.0/go.mod h1:ppciCHRLsyCio54qbzQv0E4Jyth/fLWDTJYfvWpcSVk=
|
||||
go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQa2NQ/Aoi7zA+wy7vMOKD9H4=
|
||||
go.opentelemetry.io/contrib/exporters/autoexport v0.57.0/go.mod h1:EJBheUMttD/lABFyLXhce47Wr6DPWYReCzaZiXadH7g=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
|
||||
go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw=
|
||||
go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 h1:WzNab7hOOLzdDF/EoWCt4glhrbMPVMOO5JYTmpz36Ls=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0/go.mod h1:hKvJwTzJdp90Vh7p6q/9PAOd55dI6WA6sWj62a/JvSs=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 h1:S+LdBGiQXtJdowoJoQPEtI52syEP/JYBUpjO49EQhV8=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0/go.mod h1:5KXybFvPGds3QinJWQT7pmXf+TN5YIa7CNYObWRkj50=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 h1:j7ZSD+5yn+lo3sGV69nW04rRR0jhYnBwjuX3r0HvnK0=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0/go.mod h1:WXbYJTUaZXAbYd8lbgGuvih0yuCfOFC5RJoYnoLcGz8=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 h1:t/Qur3vKSkUCcDVaSumWF2PKHt85pc7fRvFuoVT8qFU=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0/go.mod h1:Rl61tySSdcOJWoEgYZVtmnKdA0GeKrSqkHC1t+91CH8=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 h1:cMyu9O88joYEaI47CnQkxO1XZdpoTF9fEnW2duIddhw=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0/go.mod h1:6Am3rn7P9TVVeXYG+wtcGE7IE1tsQ+bP3AuWcKt/gOI=
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.54.0 h1:rFwzp68QMgtzu9PgP3jm9XaMICI6TsofWWPcBDKwlsU=
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.54.0/go.mod h1:QyjcV9qDP6VeK5qPyKETvNjmaaEc7+gqjh4SS0ZYzDU=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0 h1:CHXNXwfKWfzS65yrlB2PVds1IBZcdsX8Vepy9of0iRU=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0/go.mod h1:zKU4zUgKiaRxrdovSS2amdM5gOc59slmo/zJwGX+YBg=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 h1:SZmDnHcgp3zwlPBS2JX2urGYe/jBKEIT6ZedHRUyCz8=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0/go.mod h1:fdWW0HtZJ7+jNpTKUR0GpMEDP69nR8YBJQxNiVCE3jk=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 h1:cC2yDI3IQd0Udsux7Qmq8ToKAx1XCilTQECZ0KDZyTw=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0/go.mod h1:2PD5Ex6z8CFzDbTdOlwyNIUywRr1DN0ospafJM1wJ+s=
|
||||
go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWertk=
|
||||
go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC32epk5NX8=
|
||||
go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ=
|
||||
go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M=
|
||||
go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM=
|
||||
go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM=
|
||||
go.opentelemetry.io/otel/sdk/log v0.8.0 h1:zg7GUYXqxk1jnGF/dTdLPrK06xJdrXgqgFLnI4Crxvs=
|
||||
go.opentelemetry.io/otel/sdk/log v0.8.0/go.mod h1:50iXr0UVwQrYS45KbruFrEt4LvAdCaWWgIrsN3ZQggo=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ=
|
||||
go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s=
|
||||
go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck=
|
||||
go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg=
|
||||
go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY=
|
||||
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
|
||||
go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
|
||||
go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
|
||||
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
|
||||
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
|
||||
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA=
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
|
||||
golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
|
||||
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w=
|
||||
golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
|
||||
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
|
||||
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4=
|
||||
golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
|
||||
golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
|
||||
golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc=
|
||||
golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
|
||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
|
||||
golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
|
||||
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
|
||||
golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
|
||||
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
|
||||
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
|
||||
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg=
|
||||
golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
|
||||
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
|
||||
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
|
||||
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
|
||||
golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
|
||||
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
|
||||
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
|
||||
golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc=
|
||||
golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
|
||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ=
|
||||
google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
|
||||
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU=
|
||||
google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0=
|
||||
google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw=
|
||||
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
|
||||
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o=
|
||||
gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g=
|
||||
helm.sh/helm/v3 v3.15.2 h1:/3XINUFinJOBjQplGnjw92eLGpgXXp1L8chWPkCkDuw=
|
||||
helm.sh/helm/v3 v3.15.2/go.mod h1:FzSIP8jDQaa6WAVg9F+OkKz7J0ZmAga4MABtTbsb9WQ=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
k8s.io/api v0.30.2 h1:+ZhRj+28QT4UOH+BKznu4CBgPWgkXO7XAvMcMl0qKvI=
|
||||
k8s.io/api v0.30.2/go.mod h1:ULg5g9JvOev2dG0u2hig4Z7tQ2hHIuS+m8MNZ+X6EmI=
|
||||
k8s.io/apiextensions-apiserver v0.30.1 h1:4fAJZ9985BmpJG6PkoxVRpXv9vmPUOVzl614xarePws=
|
||||
k8s.io/apiextensions-apiserver v0.30.1/go.mod h1:R4GuSrlhgq43oRY9sF2IToFh7PVlF1JjfWdoG3pixk4=
|
||||
k8s.io/apimachinery v0.30.2 h1:fEMcnBj6qkzzPGSVsAZtQThU62SmQ4ZymlXRC5yFSCg=
|
||||
k8s.io/apimachinery v0.30.2/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
|
||||
k8s.io/apiserver v0.30.1 h1:BEWEe8bzS12nMtDKXzCF5Q5ovp6LjjYkSp8qOPk8LZ8=
|
||||
k8s.io/apiserver v0.30.1/go.mod h1:i87ZnQ+/PGAmSbD/iEKM68bm1D5reX8fO4Ito4B01mo=
|
||||
k8s.io/cli-runtime v0.30.2 h1:ooM40eEJusbgHNEqnHziN9ZpLN5U4WcQGsdLKVxpkKE=
|
||||
k8s.io/cli-runtime v0.30.2/go.mod h1:Y4g/2XezFyTATQUbvV5WaChoUGhojv/jZAtdp5Zkm0A=
|
||||
k8s.io/client-go v0.30.2 h1:sBIVJdojUNPDU/jObC+18tXWcTJVcwyqS9diGdWHk50=
|
||||
k8s.io/client-go v0.30.2/go.mod h1:JglKSWULm9xlJLx4KCkfLLQ7XwtlbflV6uFFSHTMgVs=
|
||||
k8s.io/component-base v0.30.1 h1:bvAtlPh1UrdaZL20D9+sWxsJljMi0QZ3Lmw+kmZAaxQ=
|
||||
k8s.io/component-base v0.30.1/go.mod h1:e/X9kDiOebwlI41AvBHuWdqFriSRrX50CdwA9TFaHLI=
|
||||
k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
|
||||
k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
|
||||
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
|
||||
k8s.io/kubectl v0.30.0 h1:xbPvzagbJ6RNYVMVuiHArC1grrV5vSmmIcSZuCdzRyk=
|
||||
k8s.io/kubectl v0.30.0/go.mod h1:zgolRw2MQXLPwmic2l/+iHs239L49fhSeICuMhQQXTI=
|
||||
k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
|
||||
k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
oras.land/oras-go v1.2.5 h1:XpYuAwAb0DfQsunIyMfeET92emK8km3W4yEzZvUbsTo=
|
||||
oras.land/oras-go v1.2.5/go.mod h1:PuAwRShRZCsZb7g8Ar3jKKQR/2A/qN+pkYxIOd/FAoo=
|
||||
sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw=
|
||||
sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
||||
sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0=
|
||||
sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3/go.mod h1:9n16EZKMhXBNSiUC5kSdFQJkdH3zbxS/JoO619G1VAY=
|
||||
sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 h1:W6cLQc5pnqM7vh3b7HvGNfXrJ/xL6BDMS0v1V/HHg5U=
|
||||
sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3/go.mod h1:JWP1Fj0VWGHyw3YUPjXSQnRnrwezrZSrApfX5S0nIag=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
|
||||
helm.sh/helm/v3 v3.18.3 h1:+cvyGKgs7Jt7BN3Klmb4SsG4IkVpA7GAZVGvMz6VO4I=
|
||||
helm.sh/helm/v3 v3.18.3/go.mod h1:wUc4n3txYBocM7S9RjTeZBN9T/b5MjffpcSsWEjSIpw=
|
||||
k8s.io/api v0.34.0-alpha.1 h1:Hye5ehH+riYQU/M/y/F8/L7hE6ZO5QZrH53zxcySa2Q=
|
||||
k8s.io/api v0.34.0-alpha.1/go.mod h1:Dl+4wVA5vZVlN4ckJ34aAQXRDciXazH930XZh92Lubk=
|
||||
k8s.io/apiextensions-apiserver v0.33.1 h1:N7ccbSlRN6I2QBcXevB73PixX2dQNIW0ZRuguEE91zI=
|
||||
k8s.io/apiextensions-apiserver v0.33.1/go.mod h1:uNQ52z1A1Gu75QSa+pFK5bcXc4hq7lpOXbweZgi4dqA=
|
||||
k8s.io/apimachinery v0.34.0-alpha.1 h1:pA/Biuywm6Us4cZb5FLIHi8idQZXq3/8Bw3h2dqtop4=
|
||||
k8s.io/apimachinery v0.34.0-alpha.1/go.mod h1:EZ7eIfFAwky7ktmG4Pu9XWxBxFG++4dxPDOM0GL3abw=
|
||||
k8s.io/apiserver v0.33.1 h1:yLgLUPDVC6tHbNcw5uE9mo1T6ELhJj7B0geifra3Qdo=
|
||||
k8s.io/apiserver v0.33.1/go.mod h1:VMbE4ArWYLO01omz+k8hFjAdYfc3GVAYPrhP2tTKccs=
|
||||
k8s.io/cli-runtime v0.33.1 h1:TvpjEtF71ViFmPeYMj1baZMJR4iWUEplklsUQ7D3quA=
|
||||
k8s.io/cli-runtime v0.33.1/go.mod h1:9dz5Q4Uh8io4OWCLiEf/217DXwqNgiTS/IOuza99VZE=
|
||||
k8s.io/client-go v0.34.0-alpha.1 h1:u9jrtaizUQ1sdchbf5v72ZKC8rj1XI9RAMsDlN4Gcy4=
|
||||
k8s.io/client-go v0.34.0-alpha.1/go.mod h1:MyOhbMoeBUilHgYvjBP7U5BIBkbCUBCdZPzWZuj9i8g=
|
||||
k8s.io/component-base v0.33.1 h1:EoJ0xA+wr77T+G8p6T3l4efT2oNwbqBVKR71E0tBIaI=
|
||||
k8s.io/component-base v0.33.1/go.mod h1:guT/w/6piyPfTgq7gfvgetyXMIh10zuXA6cRRm3rDuY=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
|
||||
k8s.io/kubectl v0.33.1 h1:OJUXa6FV5bap6iRy345ezEjU9dTLxqv1zFTVqmeHb6A=
|
||||
k8s.io/kubectl v0.33.1/go.mod h1:Z07pGqXoP4NgITlPRrnmiM3qnoo1QrK1zjw85Aiz8J0=
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc=
|
||||
oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o=
|
||||
sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8=
|
||||
sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
|
||||
sigs.k8s.io/kustomize/api v0.19.0 h1:F+2HB2mU1MSiR9Hp1NEgoU2q9ItNOaBJl0I4Dlus5SQ=
|
||||
sigs.k8s.io/kustomize/api v0.19.0/go.mod h1:/BbwnivGVcBh1r+8m3tH1VNxJmHSk1PzP5fkP6lbL1o=
|
||||
sigs.k8s.io/kustomize/kyaml v0.19.0 h1:RFge5qsO1uHhwJsu3ipV7RNolC7Uozc0jUBC/61XSlA=
|
||||
sigs.k8s.io/kustomize/kyaml v0.19.0/go.mod h1:FeKD5jEOH+FbZPpqUghBP8mrLjJ3+zD3/rf9NNu1cwY=
|
||||
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
|
||||
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.7.0 h1:qPeWmscJcXP0snki5IYF79Z8xrl8ETFxgMd7wez1XkI=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.7.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
|
||||
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
|
||||
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
//go:build !gce && !aws && !azure && !kubemark && !alicloud && !magnum && !digitalocean && !clusterapi && !huaweicloud && !ionoscloud && !linode && !hetzner && !bizflycloud && !brightbox && !equinixmetal && !oci && !vultr && !tencentcloud && !scaleway && !externalgrpc && !civo && !rancher && !volcengine && !baiducloud && !cherry && !cloudstack && !exoscale && !kamatera && !ovhcloud
|
||||
// +build !gce,!aws,!azure,!kubemark,!alicloud,!magnum,!digitalocean,!clusterapi,!huaweicloud,!ionoscloud,!linode,!hetzner,!bizflycloud,!brightbox,!equinixmetal,!oci,!vultr,!tencentcloud,!scaleway,!externalgrpc,!civo,!rancher,!volcengine,!baiducloud,!cherry,!cloudstack,!exoscale,!kamatera,!ovhcloud
|
||||
//go:build !gce && !aws && !azure && !kubemark && !alicloud && !magnum && !digitalocean && !clusterapi && !huaweicloud && !ionoscloud && !linode && !hetzner && !bizflycloud && !brightbox && !equinixmetal && !oci && !vultr && !tencentcloud && !scaleway && !externalgrpc && !civo && !rancher && !volcengine && !baiducloud && !cherry && !cloudstack && !exoscale && !kamatera && !ovhcloud && !kwok
|
||||
// +build !gce,!aws,!azure,!kubemark,!alicloud,!magnum,!digitalocean,!clusterapi,!huaweicloud,!ionoscloud,!linode,!hetzner,!bizflycloud,!brightbox,!equinixmetal,!oci,!vultr,!tencentcloud,!scaleway,!externalgrpc,!civo,!rancher,!volcengine,!baiducloud,!cherry,!cloudstack,!exoscale,!kamatera,!ovhcloud,!kwok
|
||||
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
|
|
@ -31,7 +31,7 @@ var AvailableCloudProviders = []string{
|
|||
cloudprovider.BizflyCloudProviderName,
|
||||
}
|
||||
|
||||
// DefaultCloudProvider build is Bizflycloud..
|
||||
// DefaultCloudProvider for Bizflycloud-only build is Bizflycloud.
|
||||
const DefaultCloudProvider = cloudprovider.BizflyCloudProviderName
|
||||
|
||||
func buildCloudProvider(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter, _ informers.SharedInformerFactory) cloudprovider.CloudProvider {
|
||||
|
|
|
@ -31,7 +31,7 @@ var AvailableCloudProviders = []string{
|
|||
cloudprovider.BrightboxProviderName,
|
||||
}
|
||||
|
||||
// DefaultCloudProvider is Brightbox
|
||||
// DefaultCloudProvider for Brightbox-only build is Brightbox.
|
||||
const DefaultCloudProvider = cloudprovider.BrightboxProviderName
|
||||
|
||||
func buildCloudProvider(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter, _ informers.SharedInformerFactory) cloudprovider.CloudProvider {
|
||||
|
|
|
@ -31,7 +31,7 @@ var AvailableCloudProviders = []string{
|
|||
cherry.ProviderName,
|
||||
}
|
||||
|
||||
// DefaultCloudProvider for Cherry-only build is Cherry
|
||||
// DefaultCloudProvider for Cherry-only build is Cherry.
|
||||
const DefaultCloudProvider = cherry.ProviderName
|
||||
|
||||
func buildCloudProvider(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter, _ informers.SharedInformerFactory) cloudprovider.CloudProvider {
|
||||
|
|
|
@ -31,7 +31,7 @@ var AvailableCloudProviders = []string{
|
|||
cloudprovider.CivoProviderName,
|
||||
}
|
||||
|
||||
// DefaultCloudProvider for civo-only build is Civo.
|
||||
// DefaultCloudProvider for Civo-only build is Civo.
|
||||
const DefaultCloudProvider = cloudprovider.CivoProviderName
|
||||
|
||||
func buildCloudProvider(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter, _ informers.SharedInformerFactory) cloudprovider.CloudProvider {
|
||||
|
|
|
@ -34,7 +34,7 @@ var AvailableCloudProviders = []string{
|
|||
// DefaultCloudProvider for cloudstack-only build is cloudstack.
|
||||
const DefaultCloudProvider = cloudprovider.CloudStackProviderName
|
||||
|
||||
func BuildCloudStack(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter, _ informers.SharedInformerFactory) cloudprovider.CloudProvider {
|
||||
func buildCloudProvider(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter, _ informers.SharedInformerFactory) cloudprovider.CloudProvider {
|
||||
switch opts.CloudProviderName {
|
||||
case cloudprovider.CloudStackProviderName:
|
||||
return cloudstack.BuildCloudStack(opts, do, rl)
|
||||
|
|
|
@ -31,7 +31,7 @@ var AvailableCloudProviders = []string{
|
|||
cloudprovider.ClusterAPIProviderName,
|
||||
}
|
||||
|
||||
// DefaultCloudProvider for machineapi-only build.
|
||||
// DefaultCloudProvider for Cluster API-only build is Cluster API.
|
||||
const DefaultCloudProvider = cloudprovider.ClusterAPIProviderName
|
||||
|
||||
func buildCloudProvider(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter, _ informers.SharedInformerFactory) cloudprovider.CloudProvider {
|
||||
|
|
|
@ -26,12 +26,12 @@ import (
|
|||
"k8s.io/client-go/informers"
|
||||
)
|
||||
|
||||
// AvailableCloudProviders supported by the digtalocean cloud provider builder.
|
||||
// AvailableCloudProviders supported by the DigitalOcean cloud provider builder.
|
||||
var AvailableCloudProviders = []string{
|
||||
cloudprovider.DigitalOceanProviderName,
|
||||
}
|
||||
|
||||
// DefaultCloudProvider for do-only build is DigitalOcean.
|
||||
// DefaultCloudProvider for DigitalOcean-only build is DigitalOcean.
|
||||
const DefaultCloudProvider = cloudprovider.DigitalOceanProviderName
|
||||
|
||||
func buildCloudProvider(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter, _ informers.SharedInformerFactory) cloudprovider.CloudProvider {
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue