mirror of https://github.com/wasmCloud/wadm.git
Compare commits
607 Commits
v0.5.0-rc.
...
main
| Author | SHA1 | Date |
|---|---|---|
|
|
50d41559ab | |
|
|
a8ea265933 | |
|
|
dad082b6c7 | |
|
|
6271e697ed | |
|
|
b1dd4e650a | |
|
|
5e7e3eddb2 | |
|
|
7f3652c9b4 | |
|
|
7948300cf5 | |
|
|
6eb78120b7 | |
|
|
18464c2ac8 | |
|
|
34b054122b | |
|
|
41f01ba0df | |
|
|
c6f6b44b51 | |
|
|
d3a82c8b2b | |
|
|
4c8c73e603 | |
|
|
30f49b6cef | |
|
|
66502de4f0 | |
|
|
e0ec996d4d | |
|
|
8515084c01 | |
|
|
8c037d3406 | |
|
|
abf0702404 | |
|
|
67d8b25f27 | |
|
|
b376c3ae2b | |
|
|
eec6ca1c03 | |
|
|
cf9ef590b3 | |
|
|
2009753535 | |
|
|
6ffc096379 | |
|
|
62b573183b | |
|
|
254765a5db | |
|
|
9ad8b52ffe | |
|
|
cc394fb963 | |
|
|
4f0be1c2ec | |
|
|
c6177f1ec0 | |
|
|
9ab6ef3f3a | |
|
|
aab70fa276 | |
|
|
04862520cb | |
|
|
d24a275f69 | |
|
|
dc85b32bed | |
|
|
a5a61d2749 | |
|
|
c065b3e17e | |
|
|
4239d6d898 | |
|
|
d240b53a5d | |
|
|
4e014223b8 | |
|
|
96aa54bd5e | |
|
|
67b1d85ba9 | |
|
|
b5133163ae | |
|
|
d5a77cc74c | |
|
|
ef80b684ba | |
|
|
ee40750113 | |
|
|
73dc76b72a | |
|
|
aac1e46d0b | |
|
|
e843cfb824 | |
|
|
0ef3162684 | |
|
|
726a6c0bc7 | |
|
|
f1a3acbf1e | |
|
|
e92e526dfe | |
|
|
15ae8c4d6a | |
|
|
22fc78860f | |
|
|
c7953f95e9 | |
|
|
7f0fc3a396 | |
|
|
37b47154e3 | |
|
|
3c8b0742a5 | |
|
|
8a3d21ce7d | |
|
|
c09d40d335 | |
|
|
0748b04b60 | |
|
|
dc1955370f | |
|
|
ebd113e51a | |
|
|
8def8fe075 | |
|
|
1ae4e8e2cb | |
|
|
db80173177 | |
|
|
6b4946dd32 | |
|
|
897192b894 | |
|
|
d715170d01 | |
|
|
8a1cd9e8e4 | |
|
|
93fbb9f4a3 | |
|
|
6e57d6f197 | |
|
|
b3ebcd2e2a | |
|
|
6c8dd444ba | |
|
|
005d599bcd | |
|
|
86af1498cb | |
|
|
60f0014449 | |
|
|
a329be44a3 | |
|
|
14f7ed1bab | |
|
|
39b79638ad | |
|
|
ac747cd8bc | |
|
|
77f33f08f6 | |
|
|
130c8f4a70 | |
|
|
e9f017b809 | |
|
|
1365854fbb | |
|
|
8164b443fc | |
|
|
445622df2e | |
|
|
e218cdae70 | |
|
|
f74f7f8f54 | |
|
|
734c726f14 | |
|
|
0fba847245 | |
|
|
a2c022b462 | |
|
|
4db8763a0f | |
|
|
7958bfbced | |
|
|
37eb784b82 | |
|
|
16191d081a | |
|
|
a5424b7e4c | |
|
|
2e3abbcba0 | |
|
|
720113d026 | |
|
|
80bba4fb9f | |
|
|
2e474c5d0c | |
|
|
ceda608718 | |
|
|
6b9d6fd26f | |
|
|
44753eb992 | |
|
|
c5694226c8 | |
|
|
c808f7a07a | |
|
|
eaebdd918e | |
|
|
e756aa038f | |
|
|
ba04447356 | |
|
|
386eebd33f | |
|
|
1926bf070f | |
|
|
ddb912553a | |
|
|
bdf06dc5d9 | |
|
|
ffc655e749 | |
|
|
7218266206 | |
|
|
cb00233aaa | |
|
|
7a94b8565c | |
|
|
66ca4cc9f5 | |
|
|
c8e715a088 | |
|
|
a5066c16dd | |
|
|
e4de5fc83e | |
|
|
b26427c3ec | |
|
|
2113aa3781 | |
|
|
55444f27f2 | |
|
|
797eddf5c1 | |
|
|
55be7d8558 | |
|
|
7d59eb4746 | |
|
|
4bb74d04fe | |
|
|
1f902b248c | |
|
|
34fb5e69b2 | |
|
|
efeb6a020d | |
|
|
e492823998 | |
|
|
ad2cb51238 | |
|
|
95633628af | |
|
|
9fbc598eff | |
|
|
830b02545a | |
|
|
9475e4c542 | |
|
|
84d4f48783 | |
|
|
95d256215b | |
|
|
7e97f6e615 | |
|
|
bcc2b7f461 | |
|
|
2aa35a9514 | |
|
|
f504e8c1b2 | |
|
|
7658a4e654 | |
|
|
64e3d93118 | |
|
|
41e6e352cc | |
|
|
d169b1be62 | |
|
|
4676947211 | |
|
|
78e077604e | |
|
|
a7a287ce7b | |
|
|
90dac77412 | |
|
|
ab9ad612ee | |
|
|
18a66b2640 | |
|
|
13faa57248 | |
|
|
b167486f48 | |
|
|
52500b4787 | |
|
|
8df7924598 | |
|
|
59e7e66562 | |
|
|
f88140893b | |
|
|
77f5bc8961 | |
|
|
e67c9e580c | |
|
|
4243efdc8f | |
|
|
40d8b50c0e | |
|
|
5a4c13fe75 | |
|
|
b6b398ecd7 | |
|
|
6fc79d3c81 | |
|
|
7a811a6737 | |
|
|
1448671649 | |
|
|
f596dadcb8 | |
|
|
ca868c5f79 | |
|
|
11aa88b73f | |
|
|
6b768c1607 | |
|
|
c26eb6d2fd | |
|
|
f34b19a79b | |
|
|
532e4930ef | |
|
|
6004c9a136 | |
|
|
4af2a727c3 | |
|
|
d92b0b7e6a | |
|
|
ab26db73b7 | |
|
|
229411893a | |
|
|
e2de3fe6b8 | |
|
|
062130e6f1 | |
|
|
df0bf72cde | |
|
|
dad1bd9f66 | |
|
|
a0da5ef75e | |
|
|
f1d68a87d5 | |
|
|
b67193a9f8 | |
|
|
764e90ba1b | |
|
|
50b672ad30 | |
|
|
265f732fc8 | |
|
|
b2a1082559 | |
|
|
341ae617ec | |
|
|
a6223a3f74 | |
|
|
38cb50f364 | |
|
|
2b50ef2877 | |
|
|
97e9e32066 | |
|
|
c2ae9f2643 | |
|
|
864acfd28e | |
|
|
994b881701 | |
|
|
2cc4092daa | |
|
|
e1d665416e | |
|
|
6e8eb504c9 | |
|
|
7d80eca6aa | |
|
|
54bf5cbb61 | |
|
|
65cfd337f6 | |
|
|
87c64bdcd9 | |
|
|
505debf7ff | |
|
|
c898e2eb20 | |
|
|
5919660776 | |
|
|
c1db5ff946 | |
|
|
163c28269a | |
|
|
e9c7cf4ab1 | |
|
|
f137a9ab60 | |
|
|
d9c3627547 | |
|
|
e8fe31f0ed | |
|
|
18e5566a5e | |
|
|
2561838039 | |
|
|
8c0ea8263d | |
|
|
ae8ab69f24 | |
|
|
61b81112bd | |
|
|
b2207ef41f | |
|
|
0cc63485f4 | |
|
|
31cf33a9b7 | |
|
|
fb2b74532b | |
|
|
ca5a63104a | |
|
|
21feab093f | |
|
|
eb6fce9255 | |
|
|
087203cdbc | |
|
|
6e35596a22 | |
|
|
2d47f32fc5 | |
|
|
2c00cada86 | |
|
|
d1b9d925d2 | |
|
|
db38c50600 | |
|
|
964a586ab6 | |
|
|
6c425a198c | |
|
|
0fb04cfee4 | |
|
|
066eccdbd2 | |
|
|
4bd2560bdd | |
|
|
57e1807be8 | |
|
|
ef32c26fa0 | |
|
|
1c4b706b17 | |
|
|
c48802566e | |
|
|
42cc8672d1 | |
|
|
9272799f62 | |
|
|
cebb511d28 | |
|
|
d0faba952d | |
|
|
f59cfa2f7d | |
|
|
0e78489a56 | |
|
|
466f6ff402 | |
|
|
bd2cc980c7 | |
|
|
955905148c | |
|
|
b9da5ee9f6 | |
|
|
81d41b3cd8 | |
|
|
fbf29a9350 | |
|
|
cfc7c4504a | |
|
|
6f29e72932 | |
|
|
9ac409a28d | |
|
|
1309c9bf1f | |
|
|
54740fbf62 | |
|
|
eb34a928c6 | |
|
|
4d2fc1a406 | |
|
|
08da607ad9 | |
|
|
9972d4d903 | |
|
|
b459bea3fb | |
|
|
b7ef888072 | |
|
|
aa2689ab36 | |
|
|
ec08ba7316 | |
|
|
471f07fe67 | |
|
|
0dbb3d102c | |
|
|
8830527b43 | |
|
|
434aeafbb8 | |
|
|
05d5242d27 | |
|
|
77c012d6d1 | |
|
|
3a066c35c6 | |
|
|
e07481a66c | |
|
|
4b7233af2c | |
|
|
e4d453fa34 | |
|
|
1e2bbc2111 | |
|
|
5fda091b50 | |
|
|
e0d4e23758 | |
|
|
1b768f8d20 | |
|
|
980d8ef926 | |
|
|
12880bf5e1 | |
|
|
75c45fa750 | |
|
|
51692b7156 | |
|
|
eb57ec900a | |
|
|
78caba43e1 | |
|
|
3e769f5708 | |
|
|
e39e1f1c63 | |
|
|
967c047f05 | |
|
|
8724621dc0 | |
|
|
1d085cab07 | |
|
|
fbf06f624e | |
|
|
f1ef62d6cd | |
|
|
a5486595a2 | |
|
|
5c4094c1c7 | |
|
|
55caf37442 | |
|
|
6521d4e2c4 | |
|
|
fa51184cfc | |
|
|
eb0b2eab9b | |
|
|
1136744fe6 | |
|
|
efe9a8a5f6 | |
|
|
ee427db054 | |
|
|
5ea118e235 | |
|
|
5719f0e57e | |
|
|
78343c264e | |
|
|
ee8f8ea555 | |
|
|
2d2320bc61 | |
|
|
71e3138355 | |
|
|
4c31bc24c1 | |
|
|
7aedd8ac5c | |
|
|
2b0dd9efec | |
|
|
c4a3c7978a | |
|
|
89c9e77f6e | |
|
|
fd75aaa8ef | |
|
|
c64f28dd03 | |
|
|
8011f09570 | |
|
|
5a22fd1258 | |
|
|
b1fb8894f6 | |
|
|
2e77266224 | |
|
|
1e2c90645d | |
|
|
b78d4bf1b6 | |
|
|
524579a1f4 | |
|
|
e339b6cae2 | |
|
|
86ce562d7f | |
|
|
d30c092942 | |
|
|
aa074af58b | |
|
|
2fc3f6974b | |
|
|
b9f65ffb0a | |
|
|
ce7c1b4bb2 | |
|
|
b5c471ea2a | |
|
|
afc0d916e3 | |
|
|
a37ab6dd95 | |
|
|
c3d00c714d | |
|
|
b9e1cc611b | |
|
|
203a91f1e0 | |
|
|
78291c79bb | |
|
|
7e03d060b3 | |
|
|
f1237363c1 | |
|
|
5def02caaf | |
|
|
2d6327b943 | |
|
|
c295cf0e33 | |
|
|
e2764c720b | |
|
|
2b43beb831 | |
|
|
066e50e4eb | |
|
|
6f4abcf389 | |
|
|
19cbd5a44d | |
|
|
172db98f1e | |
|
|
72170e9a8e | |
|
|
ffe20a6177 | |
|
|
8d8adfe54e | |
|
|
c6c481f930 | |
|
|
43ba03790a | |
|
|
95bdf2a6bb | |
|
|
50d2b76213 | |
|
|
392347dfe9 | |
|
|
ce536fbdc8 | |
|
|
05cfd3e84e | |
|
|
b0212e548c | |
|
|
409f61fa74 | |
|
|
e020955fac | |
|
|
0755307d78 | |
|
|
bb9650198d | |
|
|
535b5f44f9 | |
|
|
66f54eed4c | |
|
|
39a0857a4b | |
|
|
1dc35d584f | |
|
|
4e624ffd4a | |
|
|
1febfc92d2 | |
|
|
2de7f9eca2 | |
|
|
0d92cd8b92 | |
|
|
cb98a9911b | |
|
|
e4d2f569dc | |
|
|
c6777e6bca | |
|
|
4a5dcae3cf | |
|
|
669791b685 | |
|
|
5c643f9d9b | |
|
|
7549fd3500 | |
|
|
050f4ecbc9 | |
|
|
5cecde8718 | |
|
|
85fb9c4ec7 | |
|
|
bead045d24 | |
|
|
4b0d5171cb | |
|
|
8db4eb791f | |
|
|
aa4cb16a0f | |
|
|
d43e92d5c2 | |
|
|
f597e61680 | |
|
|
e0d9e2f90e | |
|
|
a2ff5a0411 | |
|
|
d699e20866 | |
|
|
24c3be8559 | |
|
|
7ed42a1077 | |
|
|
f529890cca | |
|
|
48e8d4caec | |
|
|
9989da7c5f | |
|
|
544547ca9e | |
|
|
34e95975d5 | |
|
|
32a4bb5c50 | |
|
|
737aa8259f | |
|
|
3274f121b2 | |
|
|
819978970a | |
|
|
3c53f462b6 | |
|
|
ba63c290ea | |
|
|
0366132f9b | |
|
|
fbef4df02f | |
|
|
9e003d6944 | |
|
|
f82f3ddeb7 | |
|
|
b0114f5268 | |
|
|
9cb7732635 | |
|
|
d86b504c6f | |
|
|
d41c8fc9e5 | |
|
|
b03ee1ce32 | |
|
|
46975b0547 | |
|
|
fcec438f82 | |
|
|
aa7017ca3a | |
|
|
600b419088 | |
|
|
87862d4534 | |
|
|
f93cc2cb99 | |
|
|
1087bba408 | |
|
|
956bdeb161 | |
|
|
a92cc37510 | |
|
|
0d329750b3 | |
|
|
85c655d3f6 | |
|
|
a751827f93 | |
|
|
6040ee35be | |
|
|
bbef11d1fe | |
|
|
836d48d6bd | |
|
|
cc396cdae2 | |
|
|
8bdaba7f26 | |
|
|
8f3efe3899 | |
|
|
f3f9ed351b | |
|
|
c82fdc044e | |
|
|
41ed2459d8 | |
|
|
6295e86490 | |
|
|
5d76ab006d | |
|
|
acf80e2748 | |
|
|
e45e1e7e90 | |
|
|
ed2e999b60 | |
|
|
f79d5cdf15 | |
|
|
4d31dc9c1b | |
|
|
272b1029b4 | |
|
|
61c94fc559 | |
|
|
5e3d5272b4 | |
|
|
389664eecd | |
|
|
d9e7de62ee | |
|
|
6e6ad37650 | |
|
|
b70d019799 | |
|
|
7c3c5a5ac7 | |
|
|
a54277ae88 | |
|
|
e63bfc66f1 | |
|
|
81c8207439 | |
|
|
d36c5efd42 | |
|
|
74a71f356d | |
|
|
334db5431c | |
|
|
fde119ffec | |
|
|
b9fd65e990 | |
|
|
4a8117439e | |
|
|
4e14010f39 | |
|
|
f85bed3396 | |
|
|
3eb06a0023 | |
|
|
152eb99f75 | |
|
|
15da88eb2b | |
|
|
c01e4dd7b8 | |
|
|
90c3eebb9c | |
|
|
6a35dd09bc | |
|
|
d9af83ca19 | |
|
|
a5ee1c0e11 | |
|
|
7f5f9247a5 | |
|
|
26f3c84b1b | |
|
|
99fc559b6a | |
|
|
a4e101bd2b | |
|
|
12224b23d0 | |
|
|
903115a6c8 | |
|
|
9817134240 | |
|
|
2dda8601e8 | |
|
|
9243793d5e | |
|
|
b2084a2c91 | |
|
|
346235bcaa | |
|
|
3be058f48b | |
|
|
f1cb64dafd | |
|
|
02d4b1d064 | |
|
|
1c6c627884 | |
|
|
e1838f0b21 | |
|
|
ee302525a6 | |
|
|
2f7e8a9915 | |
|
|
48763ff2ea | |
|
|
e798b1305c | |
|
|
b997d8becd | |
|
|
404523f240 | |
|
|
19690b44f5 | |
|
|
efbbba58ac | |
|
|
b6febbbaa3 | |
|
|
f9558af7b4 | |
|
|
04ca08124f | |
|
|
37f59146a1 | |
|
|
2a311e106f | |
|
|
372cc755b9 | |
|
|
59bfdcdcba | |
|
|
483922153c | |
|
|
85a6f9a830 | |
|
|
d6c6bfdf2c | |
|
|
ec7333312d | |
|
|
3ad452e14b | |
|
|
e672e37f31 | |
|
|
61fe7e2c83 | |
|
|
28bb06b51d | |
|
|
eae4d9e806 | |
|
|
8d38c77dc2 | |
|
|
fd4ae29023 | |
|
|
ea900d0a0c | |
|
|
f83bdff194 | |
|
|
a99d7242f6 | |
|
|
ff0e21a3a7 | |
|
|
d07c29481b | |
|
|
2c55b59203 | |
|
|
dcf8774031 | |
|
|
35b9df8e26 | |
|
|
b30f6e3928 | |
|
|
1703502463 | |
|
|
f8b9f23466 | |
|
|
1506a3b594 | |
|
|
12805a1aea | |
|
|
e5a40286b0 | |
|
|
7416824597 | |
|
|
eec8dd851b | |
|
|
52cfa300cc | |
|
|
3799e1b569 | |
|
|
0686159c73 | |
|
|
6e93fd883a | |
|
|
e2e3efd3d6 | |
|
|
5ae9bbafe3 | |
|
|
49dde268b7 | |
|
|
74c49823f9 | |
|
|
727ac2f153 | |
|
|
aafef190ed | |
|
|
ac9962c807 | |
|
|
4b2237f1eb | |
|
|
dbe3c284dd | |
|
|
86fdecc4ca | |
|
|
48bd5482af | |
|
|
7c5f5d8259 | |
|
|
79bfee3075 | |
|
|
68580bb7dd | |
|
|
571adff020 | |
|
|
f628a17eb2 | |
|
|
54ee1a43ca | |
|
|
0354619dff | |
|
|
1d2c7f082d | |
|
|
acec86fe59 | |
|
|
9bd9e9b5f7 | |
|
|
9ca8b96b08 | |
|
|
b5abc1ba71 | |
|
|
fcc19d86fd | |
|
|
85b53acdbf | |
|
|
30da210e31 | |
|
|
c995f193ec | |
|
|
4d75405c7e | |
|
|
4bae1adb67 | |
|
|
e0f9bc1f3f | |
|
|
f7acd7e865 | |
|
|
68b3503ef3 | |
|
|
78aca99a7a | |
|
|
b68652e292 | |
|
|
6f216c4cc6 | |
|
|
fe2964a773 | |
|
|
15c6b4f2b2 | |
|
|
625cbc3afc | |
|
|
5ee3fa495f | |
|
|
867793f871 | |
|
|
1d3a879c41 | |
|
|
81d754d49c | |
|
|
4813b4fbb0 | |
|
|
c19a3f6c8c | |
|
|
4a6f6fd11c | |
|
|
c02f7ab243 | |
|
|
4778f6a391 | |
|
|
bb41b6b195 | |
|
|
8de866c418 | |
|
|
c34929b136 | |
|
|
d4be374ab0 | |
|
|
c2e30e63a9 | |
|
|
d5b18a0d45 | |
|
|
261dae2604 | |
|
|
064237ab1c | |
|
|
786ca22424 | |
|
|
1321bcd818 | |
|
|
2a378fcd4a | |
|
|
47d54442ab | |
|
|
942a811079 | |
|
|
1b3c76f58a | |
|
|
e7ea2cf3e6 | |
|
|
226bb82ff6 | |
|
|
b1810873d8 | |
|
|
1bc07979d0 | |
|
|
ca2108a1ae | |
|
|
5c1739d8fa | |
|
|
c2b189abcb | |
|
|
f04c4177a2 | |
|
|
ed4430c8e4 | |
|
|
71cd136ae8 | |
|
|
54f1bbdb2b | |
|
|
1b1cb4cae0 | |
|
|
e3cfa7efb5 |
|
|
@ -19,8 +19,7 @@
|
|||
},
|
||||
"extensions": [
|
||||
"rust-lang.rust-analyzer",
|
||||
"tamasfe.even-better-toml",
|
||||
"serayuzgur.crates"
|
||||
"tamasfe.even-better-toml"
|
||||
]
|
||||
}
|
||||
},
|
||||
|
|
|
|||
|
|
@ -0,0 +1,5 @@
|
|||
if ! has nix_direnv_version || ! nix_direnv_version 3.0.6; then
|
||||
source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.6/direnvrc" "sha256-RYcUJaRMf8oF5LznDrlCXbkOQrywm0HDv1VjYGaJGdM="
|
||||
fi
|
||||
watch_file rust-toolchain.toml
|
||||
use flake
|
||||
|
|
@ -1,2 +1,2 @@
|
|||
# wasmCloud team members
|
||||
* @autodidaddict @brooksmtownsend @thomastaylor312 @connorsmith256
|
||||
# wasmCloud wadm maintainers
|
||||
* @wasmCloud/wadm-maintainers
|
||||
|
|
|
|||
|
|
@ -0,0 +1,38 @@
|
|||
name: Install and configure wkg (linux only)
|
||||
|
||||
inputs:
|
||||
wkg-version:
|
||||
description: version of wkg to install. Should be a valid tag from https://github.com/bytecodealliance/wasm-pkg-tools/releases
|
||||
default: "v0.6.0"
|
||||
oci-username:
|
||||
description: username for oci registry
|
||||
required: true
|
||||
oci-password:
|
||||
description: password for oci registry
|
||||
required: true
|
||||
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- name: Download wkg
|
||||
shell: bash
|
||||
run: |
|
||||
curl --fail -L https://github.com/bytecodealliance/wasm-pkg-tools/releases/download/${{ inputs.wkg-version }}/wkg-x86_64-unknown-linux-gnu -o wkg
|
||||
chmod +x wkg;
|
||||
echo "$(realpath .)" >> "$GITHUB_PATH";
|
||||
- name: Generate and set wkg config
|
||||
shell: bash
|
||||
env:
|
||||
WKG_OCI_USERNAME: ${{ inputs.oci-username }}
|
||||
WKG_OCI_PASSWORD: ${{ inputs.oci-password }}
|
||||
run: |
|
||||
cat << EOF > wkg-config.toml
|
||||
[namespace_registries]
|
||||
wasmcloud = "wasmcloud.com"
|
||||
wrpc = "bytecodealliance.org"
|
||||
wasi = "wasi.dev"
|
||||
|
||||
[registry."wasmcloud.com".oci]
|
||||
auth = { username = "${WKG_OCI_USERNAME}", password = "${WKG_OCI_PASSWORD}" }
|
||||
EOF
|
||||
echo "WKG_CONFIG_FILE=$(realpath wkg-config.toml)" >> $GITHUB_ENV
|
||||
|
|
@ -0,0 +1,16 @@
|
|||
version: 2
|
||||
updates:
|
||||
- package-ecosystem: "cargo"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
day: "monday"
|
||||
time: "09:00"
|
||||
timezone: "America/New_York"
|
||||
- package-ecosystem: "github-actions"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
day: "monday"
|
||||
time: "09:00"
|
||||
timezone: "America/New_York"
|
||||
|
|
@ -0,0 +1,6 @@
|
|||
# .github/release.yml
|
||||
|
||||
changelog:
|
||||
exclude:
|
||||
authors:
|
||||
- dependabot
|
||||
|
|
@ -0,0 +1,111 @@
|
|||
name: chart
|
||||
|
||||
env:
|
||||
HELM_VERSION: v3.14.0
|
||||
CHART_TESTING_NAMESPACE: chart-testing
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- 'chart-v[0-9].[0-9]+.[0-9]+'
|
||||
pull_request:
|
||||
paths:
|
||||
- 'charts/**'
|
||||
- '.github/workflows/chart.yml'
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
validate:
|
||||
runs-on: ubuntu-22.04
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Fetch main branch for chart-testing
|
||||
run: |
|
||||
git fetch origin main:main
|
||||
|
||||
- name: Set up Helm
|
||||
uses: azure/setup-helm@b9e51907a09c216f16ebe8536097933489208112 # v4.3.0
|
||||
with:
|
||||
version: ${{ env.HELM_VERSION }}
|
||||
|
||||
# Used by helm chart-testing below
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
|
||||
with:
|
||||
python-version: '3.12.2'
|
||||
|
||||
- name: Set up chart-testing
|
||||
uses: helm/chart-testing-action@0d28d3144d3a25ea2cc349d6e59901c4ff469b3b # v2.7.0
|
||||
with:
|
||||
version: v3.10.1
|
||||
yamllint_version: 1.35.1
|
||||
yamale_version: 5.0.0
|
||||
|
||||
- name: Run chart-testing (lint)
|
||||
run: |
|
||||
ct lint --config charts/wadm/ct.yaml
|
||||
|
||||
- name: Create kind cluster
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
version: "v0.22.0"
|
||||
|
||||
- name: Install nats in the test cluster
|
||||
run: |
|
||||
helm repo add nats https://nats-io.github.io/k8s/helm/charts/
|
||||
helm repo update
|
||||
helm install nats nats/nats -f charts/wadm/ci/nats.yaml --namespace ${{ env.CHART_TESTING_NAMESPACE }} --create-namespace
|
||||
|
||||
- name: Run chart-testing install / same namespace
|
||||
run: |
|
||||
ct install --config charts/wadm/ct.yaml --namespace ${{ env.CHART_TESTING_NAMESPACE }}
|
||||
|
||||
- name: Run chart-testing install / across namespaces
|
||||
run: |
|
||||
ct install --config charts/wadm/ct.yaml --helm-extra-set-args "--set=wadm.config.nats.server=nats://nats-headless.${{ env.CHART_TESTING_NAMESPACE }}.svc.cluster.local"
|
||||
|
||||
publish:
|
||||
if: ${{ startsWith(github.ref, 'refs/tags/chart-v') }}
|
||||
runs-on: ubuntu-22.04
|
||||
needs: validate
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: Set up Helm
|
||||
uses: azure/setup-helm@b9e51907a09c216f16ebe8536097933489208112 # v4.3.0
|
||||
with:
|
||||
version: ${{ env.HELM_VERSION }}
|
||||
|
||||
- name: Package
|
||||
run: |
|
||||
helm package charts/wadm -d .helm-charts
|
||||
|
||||
- name: Login to GHCR
|
||||
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.repository_owner }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Lowercase the organization name for ghcr.io
|
||||
run: |
|
||||
echo "GHCR_REPO_NAMESPACE=${GITHUB_REPOSITORY_OWNER,,}" >>${GITHUB_ENV}
|
||||
|
||||
- name: Publish
|
||||
run: |
|
||||
for chart in .helm-charts/*; do
|
||||
if [ -z "${chart:-}" ]; then
|
||||
break
|
||||
fi
|
||||
helm push "${chart}" "oci://ghcr.io/${{ env.GHCR_REPO_NAMESPACE }}/charts"
|
||||
done
|
||||
|
|
@ -5,6 +5,9 @@ on:
|
|||
branches:
|
||||
- main
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
test:
|
||||
name: e2e
|
||||
|
|
@ -12,36 +15,42 @@ jobs:
|
|||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
e2e_test: [e2e_multiple_hosts, e2e_multitenant, e2e_upgrades]
|
||||
test: [e2e_multiple_hosts, e2e_upgrades, e2e_shared]
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: Install latest Rust stable toolchain
|
||||
uses: actions-rs/toolchain@v1
|
||||
uses: dtolnay/rust-toolchain@1ff72ee08e3cb84d84adba594e0a297990fc1ed3 # stable
|
||||
with:
|
||||
toolchain: stable
|
||||
default: true
|
||||
components: clippy, rustfmt
|
||||
|
||||
# Cache: rust
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
- uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
|
||||
with:
|
||||
key: "ubuntu-22.04-rust-cache"
|
||||
key: 'ubuntu-22.04-rust-cache'
|
||||
|
||||
# If the test uses a docker compose file, pre-emptively pull images used in docker compose
|
||||
- name: Pull images for test ${{ matrix.test }}
|
||||
shell: bash
|
||||
run: |
|
||||
export DOCKER_COMPOSE_FILE=tests/docker-compose-${{ matrix.test }}.yaml;
|
||||
[[ -f "$DOCKER_COMPOSE_FILE" ]] && docker compose -f $DOCKER_COMPOSE_FILE pull;
|
||||
|
||||
# Run e2e tests in a matrix for efficiency
|
||||
- name: Run tests ${{ matrix.e2e_test }}
|
||||
- name: Run tests ${{ matrix.test }}
|
||||
id: test
|
||||
env:
|
||||
WADM_E2E_TEST: ${{ matrix.e2e_test }}
|
||||
WADM_E2E_TEST: ${{ matrix.test }}
|
||||
run: make test-individual-e2e
|
||||
|
||||
# if the previous step fails, upload logs
|
||||
- name: Upload logs for debugging
|
||||
uses: actions/upload-artifact@v2
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
if: ${{ failure() && steps.test.outcome == 'failure' }}
|
||||
with:
|
||||
name: e2e-logs-${{ matrix.e2e_test }}
|
||||
path: ./test/e2e_log/*
|
||||
name: e2e-logs-${{ matrix.test }}
|
||||
path: ./tests/e2e_log/*
|
||||
# Be nice and only retain the logs for 7 days
|
||||
retention-days: 7
|
||||
|
|
|
|||
|
|
@ -4,156 +4,137 @@ on:
|
|||
branches:
|
||||
- main
|
||||
tags:
|
||||
- "v*" # Push events to matching v*, i.e. v1.0, v20.15.10
|
||||
- 'v*'
|
||||
- 'types-v*'
|
||||
- 'client-v*'
|
||||
workflow_dispatch: # Allow manual creation of artifacts without a release
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: build release assets
|
||||
runs-on: ${{ matrix.config.os }}
|
||||
runs-on: ${{ matrix.config.runnerOs }}
|
||||
outputs:
|
||||
version_output: ${{ steps.version_output.outputs.version }}
|
||||
strategy:
|
||||
matrix:
|
||||
config:
|
||||
# NOTE: We are building on an older version of ubuntu because of libc compatibility
|
||||
# issues. Namely, if we build on a new version of libc, it isn't backwards compatible with
|
||||
# old versions. But if we build on the old version, it is compatible with the newer
|
||||
# versions running in ubuntu 22 and its ilk
|
||||
- {
|
||||
os: "ubuntu-20.04",
|
||||
arch: "amd64",
|
||||
extension: "",
|
||||
targetPath: "target/release/",
|
||||
runnerOs: 'ubuntu-latest',
|
||||
buildCommand: 'cargo zigbuild',
|
||||
target: 'x86_64-unknown-linux-musl',
|
||||
uploadArtifactSuffix: 'linux-amd64',
|
||||
buildOutputPath: 'target/x86_64-unknown-linux-musl/release/wadm',
|
||||
}
|
||||
- {
|
||||
os: "ubuntu-20.04",
|
||||
arch: "aarch64",
|
||||
extension: "",
|
||||
targetPath: "target/aarch64-unknown-linux-gnu/release/",
|
||||
runnerOs: 'ubuntu-latest',
|
||||
buildCommand: 'cargo zigbuild',
|
||||
target: 'aarch64-unknown-linux-musl',
|
||||
uploadArtifactSuffix: 'linux-aarch64',
|
||||
buildOutputPath: 'target/aarch64-unknown-linux-musl/release/wadm',
|
||||
}
|
||||
- {
|
||||
os: "macos-latest",
|
||||
arch: "amd64",
|
||||
extension: "",
|
||||
targetPath: "target/release/",
|
||||
runnerOs: 'macos-14',
|
||||
buildCommand: 'cargo zigbuild',
|
||||
target: 'x86_64-apple-darwin',
|
||||
uploadArtifactSuffix: 'macos-amd64',
|
||||
buildOutputPath: 'target/x86_64-apple-darwin/release/wadm',
|
||||
}
|
||||
- {
|
||||
os: "windows-latest",
|
||||
arch: "amd64",
|
||||
extension: ".exe",
|
||||
targetPath: "target/release/",
|
||||
runnerOs: 'macos-14',
|
||||
buildCommand: 'cargo zigbuild',
|
||||
target: 'aarch64-apple-darwin',
|
||||
uploadArtifactSuffix: 'macos-aarch64',
|
||||
buildOutputPath: 'target/aarch64-apple-darwin/release/wadm',
|
||||
}
|
||||
- {
|
||||
os: "macos-latest",
|
||||
arch: "aarch64",
|
||||
extension: "",
|
||||
targetPath: "target/aarch64-apple-darwin/release/",
|
||||
runnerOs: 'windows-latest',
|
||||
buildCommand: 'cargo build',
|
||||
target: 'x86_64-pc-windows-msvc',
|
||||
uploadArtifactSuffix: 'windows-amd64',
|
||||
buildOutputPath: 'target/x86_64-pc-windows-msvc/release/wadm.exe',
|
||||
}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: set the release version (tag)
|
||||
if: startsWith(github.ref, 'refs/tags/v')
|
||||
shell: bash
|
||||
run: echo "RELEASE_VERSION=${GITHUB_REF/refs\/tags\//}" >> $GITHUB_ENV
|
||||
if: ${{ startsWith(github.ref, 'refs/tags/v') }}
|
||||
run: |
|
||||
echo "RELEASE_VERSION=${GITHUB_REF/refs\/tags\//}" >> $GITHUB_ENV
|
||||
|
||||
- name: set the release version (main)
|
||||
if: github.ref == 'refs/heads/main'
|
||||
shell: bash
|
||||
run: echo "RELEASE_VERSION=canary" >> $GITHUB_ENV
|
||||
if: ${{ github.ref == 'refs/heads/main' }}
|
||||
run: |
|
||||
echo "RELEASE_VERSION=canary" >> $GITHUB_ENV
|
||||
|
||||
- name: Output Version
|
||||
id: version_output
|
||||
run: echo "version=$RELEASE_VERSION" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: lowercase the runner OS name
|
||||
shell: bash
|
||||
- name: Install Zig
|
||||
uses: mlugg/setup-zig@8d6198c65fb0feaa111df26e6b467fea8345e46f # v2.0.5
|
||||
with:
|
||||
version: 0.13.0
|
||||
|
||||
- name: Install latest Rust stable toolchain
|
||||
uses: dtolnay/rust-toolchain@1ff72ee08e3cb84d84adba594e0a297990fc1ed3 # stable
|
||||
with:
|
||||
toolchain: stable
|
||||
components: clippy, rustfmt
|
||||
target: ${{ matrix.config.target }}
|
||||
|
||||
- name: Install cargo zigbuild
|
||||
uses: taiki-e/install-action@2c73a741d1544cc346e9b0af11868feba03eb69d # v2.58.9
|
||||
with:
|
||||
tool: cargo-zigbuild
|
||||
|
||||
- name: Build wadm
|
||||
run: |
|
||||
OS=$(echo "${{ runner.os }}" | tr '[:upper:]' '[:lower:]')
|
||||
echo "RUNNER_OS=$OS" >> $GITHUB_ENV
|
||||
${{ matrix.config.buildCommand }} --release --bin wadm --target ${{ matrix.config.target }}
|
||||
|
||||
- name: Install latest Rust stable toolchain
|
||||
uses: dtolnay/rust-toolchain@stable
|
||||
if: matrix.config.arch != 'aarch64'
|
||||
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
with:
|
||||
toolchain: stable
|
||||
components: clippy, rustfmt
|
||||
|
||||
- name: setup for cross-compile builds
|
||||
if: matrix.config.arch == 'aarch64' && matrix.config.os == 'ubuntu-20.04'
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt install gcc-aarch64-linux-gnu g++-aarch64-linux-gnu
|
||||
rustup toolchain install stable-aarch64-unknown-linux-gnu
|
||||
rustup target add --toolchain stable-aarch64-unknown-linux-gnu aarch64-unknown-linux-gnu
|
||||
echo "CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=aarch64-linux-gnu-gcc" >> $GITHUB_ENV
|
||||
echo "CC_aarch64_unknown_linux_gnu=aarch64-linux-gnu-gcc" >> $GITHUB_ENV
|
||||
echo "CXX_aarch64_unknown_linux_gnu=aarch64-linux-gnu-g++" >> $GITHUB_ENV
|
||||
|
||||
- name: Install latest Rust stable toolchain
|
||||
uses: dtolnay/rust-toolchain@stable
|
||||
if: matrix.config.arch == 'aarch64' && matrix.config.os == 'macos-latest'
|
||||
with:
|
||||
toolchain: stable
|
||||
components: clippy, rustfmt
|
||||
target: aarch64-apple-darwin
|
||||
|
||||
- name: Install latest Rust stable toolchain
|
||||
uses: dtolnay/rust-toolchain@stable
|
||||
if: matrix.config.arch == 'aarch64' && matrix.config.os == 'ubuntu-20.04'
|
||||
with:
|
||||
toolchain: stable
|
||||
components: clippy, rustfmt
|
||||
target: aarch64-unknown-linux-gnu
|
||||
|
||||
- name: build release
|
||||
if: matrix.config.arch != 'aarch64'
|
||||
run: "cargo build --release --bin wadm --features cli"
|
||||
|
||||
- name: build release
|
||||
if: matrix.config.arch == 'aarch64' && matrix.config.os == 'macos-latest'
|
||||
run: "cargo build --release --bin wadm --features cli --target aarch64-apple-darwin"
|
||||
|
||||
- name: build release
|
||||
if: matrix.config.arch == 'aarch64' && matrix.config.os == 'ubuntu-20.04'
|
||||
run: "cargo build --release --bin wadm --features cli --target aarch64-unknown-linux-gnu"
|
||||
|
||||
- uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: wadm-${{ env.RELEASE_VERSION }}-${{ env.RUNNER_OS }}-${{ matrix.config.arch }}
|
||||
name: wadm-${{ env.RELEASE_VERSION }}-${{ matrix.config.uploadArtifactSuffix }}
|
||||
if-no-files-found: error
|
||||
path: |
|
||||
${{ matrix.config.targetPath }}wadm${{ matrix.config.extension }}
|
||||
${{ matrix.config.buildOutputPath }}
|
||||
|
||||
publish:
|
||||
if: ${{ startsWith(github.ref, 'refs/tags/v') }}
|
||||
name: publish release assets
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
if: startsWith(github.ref, 'refs/tags/v')
|
||||
permissions:
|
||||
contents: write
|
||||
env:
|
||||
RELEASE_VERSION: ${{ needs.build.outputs.version_output }}
|
||||
steps:
|
||||
- name: download release assets
|
||||
uses: actions/download-artifact@v3
|
||||
- name: Generate Checksums
|
||||
- name: Download release assets
|
||||
uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
|
||||
|
||||
- name: Prepare release
|
||||
run: |
|
||||
for dir in */; do
|
||||
cd "$dir" || continue
|
||||
sum=$(sha256sum * | awk '{ print $1 }')
|
||||
echo "$dir:$sum" >> checksums-${{ env.RELEASE_VERSION }}.txt
|
||||
cd ..
|
||||
test -d "$dir" || continue
|
||||
tarball="${dir%/}.tar.gz"
|
||||
tar -czvf "${tarball}" "$dir"
|
||||
sha256sum "${tarball}" >> SHA256SUMS
|
||||
done
|
||||
- name: Package Binaries
|
||||
run: for dir in */; do tar -czvf "${dir%/}.tar.gz" "$dir"; done
|
||||
- name: Publish to GHCR
|
||||
uses: softprops/action-gh-release@v1
|
||||
|
||||
- name: Create github release
|
||||
uses: softprops/action-gh-release@72f2c25fcb47643c292f7107632f7a47c1df5cd8 # v2.3.2
|
||||
with:
|
||||
token: ${{ secrets.WADM_GITHUB_TOKEN }}
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
prerelease: false
|
||||
draft: false
|
||||
files: |
|
||||
checksums-${{ env.RELEASE_VERSION }}.txt
|
||||
SHA256SUMS
|
||||
wadm-${{ env.RELEASE_VERSION }}-linux-aarch64.tar.gz
|
||||
wadm-${{ env.RELEASE_VERSION }}-linux-amd64.tar.gz
|
||||
wadm-${{ env.RELEASE_VERSION }}-macos-aarch64.tar.gz
|
||||
|
|
@ -161,65 +142,92 @@ jobs:
|
|||
wadm-${{ env.RELEASE_VERSION }}-windows-amd64.tar.gz
|
||||
|
||||
crate:
|
||||
if: ${{ startsWith(github.ref, 'refs/tags/v') || startsWith(github.ref, 'refs/tags/types-v') || startsWith(github.ref, 'refs/tags/client-v') }}
|
||||
name: Publish crate
|
||||
runs-on: ubuntu-latest
|
||||
if: startsWith(github.ref, 'refs/tags/v')
|
||||
needs: build
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- name: Install latest Rust stable toolchain
|
||||
uses: dtolnay/rust-toolchain@stable
|
||||
uses: dtolnay/rust-toolchain@1ff72ee08e3cb84d84adba594e0a297990fc1ed3 # stable
|
||||
with:
|
||||
toolchain: stable
|
||||
- name: Cargo login
|
||||
run: cargo login ${{ secrets.CRATES_TOKEN }}
|
||||
shell: bash
|
||||
|
||||
- name: Cargo publish
|
||||
run: cargo publish
|
||||
shell: bash
|
||||
- name: Cargo login
|
||||
run: |
|
||||
cargo login ${{ secrets.CRATES_TOKEN }}
|
||||
|
||||
- name: Cargo publish wadm-types
|
||||
if: ${{ startsWith(github.ref, 'refs/tags/types-v') }}
|
||||
working-directory: ./crates/wadm-types
|
||||
run: |
|
||||
cargo publish
|
||||
|
||||
- name: Cargo publish wadm lib
|
||||
if: ${{ startsWith(github.ref, 'refs/tags/v') }}
|
||||
working-directory: ./crates/wadm
|
||||
run: |
|
||||
cargo publish
|
||||
|
||||
- name: Cargo publish wadm-client
|
||||
if: ${{ startsWith(github.ref, 'refs/tags/client-v') }}
|
||||
working-directory: ./crates/wadm-client
|
||||
run: |
|
||||
cargo publish
|
||||
|
||||
docker-image:
|
||||
name: Build and push docker images
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
env:
|
||||
RELEASE_VERSION: ${{ needs.build.outputs.version_output }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
|
||||
|
||||
- uses: actions/download-artifact@v3
|
||||
- uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
|
||||
with:
|
||||
name: wadm-${{ env.RELEASE_VERSION }}-linux-aarch64
|
||||
path: ./artifacts
|
||||
- run: mv ./artifacts/wadm ./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-aarch64 && chmod +x ./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-aarch64
|
||||
pattern: '*linux*'
|
||||
|
||||
- uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: wadm-${{ env.RELEASE_VERSION }}-linux-amd64
|
||||
path: ./artifacts
|
||||
- run: mv ./artifacts/wadm ./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-amd64 && chmod +x ./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-amd64
|
||||
- name: Prepare container artifacts
|
||||
working-directory: ./artifacts
|
||||
run: |
|
||||
for dir in */; do
|
||||
name="${dir%/}"
|
||||
mv "${name}/wadm" wadm
|
||||
chmod +x wadm
|
||||
rmdir "${name}"
|
||||
mv wadm "${name}"
|
||||
done
|
||||
|
||||
- name: Login to GitHub Container Registry
|
||||
uses: docker/login-action@v2
|
||||
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.repository_owner }}
|
||||
password: ${{ secrets.WADM_GITHUB_TOKEN }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: lowercase repository owner
|
||||
run: |
|
||||
echo "OWNER=${GITHUB_REPOSITORY_OWNER,,}" >>$GITHUB_ENV
|
||||
|
||||
- name: Set the formatted release version for the docker tag
|
||||
if: ${{ startsWith(github.ref, 'refs/tags/v') }}
|
||||
run: |
|
||||
echo "RELEASE_VERSION_DOCKER_TAG=${RELEASE_VERSION#v}" >> $GITHUB_ENV
|
||||
|
||||
- name: Build and push (tag)
|
||||
uses: docker/build-push-action@v3
|
||||
if: startsWith(github.ref, 'refs/tags/v')
|
||||
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
|
||||
if: ${{ startsWith(github.ref, 'refs/tags/v') }}
|
||||
with:
|
||||
push: true
|
||||
platforms: linux/amd64,linux/arm64
|
||||
|
|
@ -227,11 +235,30 @@ jobs:
|
|||
build-args: |
|
||||
BIN_ARM64=./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-aarch64
|
||||
BIN_AMD64=./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-amd64
|
||||
tags: ghcr.io/${{ env.OWNER }}/wadm:latest,ghcr.io/${{ env.OWNER }}/wadm:${{ env.RELEASE_VERSION }}
|
||||
tags: |
|
||||
ghcr.io/${{ env.OWNER }}/wadm:latest
|
||||
ghcr.io/${{ env.OWNER }}/wadm:${{ env.RELEASE_VERSION }},
|
||||
ghcr.io/${{ env.OWNER }}/wadm:${{ env.RELEASE_VERSION_DOCKER_TAG }}
|
||||
|
||||
- name: Build and push wolfi (tag)
|
||||
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
|
||||
if: ${{ startsWith(github.ref, 'refs/tags/v') }}
|
||||
with:
|
||||
push: true
|
||||
platforms: linux/amd64,linux/arm64
|
||||
context: ./
|
||||
file: ./Dockerfile.wolfi
|
||||
build-args: |
|
||||
BIN_ARM64=./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-aarch64
|
||||
BIN_AMD64=./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-amd64
|
||||
tags: |
|
||||
ghcr.io/${{ env.OWNER }}/wadm:latest-wolfi
|
||||
ghcr.io/${{ env.OWNER }}/wadm:${{ env.RELEASE_VERSION }}-wolfi
|
||||
ghcr.io/${{ env.OWNER }}/wadm:${{ env.RELEASE_VERSION_DOCKER_TAG }}-wolfi
|
||||
|
||||
- name: Build and push (main)
|
||||
uses: docker/build-push-action@v3
|
||||
if: github.ref == 'refs/heads/main'
|
||||
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
|
||||
if: ${{ github.ref == 'refs/heads/main' }}
|
||||
with:
|
||||
push: true
|
||||
platforms: linux/amd64,linux/arm64
|
||||
|
|
@ -240,3 +267,16 @@ jobs:
|
|||
BIN_ARM64=./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-aarch64
|
||||
BIN_AMD64=./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-amd64
|
||||
tags: ghcr.io/${{ env.OWNER }}/wadm:canary
|
||||
|
||||
- name: Build and push (main)
|
||||
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
|
||||
if: ${{ github.ref == 'refs/heads/main' }}
|
||||
with:
|
||||
push: true
|
||||
platforms: linux/amd64,linux/arm64
|
||||
context: ./
|
||||
file: ./Dockerfile.wolfi
|
||||
build-args: |
|
||||
BIN_ARM64=./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-aarch64
|
||||
BIN_AMD64=./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-amd64
|
||||
tags: ghcr.io/${{ env.OWNER }}/wadm:canary-wolfi
|
||||
|
|
@ -0,0 +1,73 @@
|
|||
# This workflow uses actions that are not certified by GitHub. They are provided
|
||||
# by a third-party and are governed by separate terms of service, privacy
|
||||
# policy, and support documentation.
|
||||
|
||||
name: Scorecard supply-chain security
|
||||
on:
|
||||
# For Branch-Protection check. Only the default branch is supported. See
|
||||
# https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection
|
||||
branch_protection_rule:
|
||||
# To guarantee Maintained check is occasionally updated. See
|
||||
# https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained
|
||||
schedule:
|
||||
- cron: '28 13 * * 3'
|
||||
push:
|
||||
branches: [ "main" ]
|
||||
|
||||
# Declare default permissions as read only.
|
||||
permissions: read-all
|
||||
|
||||
jobs:
|
||||
analysis:
|
||||
name: Scorecard analysis
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
# Needed to upload the results to code-scanning dashboard.
|
||||
security-events: write
|
||||
# Needed to publish results and get a badge (see publish_results below).
|
||||
id-token: write
|
||||
# Uncomment the permissions below if installing in a private repository.
|
||||
# contents: read
|
||||
# actions: read
|
||||
|
||||
steps:
|
||||
- name: "Checkout code"
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: "Run analysis"
|
||||
uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.4.2
|
||||
with:
|
||||
results_file: results.sarif
|
||||
results_format: sarif
|
||||
# (Optional) "write" PAT token. Uncomment the `repo_token` line below if:
|
||||
# - you want to enable the Branch-Protection check on a *public* repository, or
|
||||
# - you are installing Scorecard on a *private* repository
|
||||
# To create the PAT, follow the steps in https://github.com/ossf/scorecard-action?tab=readme-ov-file#authentication-with-fine-grained-pat-optional.
|
||||
# repo_token: ${{ secrets.SCORECARD_TOKEN }}
|
||||
|
||||
# Public repositories:
|
||||
# - Publish results to OpenSSF REST API for easy access by consumers
|
||||
# - Allows the repository to include the Scorecard badge.
|
||||
# - See https://github.com/ossf/scorecard-action#publishing-results.
|
||||
# For private repositories:
|
||||
# - `publish_results` will always be set to `false`, regardless
|
||||
# of the value entered here.
|
||||
publish_results: true
|
||||
|
||||
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
|
||||
# format to the repository Actions tab.
|
||||
- name: "Upload artifact"
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v3.pre.node20
|
||||
with:
|
||||
name: SARIF file
|
||||
path: results.sarif
|
||||
retention-days: 5
|
||||
|
||||
# Upload the results to GitHub's code scanning dashboard (optional).
|
||||
# Commenting out will disable upload of results to your repo's Code Scanning dashboard
|
||||
- name: "Upload to code-scanning"
|
||||
uses: github/codeql-action/upload-sarif@d6bbdef45e766d081b84a2def353b0055f728d3e # v3.29.3
|
||||
with:
|
||||
sarif_file: results.sarif
|
||||
|
|
@ -5,6 +5,9 @@ on:
|
|||
branches:
|
||||
- main
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
test:
|
||||
name: Test
|
||||
|
|
@ -12,32 +15,55 @@ jobs:
|
|||
strategy:
|
||||
matrix:
|
||||
os: [ubuntu-22.04]
|
||||
nats_version: [2.9.15]
|
||||
nats_version: [2.10.22]
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: Install latest Rust stable toolchain
|
||||
uses: actions-rs/toolchain@v1
|
||||
uses: dtolnay/rust-toolchain@1ff72ee08e3cb84d84adba594e0a297990fc1ed3 # stable
|
||||
with:
|
||||
toolchain: stable
|
||||
default: true
|
||||
components: clippy, rustfmt
|
||||
|
||||
# Cache: rust
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
- uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
|
||||
with:
|
||||
key: "${{ matrix.os }}-rust-cache"
|
||||
key: '${{ matrix.os }}-rust-cache'
|
||||
|
||||
- name: Install wash
|
||||
uses: wasmCloud/common-actions/install-wash@main
|
||||
- name: Check that Wadm JSON Schema is up-to-date
|
||||
shell: bash
|
||||
run: |
|
||||
cargo run --bin wadm-schema
|
||||
if [ $(git diff --exit-code > /dev/null) ]; then
|
||||
echo 'Wadm JSON Schema is out of date. Please run `cargo run --bin wadm-schema` and commit the changes.'
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: install wash
|
||||
uses: taiki-e/install-action@2c73a741d1544cc346e9b0af11868feba03eb69d # v2.58.9
|
||||
with:
|
||||
tool: wash@0.38.0
|
||||
|
||||
# GH Actions doesn't currently support passing args to service containers and there is no way
|
||||
# to use an environment variable to turn on jetstream for nats, so we manually start it here
|
||||
- name: Start NATS
|
||||
run: docker run --rm -d --name wadm-test -p 127.0.0.1:4222:4222 nats:${{ matrix.nats_version }} -js
|
||||
|
||||
- name: Build
|
||||
run: |
|
||||
cargo build --all-features --all-targets --workspace
|
||||
|
||||
# Make sure the wadm crate works well with feature combinations
|
||||
# The above command builds the workspace and tests with no features
|
||||
- name: Check wadm crate with features
|
||||
run: |
|
||||
cargo check -p wadm --no-default-features
|
||||
cargo check -p wadm --features cli
|
||||
cargo check -p wadm --features http_admin
|
||||
cargo check -p wadm --features cli,http_admin
|
||||
|
||||
# Run all tests
|
||||
- name: Run tests
|
||||
run: |
|
||||
cargo test -- --nocapture
|
||||
cargo test --workspace -- --nocapture
|
||||
|
|
|
|||
|
|
@ -0,0 +1,47 @@
|
|||
name: wit-wasmcloud-wadm-publish
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- "wit-wasmcloud-wadm-v*"
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
packages: write
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
with:
|
||||
sparse-checkout: |
|
||||
wit
|
||||
.github
|
||||
- name: Extract tag context
|
||||
id: ctx
|
||||
run: |
|
||||
version=${GITHUB_REF_NAME#wit-wasmcloud-wadm-v}
|
||||
echo "version=${version}" >> "$GITHUB_OUTPUT"
|
||||
echo "tarball=wit-wasmcloud-wadm-${version}.tar.gz" >> "$GITHUB_OUTPUT"
|
||||
echo "version is ${version}"
|
||||
- uses: ./.github/actions/configure-wkg
|
||||
with:
|
||||
oci-username: ${{ github.repository_owner }}
|
||||
oci-password: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Build
|
||||
run: wkg wit build --wit-dir wit/wadm -o package.wasm
|
||||
- name: Push version-tagged WebAssembly binary to GHCR
|
||||
run: wkg publish package.wasm
|
||||
- name: Package tarball for release
|
||||
run: |
|
||||
mkdir -p release/wit
|
||||
cp wit/wadm/*.wit release/wit/
|
||||
tar cvzf ${{ steps.ctx.outputs.tarball }} -C release wit
|
||||
- name: Release
|
||||
uses: softprops/action-gh-release@72f2c25fcb47643c292f7107632f7a47c1df5cd8 # v2.3.2
|
||||
with:
|
||||
files: ${{ steps.ctx.outputs.tarball }}
|
||||
make_latest: "false"
|
||||
|
|
@ -1,4 +1,14 @@
|
|||
/target
|
||||
test/e2e_log/
|
||||
tests/e2e_log/
|
||||
|
||||
*.dump
|
||||
*.dump
|
||||
|
||||
# Thanks MacOS
|
||||
.DS_Store
|
||||
|
||||
# Ignore IDE specific files
|
||||
.idea/
|
||||
.vscode/
|
||||
|
||||
.direnv/
|
||||
result
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load Diff
111
Cargo.toml
111
Cargo.toml
|
|
@ -1,67 +1,118 @@
|
|||
[package]
|
||||
name = "wadm"
|
||||
name = "wadm-cli"
|
||||
description = "wasmCloud Application Deployment Manager: A tool for running Wasm applications in wasmCloud"
|
||||
version = "0.5.0-rc.1"
|
||||
version.workspace = true
|
||||
edition = "2021"
|
||||
authors = ["wasmCloud Team"]
|
||||
keywords = ["webassembly", "wasmcloud", "wadm"]
|
||||
license = "Apache-2.0"
|
||||
readme = "README.md"
|
||||
repository = "https://github.com/wasmcloud/wadm"
|
||||
default-run = "wadm"
|
||||
|
||||
[workspace.package]
|
||||
version = "0.21.0"
|
||||
|
||||
[features]
|
||||
default = []
|
||||
cli = ["clap", "tracing-opentelemetry", "tracing-subscriber", "opentelemetry", "opentelemetry-otlp", "atty"]
|
||||
# internal feature for e2e tests
|
||||
_e2e_tests = []
|
||||
|
||||
[workspace]
|
||||
members = ["crates/*"]
|
||||
|
||||
[dependencies]
|
||||
anyhow = { workspace = true }
|
||||
clap = { workspace = true, features = ["derive", "cargo", "env"] }
|
||||
# One version back to avoid clashes with 0.10 of otlp
|
||||
opentelemetry = { workspace = true, features = ["rt-tokio"] }
|
||||
# 0.10 to avoid protoc dep
|
||||
opentelemetry-otlp = { workspace = true, features = [
|
||||
"http-proto",
|
||||
"reqwest-client",
|
||||
] }
|
||||
schemars = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
tokio = { workspace = true, features = ["full"] }
|
||||
tracing = { workspace = true, features = ["log"] }
|
||||
tracing-opentelemetry = { workspace = true }
|
||||
tracing-subscriber = { workspace = true, features = ["env-filter", "json"] }
|
||||
wadm = { workspace = true, features = ["cli", "http_admin"] }
|
||||
wadm-types = { workspace = true }
|
||||
|
||||
[workspace.dependencies]
|
||||
anyhow = "1"
|
||||
async-nats = "0.31"
|
||||
async-nats = "0.39"
|
||||
async-trait = "0.1"
|
||||
atty = { version = "0.2", optional = true }
|
||||
bytes = "1"
|
||||
chrono = "0.4"
|
||||
clap = { version = "4", features = ["derive", "cargo", "env"], optional = true }
|
||||
cloudevents-sdk = "0.7"
|
||||
clap = { version = "4", features = ["derive", "cargo", "env"] }
|
||||
cloudevents-sdk = "0.8"
|
||||
futures = "0.3"
|
||||
indexmap = { version = "1", features = ["serde-1"] }
|
||||
http = { version = "1", default-features = false }
|
||||
http-body-util = { version = "0.1", default-features = false }
|
||||
hyper = { version = "1", default-features = false }
|
||||
hyper-util = { version = "0.1", default-features = false }
|
||||
indexmap = { version = "2", features = ["serde"] }
|
||||
jsonschema = "0.29"
|
||||
lazy_static = "1"
|
||||
nkeys = "0.3.0"
|
||||
nkeys = "0.4.5"
|
||||
# One version back to avoid clashes with 0.10 of otlp
|
||||
opentelemetry = { version = "0.17", features = ["rt-tokio"], optional = true }
|
||||
opentelemetry = { version = "0.17", features = ["rt-tokio"] }
|
||||
# 0.10 to avoid protoc dep
|
||||
opentelemetry-otlp = { version = "0.10", features = [
|
||||
"http-proto",
|
||||
"reqwest-client",
|
||||
], optional = true }
|
||||
# TODO: Actually leverage prometheus
|
||||
prometheus = { version = "0.13", optional = true }
|
||||
rand = { version = "0.8", features = ["small_rng"] }
|
||||
] }
|
||||
rand = { version = "0.9", features = ["small_rng"] }
|
||||
# NOTE(thomastaylor312): Pinning this temporarily to 1.10 due to transitive dependency with oci
|
||||
# crates that are pinned to 1.10
|
||||
regex = "~1.10"
|
||||
schemars = "0.8"
|
||||
semver = { version = "1.0.25", features = ["serde"] }
|
||||
serde = "1"
|
||||
serde_json = "1"
|
||||
serde_yaml = "0.9"
|
||||
sha2 = "0.10.2"
|
||||
thiserror = "1"
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
sha2 = "0.10.9"
|
||||
thiserror = "2"
|
||||
tokio = { version = "1", default-features = false }
|
||||
tracing = { version = "0.1", features = ["log"] }
|
||||
tracing-futures = "0.2"
|
||||
tracing-opentelemetry = { version = "0.17", optional = true }
|
||||
tracing-subscriber = { version = "0.3.7", features = [
|
||||
"env-filter",
|
||||
"json",
|
||||
], optional = true }
|
||||
tracing-opentelemetry = { version = "0.17" }
|
||||
tracing-subscriber = { version = "0.3.7", features = ["env-filter", "json"] }
|
||||
ulid = { version = "1", features = ["serde"] }
|
||||
utoipa = "5"
|
||||
uuid = "1"
|
||||
wasmbus-rpc = "0.14"
|
||||
wasmcloud-control-interface = "0.28.1"
|
||||
semver = { version = "1.0.16", features = ["serde"] }
|
||||
regex = "1.9.3"
|
||||
base64 = "0.21.2"
|
||||
wadm = { version = "0.21", path = "./crates/wadm" }
|
||||
wadm-client = { version = "0.10", path = "./crates/wadm-client" }
|
||||
wadm-types = { version = "0.8", path = "./crates/wadm-types" }
|
||||
wasmcloud-control-interface = "2.4.0"
|
||||
wasmcloud-secrets-types = "0.5.0"
|
||||
wit-bindgen-wrpc = { version = "0.9", default-features = false }
|
||||
wit-bindgen = { version = "0.36.0", default-features = false }
|
||||
|
||||
[dev-dependencies]
|
||||
serial_test = "1"
|
||||
async-nats = { workspace = true }
|
||||
chrono = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
serde_yaml = { workspace = true }
|
||||
serial_test = "3"
|
||||
wadm-client = { workspace = true }
|
||||
wadm-types = { workspace = true }
|
||||
wasmcloud-control-interface = { workspace = true }
|
||||
testcontainers = "0.25"
|
||||
|
||||
[build-dependencies]
|
||||
schemars = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
wadm-types = { workspace = true }
|
||||
|
||||
[[bin]]
|
||||
name = "wadm"
|
||||
path = "bin/main.rs"
|
||||
required-features = ["cli"]
|
||||
path = "src/main.rs"
|
||||
|
||||
[[bin]]
|
||||
name = "wadm-schema"
|
||||
path = "src/schema.rs"
|
||||
|
|
|
|||
|
|
@ -0,0 +1,17 @@
|
|||
FROM cgr.dev/chainguard/wolfi-base:latest AS base
|
||||
|
||||
FROM base AS base-amd64
|
||||
ARG BIN_AMD64
|
||||
ARG BIN=$BIN_AMD64
|
||||
|
||||
FROM base AS base-arm64
|
||||
ARG BIN_ARM64
|
||||
ARG BIN=$BIN_ARM64
|
||||
|
||||
FROM base-$TARGETARCH
|
||||
|
||||
# Copy application binary from disk
|
||||
COPY ${BIN} /usr/local/bin/wadm
|
||||
|
||||
# Run the application
|
||||
ENTRYPOINT ["/usr/local/bin/wadm"]
|
||||
16
EVENTS.md
16
EVENTS.md
|
|
@ -1,16 +0,0 @@
|
|||
# wasmCloud Application Deployment Manager - Events
|
||||
**wadm** emits all events on the `wadm.evt` subject in the form of [CloudEvents]().
|
||||
|
||||
The following is a list of the events emitted by wadm and the field names of the payload
|
||||
carried within the cloud event's `data` field (stored as JSON). Each of the following events are in the `com.wasmcloud.wadm` namespace, so the event type for `model_version_created` is actually `com.wasmcloud.wadm.model_version_created`
|
||||
|
||||
| Event Type | Fields | Description |
|
||||
| --- | --- | --- |
|
||||
| `model_version_created` | name, version, lattice_id | Indicates that a new version of a model has been stored |
|
||||
| `model_version_deleted` | name, version, lattice_id | Indicates that a specific model version has been deleted |
|
||||
| `model_deployed` | name, version, lattice_id | Indicates that a deployment monitor process has started (and nothing more) |
|
||||
| `model_undeployed` | name, version, lattice_id | Indicates that a deployment monitor process has been stopped (and nothing more) |
|
||||
| `deployment_state_changed` | name, version, lattice_id, state | Indicates that a deployment monitor has changed state |
|
||||
| `control_action_taken` | name, version, lattice_id, action_type, params(map) | Indicates that a deployment monitor has taken corrective action as a result of reconciliation |
|
||||
| `control_action_failed` | name, version, lattice_id, action_type, message, params(map) | Indicates a failure to submit corrective action to a lattice control API |
|
||||
| `reconciliation_error_occurred` | name, version, lattice_id, message, params(map) | Indicates that required corrective action as a result of reconciliation cannot be performed (e.g. insufficient resources) |
|
||||
|
|
@ -0,0 +1,25 @@
|
|||
# MAINTAINERS
|
||||
|
||||
The following individuals are responsible for reviewing code, managing issues, and ensuring the overall quality of `wadm`.
|
||||
|
||||
## @wasmCloud/wadm-maintainers
|
||||
|
||||
Name: Joonas Bergius
|
||||
GitHub: @joonas
|
||||
Organization: Cosmonic
|
||||
|
||||
Name: Dan Norris
|
||||
GitHub: @protochron
|
||||
Organization: Cosmonic
|
||||
|
||||
Name: Taylor Thomas
|
||||
GitHub: @thomastaylor312
|
||||
Organization: Cosmonic
|
||||
|
||||
Name: Ahmed Tadde
|
||||
GitHub: @ahmedtadde
|
||||
Organization: PreciseTarget
|
||||
|
||||
Name: Brooks Townsend
|
||||
GitHub: @brooksmtownsend
|
||||
Organization: Cosmonic
|
||||
23
Makefile
23
Makefile
|
|
@ -7,6 +7,13 @@ MAKEFLAGS += --no-builtin-rules
|
|||
MAKEFLAGS += --no-print-directory
|
||||
MAKEFLAGS += -S
|
||||
|
||||
OS_NAME := $(shell uname -s | tr '[:upper:]' '[:lower:]')
|
||||
ifeq ($(OS_NAME),darwin)
|
||||
NC_FLAGS := -czt
|
||||
else
|
||||
NC_FLAGS := -Czt
|
||||
endif
|
||||
|
||||
.DEFAULT: help
|
||||
|
||||
CARGO ?= cargo
|
||||
|
|
@ -44,7 +51,7 @@ lint: check-cargo-clippy ## Run code lint
|
|||
$(CARGO) clippy --all-features --all-targets --workspace
|
||||
|
||||
build: ## Build wadm
|
||||
$(CARGO) build --bin wadm --features cli
|
||||
$(CARGO) build --bin wadm
|
||||
|
||||
build-docker: ## Build wadm docker image
|
||||
ifndef BIN_AMD64
|
||||
|
|
@ -70,8 +77,8 @@ build-docker: ## Build wadm docker image
|
|||
CARGO_TEST_TARGET ?=
|
||||
|
||||
test:: ## Run tests
|
||||
ifeq ($(shell nc -czt -w1 127.0.0.1 4222 || echo fail),fail)
|
||||
$(DOCKER) run --rm -d --name wadm-test -p 127.0.0.1:4222:4222 nats:2.9 -js
|
||||
ifeq ($(shell nc $(NC_FLAGS) -w1 127.0.0.1 4222 || echo fail),fail)
|
||||
$(DOCKER) run --rm -d --name wadm-test -p 127.0.0.1:4222:4222 nats:2.10 -js
|
||||
$(CARGO) test $(CARGO_TEST_TARGET) -- --nocapture
|
||||
$(DOCKER) stop wadm-test
|
||||
else
|
||||
|
|
@ -79,9 +86,10 @@ else
|
|||
endif
|
||||
|
||||
test-e2e:: ## Run e2e tests
|
||||
ifeq ($(shell nc -czt -w1 127.0.0.1 4222 || echo fail),fail)
|
||||
ifeq ($(shell nc $(NC_FLAGS) -w1 127.0.0.1 4222 || echo fail),fail)
|
||||
@$(MAKE) build
|
||||
RUST_BACKTRACE=1 $(CARGO) test --test e2e_multitenant --features _e2e_tests -- --nocapture
|
||||
@# Reenable this once we've enabled all tests
|
||||
@# RUST_BACKTRACE=1 $(CARGO) test --test e2e_multitenant --features _e2e_tests -- --nocapture
|
||||
RUST_BACKTRACE=1 $(CARGO) test --test e2e_multiple_hosts --features _e2e_tests -- --nocapture
|
||||
RUST_BACKTRACE=1 $(CARGO) test --test e2e_upgrades --features _e2e_tests -- --nocapture
|
||||
else
|
||||
|
|
@ -90,7 +98,7 @@ else
|
|||
endif
|
||||
|
||||
test-individual-e2e:: ## Runs an individual e2e test based on the WADM_E2E_TEST env var
|
||||
ifeq ($(shell nc -czt -w1 127.0.0.1 4222 || echo fail),fail)
|
||||
ifeq ($(shell nc $(NC_FLAGS) -w1 127.0.0.1 4222 || echo fail),fail)
|
||||
@$(MAKE) build
|
||||
RUST_BACKTRACE=1 $(CARGO) test --test $(WADM_E2E_TEST) --features _e2e_tests -- --nocapture
|
||||
else
|
||||
|
|
@ -105,9 +113,8 @@ endif
|
|||
stream-cleanup: ## Removes all streams that wadm creates
|
||||
-$(NATS) stream del wadm_commands --force
|
||||
-$(NATS) stream del wadm_events --force
|
||||
-$(NATS) stream del wadm_event_consumer --force
|
||||
-$(NATS) stream del wadm_notify --force
|
||||
-$(NATS) stream del wadm_mirror --force
|
||||
-$(NATS) stream del wadm_multitenant_mirror --force
|
||||
-$(NATS) stream del wadm_status --force
|
||||
-$(NATS) stream del KV_wadm_state --force
|
||||
-$(NATS) stream del KV_wadm_manifests --force
|
||||
|
|
|
|||
166
README.md
166
README.md
|
|
@ -1,11 +1,19 @@
|
|||
<img align="right" src="./wadm.png" alt="wadm logo" style="width: 200px" />
|
||||
<img align="right" src="./static/images/wadm_128.png" alt="wadm logo" />
|
||||
|
||||
# wasmCloud Application Deployment Manager (wadm)
|
||||
|
||||
The wasmCloud Application Deployment Manager (**wadm**) enables declarative wasmCloud applications.
|
||||
It's responsible for managing a set of application deployment specifications, monitoring the current
|
||||
state of an entire [lattice](https://wasmcloud.com/docs/reference/lattice/), and issuing the
|
||||
appropriate lattice control commands required to close the gap between observed and desired state.
|
||||
Wadm is a Wasm-native orchestrator for managing and scaling declarative wasmCloud applications.
|
||||
|
||||
## Responsibilities
|
||||
|
||||
**wadm** is powerful because it focuses on a small set of core responsibilities, making it efficient and easy to manage.
|
||||
|
||||
- **Manage application specifications** - Manage applications which represent _desired state_. This includes
|
||||
the creation, deletion, upgrades and rollback of applications to previous versions. Application
|
||||
specifications are defined using the [Open Application Model](https://oam.dev/). For more
|
||||
information on wadm's specific OAM features, see our [OAM README](./oam/README.md).
|
||||
- **Observe state** - Monitor wasmCloud [CloudEvents](https://wasmcloud.com/docs/reference/cloud-event-list) from all hosts in a [lattice](https://wasmcloud.com/docs/deployment/lattice/) to build the current state.
|
||||
- **Reconcile with compensating commands** - When the current state doesn't match the desired state, issue commands to wasmCloud hosts in the lattice with the [control interface](https://wasmcloud.com/docs/hosts/lattice-protocols/control-interface) to reach desired state. Wadm is constantly reconciling and will react immediately to ensure applications stay deployed. For example, if a host stops, wadm will reconcile the `host_stopped` event and issue any necessary commands to start components on other available hosts.
|
||||
|
||||
## Using wadm
|
||||
|
||||
|
|
@ -13,16 +21,17 @@ appropriate lattice control commands required to close the gap between observed
|
|||
|
||||
### Install & Run
|
||||
|
||||
You can easily run **wadm** by downloading [wash](https://wasmcloud.com/docs/installation) and launching it alongside NATS and wasmCloud. Then, you can use the `wash app` command to query, create, and deploy applications.
|
||||
You can easily run **wadm** by downloading the [`wash`](https://wasmcloud.com/docs/installation) CLI, which automatically launches wadm alongside NATS and a wasmCloud host when you run `wash up`. You can use `wash` to query, create, and deploy applications.
|
||||
|
||||
```
|
||||
```bash
|
||||
wash up -d # Start NATS, wasmCloud, and wadm in the background
|
||||
wash app list # Query the list of applications
|
||||
```
|
||||
|
||||
Follow the [wasmCloud quickstart](https://wasmcloud.com/docs/tour/hello-world) to get started building and deploying an application, or follow the **Deploying an application** example below to simply try a deploy.
|
||||
|
||||
If you prefer to run **wadm** separately and/or connect to running wasmCloud hosts, you can instead opt for using the latest GitHub release artifact and executing the binary. Simply replace the latest version, your operating system, and architecture below. Please note that wadm requires a wasmCloud host version >=0.63.0
|
||||
|
||||
```
|
||||
```bash
|
||||
# Install wadm
|
||||
curl -fLO https://github.com/wasmCloud/wadm/releases/download/<version>/wadm-<version>-<os>-<arch>.tar.gz
|
||||
tar -xvf wadm-<version>-<os>-<arch>.tar.gz
|
||||
|
|
@ -32,155 +41,112 @@ cd wadm-<version>-<os>-<arch>
|
|||
|
||||
### Deploying an application
|
||||
|
||||
Take the following manifest and save it locally (you can also download this from
|
||||
[echo.yaml](./oam/echo.yaml)):
|
||||
Copy the following manifest and save it locally as `hello.yaml` (you can also find it in the `oam`
|
||||
[directory](./oam/hello.yaml)):
|
||||
|
||||
```yaml
|
||||
# Metadata
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: echo
|
||||
name: hello-world
|
||||
annotations:
|
||||
version: v0.0.1
|
||||
description: "This is my app"
|
||||
description: 'HTTP hello world demo'
|
||||
spec:
|
||||
components:
|
||||
- name: echo
|
||||
type: actor
|
||||
- name: http-component
|
||||
type: component
|
||||
properties:
|
||||
image: wasmcloud.azurecr.io/echo:0.3.7
|
||||
# Run components from OCI registries as below or from a local .wasm component binary.
|
||||
image: ghcr.io/wasmcloud/components/http-hello-world-rust:0.1.0
|
||||
traits:
|
||||
# One replica of this component will run
|
||||
- type: spreadscaler
|
||||
properties:
|
||||
replicas: 1
|
||||
- type: linkdef
|
||||
properties:
|
||||
target: httpserver
|
||||
values:
|
||||
address: 0.0.0.0:8080
|
||||
|
||||
instances: 1
|
||||
# The httpserver capability provider, started from the official wasmCloud OCI artifact
|
||||
- name: httpserver
|
||||
type: capability
|
||||
properties:
|
||||
contract: wasmcloud:httpserver
|
||||
image: wasmcloud.azurecr.io/httpserver:0.17.0
|
||||
image: ghcr.io/wasmcloud/http-server:0.22.0
|
||||
traits:
|
||||
- type: spreadscaler
|
||||
# Link the HTTP server and set it to listen on the local machine's port 8080
|
||||
- type: link
|
||||
properties:
|
||||
replicas: 1
|
||||
target: http-component
|
||||
namespace: wasi
|
||||
package: http
|
||||
interfaces: [incoming-handler]
|
||||
source:
|
||||
config:
|
||||
- name: default-http
|
||||
properties:
|
||||
ADDRESS: 127.0.0.1:8080
|
||||
```
|
||||
|
||||
Then, use **wadm** to put the manifest and deploy it.
|
||||
Then use `wash` to deploy the manifest:
|
||||
|
||||
```
|
||||
wash app put ./echo.yaml
|
||||
wash app deploy echo
|
||||
```bash
|
||||
wash app deploy hello.yaml
|
||||
```
|
||||
|
||||
🎉 You've just launched your first application with **wadm**! Try `curl localhost:8080/wadm` and see
|
||||
the response from the [echo](https://github.com/wasmCloud/examples/tree/main/actor/echo) WebAssembly
|
||||
module.
|
||||
🎉 You've just launched your first application with **wadm**! Try `curl localhost:8080`.
|
||||
|
||||
When you're done, you can use **wadm** to undeploy the application.
|
||||
When you're done, you can use `wash` to undeploy the application:
|
||||
|
||||
```
|
||||
wash app undeploy echo
|
||||
```bash
|
||||
wash app undeploy hello-world
|
||||
```
|
||||
|
||||
### Modifying applications
|
||||
|
||||
**wadm** supports upgrading applications by `put`ting new versions of manifests and then `deploy`ing
|
||||
them. Try changing the manifest you created above by updating the number of echo replicas.
|
||||
**wadm** supports upgrading applications by deploying new versions of manifests. Try changing the manifest you created above by updating the number of instances.
|
||||
|
||||
```yaml
|
||||
<<ELIDED>>
|
||||
name: echo
|
||||
metadata:
|
||||
name: hello-world
|
||||
annotations:
|
||||
version: v0.0.2 # Note the changed version
|
||||
description: "wasmCloud echo Example"
|
||||
description: 'HTTP hello world demo'
|
||||
spec:
|
||||
components:
|
||||
- name: echo
|
||||
type: actor
|
||||
- name: http-component
|
||||
type: component
|
||||
properties:
|
||||
image: wasmcloud.azurecr.io/echo:0.3.5
|
||||
image: ghcr.io/wasmcloud/components/http-hello-world-rust:0.1.0
|
||||
traits:
|
||||
- type: spreadscaler
|
||||
properties:
|
||||
replicas: 10 # Let's run 10!
|
||||
instances: 10 # Let's have 10!
|
||||
<<ELIDED>>
|
||||
```
|
||||
|
||||
Then, simply deploy the new version:
|
||||
Then simply deploy the new manifest:
|
||||
|
||||
```
|
||||
wash app put ./echo.yaml
|
||||
wash app deploy echo v0.0.2
|
||||
```bash
|
||||
wash app deploy hello.yaml
|
||||
```
|
||||
|
||||
If you navigate to the [wasmCloud dashboard](http://localhost:4000/), you'll see that you now have
|
||||
10 instances of the echo actor.
|
||||
|
||||
_Documentation for configuring the spreadscaler to spread actors and providers across multiple hosts
|
||||
in a lattice is forthcoming._
|
||||
|
||||
## Responsibilities
|
||||
|
||||
**wadm** has a very small set of responsibilities, which actually contributes to its power.
|
||||
|
||||
- **Manage Application Specifications** - Manage models consisting of _desired state_. This includes
|
||||
the creation and deletion and _rollback_ of models to previous versions. Application
|
||||
specifications are defined using the [Open Application Model](https://oam.dev/). For more
|
||||
information on wadm's specific OAM features, see our [OAM README](./oam/README.md).
|
||||
- **Observe State** - Monitor wasmCloud [CloudEvents](https://cloudevents.io/) from all hosts in a
|
||||
lattice to build the current state.
|
||||
- **Take Compensating Actions** - When indicated, issue commands to the [lattice control
|
||||
interface](https://github.com/wasmCloud/interfaces/tree/main/lattice-control) to bring about the
|
||||
changes necessary to make the desired and observed state match.
|
||||
Now wasmCloud is configured to automatically scale your component to 10 instances based on incoming load.
|
||||
|
||||
## 🚧 Advanced
|
||||
|
||||
You can find a Docker Compose file for deploying an end-to-end multi-tenant example in the [test](https://github.com/wasmCloud/wadm/blob/main/tests/docker-compose-e2e-multitenant.yaml) directory.
|
||||
|
||||
In advanced use cases, **wadm** is also capable of:
|
||||
|
||||
- Monitoring multiple lattices.
|
||||
- Running multiple replicas to distribute load among multiple processes, or for a high-availability
|
||||
- Running multiple instances to distribute load among multiple processes, or for a high-availability
|
||||
architecture.
|
||||
|
||||
🚧 The above functionality is somewhat tested, but not as rigorously as a single instance monitoring
|
||||
🚧 Multi-lattice and multi-process functionality is somewhat tested, but not as rigorously as a single instance monitoring
|
||||
a single lattice. Proceed with caution while we do further testing.
|
||||
|
||||
### API
|
||||
|
||||
Interacting with **wadm** is done over NATS on the root topic `wadm.api.{prefix}` where `prefix` is
|
||||
the lattice namespace prefix. For more information on this API, please consult the [wadm
|
||||
Reference](https://wasmcloud.dev/reference/wadm).
|
||||
|
||||
## Known Issues/Missing functionality
|
||||
|
||||
As this is a new project there are some things we know are missing or buggy. A non-exhaustive list
|
||||
of these can be found below:
|
||||
|
||||
- It is _technically_ possible as things stand right now for a race condition with manifests when a
|
||||
manifest is updated/created and deleted simultaneously. In this case, one of the operations will
|
||||
win and you will end up with a manifest that still exists after you delete it or a manifest that
|
||||
does not exist after you create it. This is a very unlikely scenario as only one person or process
|
||||
is interacting with a specific, but it is possible. If this becomes a problem for you, please let
|
||||
us know and we will consider additional ways of how we can address it.
|
||||
- Manifest validation is not yet implemented. Right now wadm will accept the manifest blindly so
|
||||
long as it can parse it. It will not validate that the model name is valid or if you specified
|
||||
entirely invalid properties. This will be added in a future version.
|
||||
- Nondestructive (e.g. orphaning resources) undeploys are not currently implemented. You can set the
|
||||
field in the request, but it won't do anything
|
||||
- If wadm discovers a provider in the lattice that isn't already started via a manifest, the
|
||||
manifest won't be able to reconcile until another start provider event is received. This will
|
||||
require a feature add to the ctl client to fix
|
||||
- All manifests belonging to a lattice must be using the same version of actors and providers. This
|
||||
is an important limitation of the host RPC protocol. We are open to ideas about how to best handle
|
||||
manifests with different actor/provider versions
|
||||
- When running multiple wadm processes, you can still get a little bit of jitter with starting
|
||||
actors and providers (in some cases). This will always resolve after a few ticks and isn't a huge
|
||||
problem as actors are "cheap" from a compute standpoint. If anyone would like to submit a PR to
|
||||
make this better, we'd love to see it!
|
||||
Reference](https://wasmcloud.com/docs/ecosystem/wadm/).
|
||||
|
||||
## References
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,3 @@
|
|||
# Reporting a security issue
|
||||
|
||||
Please refer to the [wasmCloud Security Process and Policy](https://github.com/wasmCloud/wasmCloud/blob/main/SECURITY.md) for details on how to report security issues and vulnerabilities.
|
||||
413
bin/main.rs
413
bin/main.rs
|
|
@ -1,413 +0,0 @@
|
|||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use async_nats::jetstream::{stream::Stream, Context};
|
||||
use clap::Parser;
|
||||
use tokio::sync::Semaphore;
|
||||
use tracing::log::debug;
|
||||
|
||||
use wadm::{
|
||||
consumers::{
|
||||
manager::{ConsumerManager, WorkerCreator},
|
||||
*,
|
||||
},
|
||||
mirror::Mirror,
|
||||
nats_utils::LatticeIdParser,
|
||||
scaler::manager::{ScalerManager, WADM_NOTIFY_PREFIX},
|
||||
server::{ManifestNotifier, Server, DEFAULT_WADM_TOPIC_PREFIX},
|
||||
storage::{nats_kv::NatsKvStore, reaper::Reaper},
|
||||
workers::{CommandPublisher, CommandWorker, EventWorker, StatusPublisher},
|
||||
DEFAULT_COMMANDS_TOPIC, DEFAULT_EVENTS_TOPIC, DEFAULT_MULTITENANT_EVENTS_TOPIC,
|
||||
DEFAULT_STATUS_TOPIC, DEFAULT_WADM_EVENTS_TOPIC,
|
||||
};
|
||||
|
||||
mod connections;
|
||||
mod logging;
|
||||
mod nats;
|
||||
mod observer;
|
||||
|
||||
use connections::{ControlClientConfig, ControlClientConstructor};
|
||||
|
||||
const EVENT_STREAM_NAME: &str = "wadm_events";
|
||||
const COMMAND_STREAM_NAME: &str = "wadm_commands";
|
||||
const STATUS_STREAM_NAME: &str = "wadm_status";
|
||||
const MIRROR_STREAM_NAME: &str = "wadm_mirror";
|
||||
const MULTITENANT_MIRROR_STREAM_NAME: &str = "wadm_multitenant_mirror";
|
||||
const NOTIFY_STREAM_NAME: &str = "wadm_notify";
|
||||
|
||||
#[derive(Parser, Debug)]
|
||||
#[command(name = clap::crate_name!(), version = clap::crate_version!(), about = "wasmCloud Application Deployment Manager", long_about = None)]
|
||||
struct Args {
|
||||
/// The ID for this wadm process. Defaults to a random UUIDv4 if none is provided. This is used
|
||||
/// to help with debugging when identifying which process is doing the work
|
||||
#[arg(short = 'i', long = "host-id", env = "WADM_HOST_ID")]
|
||||
host_id: Option<String>,
|
||||
|
||||
/// Whether or not to use structured log output (as JSON)
|
||||
#[arg(
|
||||
short = 'l',
|
||||
long = "structured-logging",
|
||||
default_value = "false",
|
||||
env = "WADM_STRUCTURED_LOGGING"
|
||||
)]
|
||||
structured_logging: bool,
|
||||
|
||||
/// Whether or not to enable opentelemetry tracing
|
||||
#[arg(
|
||||
short = 't',
|
||||
long = "tracing",
|
||||
default_value = "false",
|
||||
env = "WADM_TRACING_ENABLED"
|
||||
)]
|
||||
tracing_enabled: bool,
|
||||
|
||||
/// The endpoint to use for tracing. Setting this flag enables tracing, even if --tracing is set
|
||||
/// to false. Defaults to http://localhost:55681/v1/traces if not set and tracing is enabled
|
||||
#[arg(short = 'e', long = "tracing-endpoint", env = "WADM_TRACING_ENDPOINT")]
|
||||
tracing_endpoint: Option<String>,
|
||||
|
||||
/// The NATS JetStream domain to connect to
|
||||
#[arg(short = 'd', env = "WADM_JETSTREAM_DOMAIN")]
|
||||
domain: Option<String>,
|
||||
|
||||
/// (Advanced) Tweak the maximum number of jobs to run for handling events and commands. Be
|
||||
/// careful how you use this as it can affect performance
|
||||
#[arg(short = 'j', long = "max-jobs", env = "WADM_MAX_JOBS")]
|
||||
max_jobs: Option<usize>,
|
||||
|
||||
/// The URL of the nats server you want to connect to
|
||||
#[arg(
|
||||
short = 's',
|
||||
long = "nats-server",
|
||||
env = "WADM_NATS_SERVER",
|
||||
default_value = "127.0.0.1:4222"
|
||||
)]
|
||||
nats_server: String,
|
||||
|
||||
/// Use the specified nkey file or seed literal for authentication. Must be used in conjunction with --nats-jwt
|
||||
#[arg(
|
||||
long = "nats-seed",
|
||||
env = "WADM_NATS_NKEY",
|
||||
conflicts_with = "nats_creds",
|
||||
requires = "nats_jwt"
|
||||
)]
|
||||
nats_seed: Option<String>,
|
||||
|
||||
/// Use the specified jwt file or literal for authentication. Must be used in conjunction with --nats-nkey
|
||||
#[arg(
|
||||
long = "nats-jwt",
|
||||
env = "WADM_NATS_JWT",
|
||||
conflicts_with = "nats_creds",
|
||||
requires = "nats_seed"
|
||||
)]
|
||||
nats_jwt: Option<String>,
|
||||
|
||||
/// (Optional) NATS credential file to use when authenticating
|
||||
#[arg(
|
||||
long = "nats-creds-file",
|
||||
env = "WADM_NATS_CREDS_FILE",
|
||||
conflicts_with_all = ["nats_seed", "nats_jwt"],
|
||||
)]
|
||||
nats_creds: Option<PathBuf>,
|
||||
|
||||
/// Name of the bucket used for storage of lattice state
|
||||
#[arg(
|
||||
long = "state-bucket-name",
|
||||
env = "WADM_STATE_BUCKET_NAME",
|
||||
default_value = "wadm_state"
|
||||
)]
|
||||
state_bucket: String,
|
||||
|
||||
/// The amount of time in seconds to give for hosts to fail to heartbeat and be removed from the
|
||||
/// store. By default, this is 120s because it is 4x the host heartbeat interval
|
||||
#[arg(
|
||||
long = "cleanup-interval",
|
||||
env = "WADM_CLEANUP_INTERVAL",
|
||||
default_value = "120"
|
||||
)]
|
||||
cleanup_interval: u64,
|
||||
|
||||
/// The API topic prefix to use. This is an advanced setting that should only be used if you
|
||||
/// know what you are doing
|
||||
#[arg(
|
||||
long = "api-prefix",
|
||||
env = "WADM_API_PREFIX",
|
||||
default_value = DEFAULT_WADM_TOPIC_PREFIX
|
||||
)]
|
||||
api_prefix: String,
|
||||
|
||||
/// Name of the bucket used for storage of manifests
|
||||
#[arg(
|
||||
long = "manifest-bucket-name",
|
||||
env = "WADM_MANIFEST_BUCKET_NAME",
|
||||
default_value = "wadm_manifests"
|
||||
)]
|
||||
manifest_bucket: String,
|
||||
|
||||
/// Run wadm in multitenant mode. This is for advanced multitenant use cases with segmented NATS
|
||||
/// account traffic and not simple cases where all lattices use credentials from the same
|
||||
/// account. See the deployment guide for more information
|
||||
#[arg(long = "multitenant", env = "WADM_MULTITENANT", hide = true)]
|
||||
multitenant: bool,
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
let args = Args::parse();
|
||||
|
||||
logging::configure_tracing(
|
||||
args.structured_logging,
|
||||
args.tracing_enabled,
|
||||
args.tracing_endpoint,
|
||||
);
|
||||
|
||||
// Build storage adapter for lattice state (on by default)
|
||||
let (client, context) = nats::get_client_and_context(
|
||||
args.nats_server.clone(),
|
||||
args.domain.clone(),
|
||||
args.nats_seed.clone(),
|
||||
args.nats_jwt.clone(),
|
||||
args.nats_creds.clone(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
// TODO: We will probably need to set up all the flags (like lattice prefix and topic prefix) down the line
|
||||
let connection_pool = ControlClientConstructor::new(
|
||||
client.clone(),
|
||||
ControlClientConfig {
|
||||
js_domain: args.domain,
|
||||
topic_prefix: None,
|
||||
},
|
||||
);
|
||||
|
||||
let trimmer: &[_] = &['.', '>', '*'];
|
||||
|
||||
let store = nats::ensure_kv_bucket(&context, args.state_bucket, 1).await?;
|
||||
|
||||
let state_storage = NatsKvStore::new(store);
|
||||
|
||||
let manifest_storage = nats::ensure_kv_bucket(&context, args.manifest_bucket, 1).await?;
|
||||
|
||||
debug!("Ensuring event stream");
|
||||
|
||||
let event_stream = nats::ensure_stream(
|
||||
&context,
|
||||
EVENT_STREAM_NAME.to_owned(),
|
||||
vec![DEFAULT_WADM_EVENTS_TOPIC.to_owned()],
|
||||
Some(
|
||||
"A stream that stores all events coming in on the wasmbus.evt topics in a cluster"
|
||||
.to_string(),
|
||||
),
|
||||
)
|
||||
.await?;
|
||||
|
||||
debug!("Ensuring command stream");
|
||||
|
||||
let command_stream = nats::ensure_stream(
|
||||
&context,
|
||||
COMMAND_STREAM_NAME.to_owned(),
|
||||
vec![DEFAULT_COMMANDS_TOPIC.to_owned()],
|
||||
Some("A stream that stores all commands for wadm".to_string()),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let status_stream = nats::ensure_status_stream(
|
||||
&context,
|
||||
STATUS_STREAM_NAME.to_owned(),
|
||||
vec![DEFAULT_STATUS_TOPIC.to_owned()],
|
||||
)
|
||||
.await?;
|
||||
|
||||
let (event_stream_topics, mirror_stream) = if args.multitenant {
|
||||
debug!("Running in multitenant mode");
|
||||
(
|
||||
vec![DEFAULT_MULTITENANT_EVENTS_TOPIC.to_owned()],
|
||||
MULTITENANT_MIRROR_STREAM_NAME,
|
||||
)
|
||||
} else {
|
||||
(vec![DEFAULT_EVENTS_TOPIC.to_owned()], MIRROR_STREAM_NAME)
|
||||
};
|
||||
|
||||
debug!("Ensuring mirror stream");
|
||||
|
||||
let mirror_stream = nats::ensure_stream(
|
||||
&context,
|
||||
mirror_stream.to_owned(),
|
||||
event_stream_topics.clone(),
|
||||
Some("A stream that publishes all events to the same stream".to_string()),
|
||||
)
|
||||
.await?;
|
||||
|
||||
debug!("Ensuring notify stream");
|
||||
|
||||
let notify_stream = nats::ensure_notify_stream(
|
||||
&context,
|
||||
NOTIFY_STREAM_NAME.to_owned(),
|
||||
vec![format!("{WADM_NOTIFY_PREFIX}.*")],
|
||||
)
|
||||
.await?;
|
||||
|
||||
debug!("Creating event consumer manager");
|
||||
|
||||
let permit_pool = Arc::new(Semaphore::new(
|
||||
args.max_jobs.unwrap_or(Semaphore::MAX_PERMITS),
|
||||
));
|
||||
let event_worker_creator = EventWorkerCreator {
|
||||
state_store: state_storage.clone(),
|
||||
manifest_store: manifest_storage.clone(),
|
||||
pool: connection_pool.clone(),
|
||||
command_topic_prefix: DEFAULT_COMMANDS_TOPIC.trim_matches(trimmer).to_owned(),
|
||||
publisher: context.clone(),
|
||||
notify_stream,
|
||||
};
|
||||
let events_manager: ConsumerManager<EventConsumer> = ConsumerManager::new(
|
||||
permit_pool.clone(),
|
||||
event_stream,
|
||||
event_worker_creator.clone(),
|
||||
args.multitenant,
|
||||
)
|
||||
.await;
|
||||
|
||||
debug!("Creating command consumer manager");
|
||||
|
||||
let command_worker_creator = CommandWorkerCreator {
|
||||
pool: connection_pool,
|
||||
};
|
||||
let commands_manager: ConsumerManager<CommandConsumer> = ConsumerManager::new(
|
||||
permit_pool.clone(),
|
||||
command_stream,
|
||||
command_worker_creator.clone(),
|
||||
args.multitenant,
|
||||
)
|
||||
.await;
|
||||
|
||||
// TODO(thomastaylor312): We might want to figure out how not to run this globally. Doing a
|
||||
// synthetic event sent to the stream could be nice, but all the wadm processes would still fire
|
||||
// off that tick, resulting in multiple people handling. We could maybe get it to work with the
|
||||
// right duplicate window, but we have no idea when each process could fire a tick. Worst case
|
||||
// scenario right now is that multiple fire simultaneously and a few of them just delete nothing
|
||||
let reaper = Reaper::new(
|
||||
state_storage.clone(),
|
||||
Duration::from_secs(args.cleanup_interval / 2),
|
||||
[],
|
||||
);
|
||||
|
||||
let wadm_event_prefix = DEFAULT_WADM_EVENTS_TOPIC.trim_matches(trimmer);
|
||||
|
||||
debug!("Creating lattice observer");
|
||||
|
||||
let observer = observer::Observer {
|
||||
parser: LatticeIdParser::new("wasmbus", args.multitenant),
|
||||
command_manager: commands_manager,
|
||||
event_manager: events_manager,
|
||||
mirror: Mirror::new(mirror_stream, wadm_event_prefix),
|
||||
reaper,
|
||||
client: client.clone(),
|
||||
command_worker_creator,
|
||||
event_worker_creator,
|
||||
};
|
||||
|
||||
debug!("Subscribing to API topic");
|
||||
|
||||
let server = Server::new(
|
||||
manifest_storage,
|
||||
client,
|
||||
Some(&args.api_prefix),
|
||||
args.multitenant,
|
||||
status_stream,
|
||||
ManifestNotifier::new(wadm_event_prefix, context),
|
||||
)
|
||||
.await?;
|
||||
tokio::select! {
|
||||
res = server.serve() => {
|
||||
res?
|
||||
}
|
||||
res = observer.observe(event_stream_topics) => {
|
||||
res?
|
||||
}
|
||||
_ = tokio::signal::ctrl_c() => {}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
struct CommandWorkerCreator {
|
||||
pool: ControlClientConstructor,
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl WorkerCreator for CommandWorkerCreator {
|
||||
type Output = CommandWorker;
|
||||
|
||||
async fn create(
|
||||
&self,
|
||||
lattice_id: &str,
|
||||
multitenant_prefix: Option<&str>,
|
||||
) -> anyhow::Result<Self::Output> {
|
||||
self.pool
|
||||
.get_connection(lattice_id, multitenant_prefix)
|
||||
.await
|
||||
.map(CommandWorker::new)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
struct EventWorkerCreator<StateStore> {
|
||||
state_store: StateStore,
|
||||
manifest_store: async_nats::jetstream::kv::Store,
|
||||
pool: ControlClientConstructor,
|
||||
command_topic_prefix: String,
|
||||
publisher: Context,
|
||||
notify_stream: Stream,
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl<StateStore> WorkerCreator for EventWorkerCreator<StateStore>
|
||||
where
|
||||
StateStore: wadm::storage::Store + Send + Sync + Clone + 'static,
|
||||
{
|
||||
type Output = EventWorker<StateStore, wasmcloud_control_interface::Client, Context>;
|
||||
|
||||
async fn create(
|
||||
&self,
|
||||
lattice_id: &str,
|
||||
multitenant_prefix: Option<&str>,
|
||||
) -> anyhow::Result<Self::Output> {
|
||||
match self
|
||||
.pool
|
||||
.get_connection(lattice_id, multitenant_prefix)
|
||||
.await
|
||||
{
|
||||
Ok(client) => {
|
||||
let command_publisher = CommandPublisher::new(
|
||||
self.publisher.clone(),
|
||||
&format!("{}.{lattice_id}", self.command_topic_prefix),
|
||||
);
|
||||
let status_publisher = StatusPublisher::new(
|
||||
self.publisher.clone(),
|
||||
&format!("wadm.status.{lattice_id}"),
|
||||
);
|
||||
let manager = ScalerManager::new(
|
||||
self.publisher.clone(),
|
||||
self.notify_stream.clone(),
|
||||
lattice_id,
|
||||
multitenant_prefix,
|
||||
self.state_store.clone(),
|
||||
self.manifest_store.clone(),
|
||||
command_publisher.clone(),
|
||||
client.clone(),
|
||||
)
|
||||
.await?;
|
||||
Ok(EventWorker::new(
|
||||
self.state_store.clone(),
|
||||
client,
|
||||
command_publisher,
|
||||
status_publisher,
|
||||
manager,
|
||||
))
|
||||
}
|
||||
Err(e) => Err(e),
|
||||
}
|
||||
}
|
||||
}
|
||||
224
bin/nats.rs
224
bin/nats.rs
|
|
@ -1,224 +0,0 @@
|
|||
use std::path::PathBuf;
|
||||
|
||||
use anyhow::{anyhow, Result};
|
||||
use async_nats::{
|
||||
jetstream::{
|
||||
self,
|
||||
kv::{Config as KvConfig, Store},
|
||||
stream::{Config as StreamConfig, Stream},
|
||||
Context,
|
||||
},
|
||||
Client, ConnectOptions,
|
||||
};
|
||||
|
||||
use wadm::DEFAULT_EXPIRY_TIME;
|
||||
|
||||
/// Creates a NATS client from the given options
|
||||
pub async fn get_client_and_context(
|
||||
url: String,
|
||||
js_domain: Option<String>,
|
||||
seed: Option<String>,
|
||||
jwt: Option<String>,
|
||||
creds_path: Option<PathBuf>,
|
||||
) -> Result<(Client, Context)> {
|
||||
let client = if seed.is_none() && jwt.is_none() && creds_path.is_none() {
|
||||
async_nats::connect(url).await?
|
||||
} else {
|
||||
let opts = build_nats_options(seed, jwt, creds_path).await?;
|
||||
async_nats::connect_with_options(url, opts).await?
|
||||
};
|
||||
|
||||
let context = if let Some(domain) = js_domain {
|
||||
jetstream::with_domain(client.clone(), domain)
|
||||
} else {
|
||||
jetstream::new(client.clone())
|
||||
};
|
||||
|
||||
Ok((client, context))
|
||||
}
|
||||
|
||||
async fn build_nats_options(
|
||||
seed: Option<String>,
|
||||
jwt: Option<String>,
|
||||
creds_path: Option<PathBuf>,
|
||||
) -> Result<ConnectOptions> {
|
||||
match (seed, jwt, creds_path) {
|
||||
(Some(seed), Some(jwt), None) => {
|
||||
let jwt = resolve_jwt(jwt).await?;
|
||||
let kp = std::sync::Arc::new(get_seed(seed).await?);
|
||||
|
||||
Ok(async_nats::ConnectOptions::with_jwt(jwt, move |nonce| {
|
||||
let key_pair = kp.clone();
|
||||
async move { key_pair.sign(&nonce).map_err(async_nats::AuthError::new) }
|
||||
}))
|
||||
}
|
||||
(None, None, Some(creds)) => async_nats::ConnectOptions::with_credentials_file(creds)
|
||||
.await
|
||||
.map_err(anyhow::Error::from),
|
||||
_ => {
|
||||
// We shouldn't ever get here due to the requirements on the flags, but return a helpful error just in case
|
||||
Err(anyhow::anyhow!(
|
||||
"Got too many options. Make sure to provide a seed and jwt or a creds path"
|
||||
))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Takes a string that could be a raw seed, or a path and does all the necessary loading and parsing steps
|
||||
async fn get_seed(seed: String) -> Result<nkeys::KeyPair> {
|
||||
// MAGIC NUMBER: Length of a seed key
|
||||
let raw_seed = if seed.len() == 58 && seed.starts_with('S') {
|
||||
seed
|
||||
} else {
|
||||
tokio::fs::read_to_string(seed).await?
|
||||
};
|
||||
|
||||
nkeys::KeyPair::from_seed(&raw_seed).map_err(anyhow::Error::from)
|
||||
}
|
||||
|
||||
/// Resolves a JWT value by either returning the string itself if it's a valid JWT
|
||||
/// or by loading the contents of a file specified by the JWT value.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `jwt_or_file` - A string that represents either a JWT or a file path containing a JWT.
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// A `Result` containing a string if successful, or an error if the JWT value
|
||||
/// is invalid or the file cannot be read.
|
||||
async fn resolve_jwt(jwt_or_file: String) -> Result<String> {
|
||||
if tokio::fs::metadata(&jwt_or_file)
|
||||
.await
|
||||
.map(|metadata| metadata.is_file())
|
||||
.unwrap_or(false)
|
||||
{
|
||||
tokio::fs::read_to_string(jwt_or_file)
|
||||
.await
|
||||
.map_err(|e| anyhow!("Error loading JWT from file: {e}"))
|
||||
} else {
|
||||
// We could do more validation on the JWT here, but if the JWT is invalid then
|
||||
// connecting will fail anyways
|
||||
Ok(jwt_or_file)
|
||||
}
|
||||
}
|
||||
|
||||
/// A helper that ensures that the given stream name exists, using defaults to create if it does
|
||||
/// not. Returns the handle to the stream
|
||||
pub async fn ensure_stream(
|
||||
context: &Context,
|
||||
name: String,
|
||||
subjects: Vec<String>,
|
||||
description: Option<String>,
|
||||
) -> Result<Stream> {
|
||||
context
|
||||
.get_or_create_stream(StreamConfig {
|
||||
name,
|
||||
description,
|
||||
num_replicas: 1,
|
||||
retention: async_nats::jetstream::stream::RetentionPolicy::WorkQueue,
|
||||
subjects,
|
||||
max_age: DEFAULT_EXPIRY_TIME,
|
||||
storage: async_nats::jetstream::stream::StorageType::File,
|
||||
allow_rollup: false,
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
.map_err(|e| anyhow::anyhow!("{e:?}"))
|
||||
}
|
||||
|
||||
pub async fn ensure_status_stream(
|
||||
context: &Context,
|
||||
name: String,
|
||||
subjects: Vec<String>,
|
||||
) -> Result<Stream> {
|
||||
context
|
||||
.get_or_create_stream(StreamConfig {
|
||||
name,
|
||||
description: Some(
|
||||
"A stream that stores all status updates for wadm applications".into(),
|
||||
),
|
||||
num_replicas: 1,
|
||||
allow_direct: true,
|
||||
retention: async_nats::jetstream::stream::RetentionPolicy::Limits,
|
||||
max_messages_per_subject: 10,
|
||||
subjects,
|
||||
max_age: std::time::Duration::from_nanos(0),
|
||||
storage: async_nats::jetstream::stream::StorageType::File,
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
.map_err(|e| anyhow::anyhow!("{e:?}"))
|
||||
}
|
||||
|
||||
/// A helper that ensures that the notify stream exists
|
||||
pub async fn ensure_notify_stream(
|
||||
context: &Context,
|
||||
name: String,
|
||||
subjects: Vec<String>,
|
||||
) -> Result<Stream> {
|
||||
context
|
||||
.get_or_create_stream(StreamConfig {
|
||||
name,
|
||||
description: Some("A stream for capturing all notification events for wadm".into()),
|
||||
num_replicas: 1,
|
||||
retention: async_nats::jetstream::stream::RetentionPolicy::Interest,
|
||||
subjects,
|
||||
max_age: DEFAULT_EXPIRY_TIME,
|
||||
storage: async_nats::jetstream::stream::StorageType::File,
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
.map_err(|e| anyhow::anyhow!("{e:?}"))
|
||||
}
|
||||
|
||||
/// A helper that ensures that the given KV bucket exists, using defaults to create if it does
|
||||
/// not. Returns the handle to the stream
|
||||
pub async fn ensure_kv_bucket(
|
||||
context: &Context,
|
||||
name: String,
|
||||
history_to_keep: i64,
|
||||
) -> Result<Store> {
|
||||
if let Ok(kv) = context.get_key_value(&name).await {
|
||||
Ok(kv)
|
||||
} else {
|
||||
context
|
||||
.create_key_value(KvConfig {
|
||||
bucket: name,
|
||||
history: history_to_keep,
|
||||
num_replicas: 1,
|
||||
storage: jetstream::stream::StorageType::File,
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
.map_err(|e| anyhow::anyhow!("{e:?}"))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::resolve_jwt;
|
||||
use anyhow::Result;
|
||||
|
||||
#[tokio::test]
|
||||
async fn can_resolve_jwt_value_and_file() -> Result<()> {
|
||||
let my_jwt = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ2aWRlb0lkIjoiUWpVaUxYSnVjMjl0IiwiaWF0IjoxNjIwNjAzNDY5fQ.2PKx6y2ym6IWbeM6zFgHOkDnZEtGTR3YgYlQ2_Jki5g";
|
||||
let jwt_path = "./test/data/nats.jwt";
|
||||
let jwt_inside_file = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdHJpbmciOiAiQWNjb3JkIHRvIGFsbCBrbm93biBsb3dzIG9mIGF2aWF0aW9uLCB0aGVyZSBpcyBubyB3YXkgdGhhdCBhIGJlZSBhYmxlIHRvIGZseSJ9.GyU6pTRhflcOg6KBCU6wZedP8BQzLXbdgYIoU6KzzD8";
|
||||
|
||||
assert_eq!(
|
||||
resolve_jwt(my_jwt.to_string())
|
||||
.await
|
||||
.expect("should resolve jwt string to itself"),
|
||||
my_jwt.to_string()
|
||||
);
|
||||
assert_eq!(
|
||||
resolve_jwt(jwt_path.to_string())
|
||||
.await
|
||||
.expect("should be able to read jwt file"),
|
||||
jwt_inside_file.to_string()
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,25 @@
|
|||
# Patterns to ignore when building packages.
|
||||
# This supports shell glob matching, relative path matching, and
|
||||
# negation (prefixed with !). Only one pattern per line.
|
||||
.DS_Store
|
||||
# Common VCS dirs
|
||||
.git/
|
||||
.gitignore
|
||||
.bzr/
|
||||
.bzrignore
|
||||
.hg/
|
||||
.hgignore
|
||||
.svn/
|
||||
# Common backup files
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*.orig
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
||||
.vscode/
|
||||
ci/
|
||||
.helmignore
|
||||
|
|
@ -0,0 +1,24 @@
|
|||
apiVersion: v2
|
||||
name: wadm
|
||||
description: A Helm chart for deploying wadm on Kubernetes
|
||||
|
||||
# A chart can be either an 'application' or a 'library' chart.
|
||||
#
|
||||
# Application charts are a collection of templates that can be packaged into versioned archives
|
||||
# to be deployed.
|
||||
#
|
||||
# Library charts provide useful utilities or functions for the chart developer. They're included as
|
||||
# a dependency of application charts to inject those utilities and functions into the rendering
|
||||
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
|
||||
type: application
|
||||
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: '0.2.10'
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
# follow Semantic Versioning. They should reflect the version the application is using.
|
||||
# It is recommended to use it with quotes.
|
||||
appVersion: 'v0.21.0'
|
||||
|
|
@ -0,0 +1,8 @@
|
|||
config:
|
||||
jetstream:
|
||||
enabled: true
|
||||
fileStore:
|
||||
pvc:
|
||||
enabled: false
|
||||
merge:
|
||||
domain: default
|
||||
|
|
@ -0,0 +1,3 @@
|
|||
validate-maintainers: false
|
||||
target-branch: main # TODO: Remove this once chart-testing 3.10.1+ is released
|
||||
helm-extra-args: --timeout 60s
|
||||
|
|
@ -0,0 +1,106 @@
|
|||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "wadm.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "wadm.fullname" -}}
|
||||
{{- if .Values.fullnameOverride }}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride }}
|
||||
{{- if contains $name .Release.Name }}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "wadm.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Common labels
|
||||
*/}}
|
||||
{{- define "wadm.labels" -}}
|
||||
helm.sh/chart: {{ include "wadm.chart" . }}
|
||||
{{ include "wadm.selectorLabels" . }}
|
||||
app.kubernetes.io/component: wadm
|
||||
{{- if .Chart.AppVersion }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
||||
{{- end }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/part-of: wadm
|
||||
{{- with .Values.additionalLabels }}
|
||||
{{ . | toYaml }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Selector labels
|
||||
*/}}
|
||||
{{- define "wadm.selectorLabels" -}}
|
||||
app.kubernetes.io/name: {{ include "wadm.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- end }}
|
||||
|
||||
{{- define "wadm.nats.server" -}}
|
||||
- name: WADM_NATS_SERVER
|
||||
{{- if .Values.wadm.config.nats.server }}
|
||||
value: {{ .Values.wadm.config.nats.server | quote }}
|
||||
{{- else }}
|
||||
value: nats-headless.{{ .Release.Namespace }}.svc.cluster.local
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{- define "wadm.nats.auth" -}}
|
||||
{{- if .Values.wadm.config.nats.creds.secretName -}}
|
||||
- name: WADM_NATS_CREDS_FILE
|
||||
value: {{ include "wadm.nats.creds_file_path" . | quote }}
|
||||
{{- else if and .Values.wadm.config.nats.creds.jwt .Values.wadm.config.nats.creds.seed -}}
|
||||
- name: WADM_NATS_NKEY
|
||||
value: {{ .Values.wadm.config.nats.creds.seed | quote }}
|
||||
- name: WADM_NATS_JWT
|
||||
value: {{ .Values.wadm.config.nats.creds.jwt | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{- define "wadm.nats.creds_file_path" }}
|
||||
{{- if .Values.wadm.config.nats.creds.secretName -}}
|
||||
/etc/nats-creds/nats.creds
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{- define "wadm.nats.creds_volume_mount" -}}
|
||||
{{- if .Values.wadm.config.nats.creds.secretName -}}
|
||||
volumeMounts:
|
||||
- name: nats-creds-secret-volume
|
||||
mountPath: "/etc/nats-creds"
|
||||
readOnly: true
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{- define "wadm.nats.creds_volume" -}}
|
||||
{{- with .Values.wadm.config.nats.creds -}}
|
||||
{{- if .secretName -}}
|
||||
volumes:
|
||||
- name: nats-creds-secret-volume
|
||||
secret:
|
||||
secretName: {{ .secretName }}
|
||||
items:
|
||||
- key: {{ .key }}
|
||||
path: "nats.creds"
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
|
@ -0,0 +1,106 @@
|
|||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ include "wadm.fullname" . }}
|
||||
labels:
|
||||
{{- include "wadm.labels" . | nindent 4 }}
|
||||
spec:
|
||||
replicas: {{ .Values.replicas }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "wadm.selectorLabels" . | nindent 6 }}
|
||||
template:
|
||||
metadata:
|
||||
{{- with .Values.podAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "wadm.labels" . | nindent 8 }}
|
||||
{{- with .Values.podLabels }}
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- with .Values.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.podSecurityContext | nindent 8 }}
|
||||
containers:
|
||||
- name: {{ .Chart.Name }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.securityContext | nindent 12 }}
|
||||
image: "{{ .Values.wadm.image.repository }}:{{ .Values.wadm.image.tag | default .Chart.AppVersion }}"
|
||||
imagePullPolicy: {{ .Values.wadm.image.pullPolicy }}
|
||||
env:
|
||||
{{- include "wadm.nats.server" . | nindent 12 }}
|
||||
{{- include "wadm.nats.auth" . | nindent 12 }}
|
||||
{{- if .Values.wadm.config.nats.tlsCaFile }}
|
||||
- name: WADM_NATS_TLS_CA_FILE
|
||||
value: {{ .Values.wadm.config.nats.tlsCaFile | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.wadm.config.hostId }}
|
||||
- name: WADM_HOST_ID
|
||||
value: {{ .Values.wadm.config.hostId | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.wadm.config.structuredLogging }}
|
||||
- name: WADM_STRUCTURED_LOGGING
|
||||
value: {{ .Values.wadm.config.structuredLogging | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.wadm.config.tracing }}
|
||||
- name: WADM_TRACING_ENABLED
|
||||
value: {{ .Values.wadm.config.tracing | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.wadm.config.tracingEndpoint }}
|
||||
- name: WADM_TRACING_ENDPOINT
|
||||
value: {{ .Values.wadm.config.tracingEndpoint | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.wadm.config.nats.jetstreamDomain }}
|
||||
- name: WADM_JETSTREAM_DOMAIN
|
||||
value: {{ .Values.wadm.config.nats.jetstreamDomain | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.wadm.config.maxJobs }}
|
||||
- name: WADM_MAX_JOBS
|
||||
value: {{ .Values.wadm.config.maxJobs }}
|
||||
{{- end }}
|
||||
{{- if .Values.wadm.config.stateBucket }}
|
||||
- name: WADM_STATE_BUCKET_NAME
|
||||
value: {{ .Values.wadm.config.stateBucket | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.wadm.config.manifestBucket }}
|
||||
- name: WADM_MANIFEST_BUCKET_NAME
|
||||
value: {{ .Values.wadm.config.manifestBucket | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.wadm.config.cleanupInterval }}
|
||||
- name: WADM_CLEANUP_INTERVAL
|
||||
value: {{ .Values.wadm.config.cleanupInterval }}
|
||||
{{- end }}
|
||||
{{- if .Values.wadm.config.apiPrefix }}
|
||||
- name: WADM_API_PREFIX
|
||||
value: {{ .Values.wadm.config.apiPrefix }}
|
||||
{{- end }}
|
||||
{{- if .Values.wadm.config.streamPrefix }}
|
||||
- name: WADM_STREAM_PREFIX
|
||||
value: {{ .Values.wadm.config.streamPrefix }}
|
||||
{{- end }}
|
||||
{{- if .Values.wadm.config.multitenant }}
|
||||
- name: WADM_MULTITENANT
|
||||
value: {{ .Values.wadm.config.multitenant | quote }}
|
||||
{{- end }}
|
||||
resources:
|
||||
{{- toYaml .Values.resources | nindent 12 }}
|
||||
{{- include "wadm.nats.creds_volume_mount" . | nindent 10 -}}
|
||||
{{- include "wadm.nats.creds_volume" . | nindent 6 -}}
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
|
|
@ -0,0 +1,85 @@
|
|||
wadm:
|
||||
# replicas represents the number of copies of wadm to run
|
||||
replicas: 1
|
||||
# image represents the image and tag for running wadm
|
||||
image:
|
||||
repository: ghcr.io/wasmcloud/wadm
|
||||
pullPolicy: IfNotPresent
|
||||
# Overrides the image tag whose default is the chart appVersion.
|
||||
tag: ""
|
||||
config:
|
||||
apiPrefix: ""
|
||||
streamPrefix: ""
|
||||
cleanupInterval: ""
|
||||
hostId: ""
|
||||
logLevel: ""
|
||||
nats:
|
||||
server: ""
|
||||
jetstreamDomain: ""
|
||||
tlsCaFile: ""
|
||||
creds:
|
||||
jwt: ""
|
||||
seed: ""
|
||||
secretName: ""
|
||||
key: "nats.creds"
|
||||
maxJobs: ""
|
||||
stateBucket: ""
|
||||
manifestBucket: ""
|
||||
multitenant: false
|
||||
structuredLogging: false
|
||||
tracing: false
|
||||
tracingEndpoint: ""
|
||||
|
||||
imagePullSecrets: []
|
||||
nameOverride: ""
|
||||
fullnameOverride: ""
|
||||
|
||||
additionalLabels: {}
|
||||
# app: wadm
|
||||
|
||||
serviceAccount:
|
||||
# Specifies whether a service account should be created
|
||||
create: true
|
||||
# Automatically mount a ServiceAccount's API credentials?
|
||||
automount: true
|
||||
# Annotations to add to the service account
|
||||
annotations: {}
|
||||
# The name of the service account to use.
|
||||
# If not set and create is true, a name is generated using the fullname template
|
||||
name: ""
|
||||
|
||||
podAnnotations: {}
|
||||
podLabels: {}
|
||||
|
||||
podSecurityContext: {}
|
||||
# fsGroup: 1000
|
||||
|
||||
securityContext:
|
||||
runAsUser: 1000
|
||||
runAsGroup: 1000
|
||||
runAsNonRoot: true
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- "ALL"
|
||||
seccompProfile:
|
||||
type: "RuntimeDefault"
|
||||
|
||||
|
||||
resources: {}
|
||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
||||
# choice for the user. This also increases chances charts run on environments with little
|
||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
tolerations: []
|
||||
|
||||
affinity: {}
|
||||
|
|
@ -0,0 +1,20 @@
|
|||
[package]
|
||||
name = "wadm-client"
|
||||
description = "A client library for interacting with the wadm API"
|
||||
version = "0.10.0"
|
||||
edition = "2021"
|
||||
authors = ["wasmCloud Team"]
|
||||
keywords = ["webassembly", "wasmcloud", "wadm"]
|
||||
license = "Apache-2.0"
|
||||
repository = "https://github.com/wasmcloud/wadm"
|
||||
|
||||
[dependencies]
|
||||
anyhow = { workspace = true }
|
||||
async-nats = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
nkeys = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
serde_yaml = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
tokio = { workspace = true, features = ["full"] }
|
||||
wadm-types = { workspace = true }
|
||||
|
|
@ -0,0 +1,36 @@
|
|||
use thiserror::Error;

/// Convenience alias for results produced by this crate, with [`ClientError`] as the error type.
pub type Result<T> = std::result::Result<T, ClientError>;

/// Errors that can occur when interacting with the wadm client.
#[derive(Error, Debug)]
pub enum ClientError {
    /// Unable to load the manifest from a given source. The underlying error is anyhow::Error to
    /// allow for flexibility in loading from different sources.
    #[error("Unable to load manifest: {0:?}")]
    ManifestLoad(anyhow::Error),
    /// An error occurred with the NATS transport
    #[error(transparent)]
    NatsError(#[from] async_nats::RequestError),
    /// An API error occurred with the request (the server responded with an error result)
    #[error("Invalid request: {0}")]
    ApiError(String),
    /// The named model was not found
    #[error("Model not found: {0}")]
    NotFound(String),
    /// Unable to serialize or deserialize YAML or JSON data.
    #[error("Unable to parse manifest: {0:?}")]
    Serialization(#[from] SerializationError),
    /// Any other errors that are not covered by the other error cases
    #[error(transparent)]
    Other(#[from] anyhow::Error),
}

/// Errors that can occur when serializing or deserializing YAML or JSON data.
#[derive(Error, Debug)]
pub enum SerializationError {
    /// A YAML (de)serialization failure from serde_yaml
    #[error(transparent)]
    Yaml(#[from] serde_yaml::Error),
    /// A JSON (de)serialization failure from serde_json
    #[error(transparent)]
    Json(#[from] serde_json::Error),
}
|
||||
|
|
@ -0,0 +1,297 @@
|
|||
//! A client for interacting with Wadm.
|
||||
use std::path::PathBuf;
|
||||
use std::sync::{Arc, OnceLock};
|
||||
|
||||
use async_nats::{HeaderMap, Message};
|
||||
use error::{ClientError, SerializationError};
|
||||
use futures::Stream;
|
||||
use topics::TopicGenerator;
|
||||
use wadm_types::{
|
||||
api::{
|
||||
DeleteModelRequest, DeleteModelResponse, DeleteResult, DeployModelRequest,
|
||||
DeployModelResponse, DeployResult, GetModelRequest, GetModelResponse, GetResult,
|
||||
ModelSummary, PutModelResponse, PutResult, Status, StatusResponse, StatusResult,
|
||||
VersionInfo, VersionResponse,
|
||||
},
|
||||
Manifest,
|
||||
};
|
||||
|
||||
mod nats;
|
||||
|
||||
pub mod error;
|
||||
pub use error::Result;
|
||||
pub mod loader;
|
||||
pub use loader::ManifestLoader;
|
||||
pub mod topics;
|
||||
|
||||
/// Headers for `Content-Type: application/json`
static HEADERS_CONTENT_TYPE_JSON: OnceLock<HeaderMap> = OnceLock::new();
/// Retrieve static content type headers
///
/// Lazily builds the header map on first call; all subsequent calls return the cached value.
fn get_headers_content_type_json() -> &'static HeaderMap {
    HEADERS_CONTENT_TYPE_JSON.get_or_init(|| {
        let mut headers = HeaderMap::new();
        headers.insert("Content-Type", "application/json");
        headers
    })
}
||||
|
||||
/// A client for the wadm API. Cloning is cheap: the topic generator is shared behind an `Arc`
/// and the NATS client type is itself `Clone`.
#[derive(Clone)]
pub struct Client {
    // Pre-computed topic names for the configured lattice and API prefix
    topics: Arc<TopicGenerator>,
    // The underlying NATS connection used for all requests and subscriptions
    client: async_nats::Client,
}
|
||||
|
||||
/// Options for connecting to a NATS server for a Wadm client. Setting none of these options will
/// default to anonymous authentication with a localhost NATS server running on port 4222
///
/// Valid combinations are: nothing set (anonymous), `seed` + `jwt` together, or `creds_path`
/// alone. Other combinations are rejected when connecting.
#[derive(Default, Clone)]
pub struct ClientConnectOptions {
    /// The URL of the NATS server to connect to. If not provided, the client will connect to the
    /// default NATS address of 127.0.0.1:4222
    pub url: Option<String>,
    /// An nkey seed to use for authenticating with the NATS server. This can either be the raw seed
    /// or a path to a file containing the seed. If used, the `jwt` option must be provided
    pub seed: Option<String>,
    /// A JWT to use for authenticating with the NATS server. This can either be the raw JWT or a
    /// path to a file containing the JWT. If used, the `seed` option must be provided
    pub jwt: Option<String>,
    /// A path to a file containing the credentials to use for authenticating with the NATS server.
    /// If used, the `seed` and `jwt` options must not be provided
    pub creds_path: Option<PathBuf>,
    /// An optional path to a file containing the root CA certificates to use for authenticating
    /// with the NATS server.
    pub ca_path: Option<PathBuf>,
}
|
||||
|
||||
impl Client {
    /// Creates a new client with the given lattice ID, optional API prefix, and connection options.
    /// Errors if it is unable to connect to the NATS server
    pub async fn new(
        lattice: &str,
        prefix: Option<&str>,
        opts: ClientConnectOptions,
    ) -> anyhow::Result<Self> {
        let topics = TopicGenerator::new(lattice, prefix);
        let nats_client =
            nats::get_client(opts.url, opts.seed, opts.jwt, opts.creds_path, opts.ca_path).await?;
        Ok(Client {
            topics: Arc::new(topics),
            client: nats_client,
        })
    }

    /// Creates a new client with the given lattice ID, optional API prefix, and NATS client. This
    /// is not recommended and is hidden because the async-nats crate is not 1.0 yet. That means it
    /// is a breaking API change every time we upgrade versions. DO NOT use this function unless you
    /// are willing to accept this breaking change. This function is explicitly excluded from our
    /// semver guarantees until async-nats is 1.0.
    #[doc(hidden)]
    pub fn from_nats_client(
        lattice: &str,
        prefix: Option<&str>,
        nats_client: async_nats::Client,
    ) -> Self {
        let topics = TopicGenerator::new(lattice, prefix);
        Client {
            topics: Arc::new(topics),
            client: nats_client,
        }
    }

    /// Puts the given manifest into the lattice. The manifest can be anything that implements the
    /// [`ManifestLoader`] trait (a path to a file, raw bytes, or an already parsed manifest).
    ///
    /// Returns the name and version of the manifest that was put into the lattice
    pub async fn put_manifest(&self, manifest: impl ManifestLoader) -> Result<(String, String)> {
        let manifest = manifest.load_manifest().await?;
        // The API accepts JSON, so re-serialize regardless of the original source format
        let manifest_bytes = serde_json::to_vec(&manifest).map_err(SerializationError::from)?;
        let topic = self.topics.model_put_topic();
        let resp = self
            .client
            .request_with_headers(
                topic,
                get_headers_content_type_json().clone(),
                manifest_bytes.into(),
            )
            .await?;
        let body: PutModelResponse =
            serde_json::from_slice(&resp.payload).map_err(SerializationError::from)?;
        if matches!(body.result, PutResult::Error) {
            return Err(ClientError::ApiError(body.message));
        }
        Ok((body.name, body.current_version))
    }

    /// Gets a list of all manifests in the lattice. This does not return the full manifest, just a
    /// summary of its metadata and status
    pub async fn list_manifests(&self) -> Result<Vec<ModelSummary>> {
        let topic = self.topics.model_list_topic();
        // Empty payload: the list endpoint takes no request body
        let resp = self
            .client
            .request(topic, Vec::with_capacity(0).into())
            .await?;
        let body: Vec<ModelSummary> =
            serde_json::from_slice(&resp.payload).map_err(SerializationError::from)?;
        Ok(body)
    }

    /// Gets a manifest from the lattice by name and optionally its version. If no version is set,
    /// the latest version will be returned
    pub async fn get_manifest(&self, name: &str, version: Option<&str>) -> Result<Manifest> {
        let topic = self.topics.model_get_topic(name);
        // Only send a body when a specific version was requested; an empty payload means "latest"
        let body = if let Some(version) = version {
            serde_json::to_vec(&GetModelRequest {
                version: Some(version.to_string()),
            })
            .map_err(SerializationError::from)?
        } else {
            Vec::with_capacity(0)
        };
        let resp = self.client.request(topic, body.into()).await?;
        let body: GetModelResponse =
            serde_json::from_slice(&resp.payload).map_err(SerializationError::from)?;

        match body.result {
            GetResult::Error => Err(ClientError::ApiError(body.message)),
            GetResult::NotFound => Err(ClientError::NotFound(name.to_string())),
            GetResult::Success => body.manifest.ok_or_else(|| {
                ClientError::ApiError("API returned success but didn't set a manifest".to_string())
            }),
        }
    }

    /// Deletes a manifest from the lattice by name and optionally its version. If no version is
    /// set, all versions will be deleted
    ///
    /// Returns true if the manifest was deleted, false if it was a noop (meaning it wasn't found or
    /// was already deleted)
    pub async fn delete_manifest(&self, name: &str, version: Option<&str>) -> Result<bool> {
        let topic = self.topics.model_delete_topic(name);
        // An empty payload requests deletion of all versions
        let body = if let Some(version) = version {
            serde_json::to_vec(&DeleteModelRequest {
                version: Some(version.to_string()),
            })
            .map_err(SerializationError::from)?
        } else {
            Vec::with_capacity(0)
        };
        let resp = self.client.request(topic, body.into()).await?;
        let body: DeleteModelResponse =
            serde_json::from_slice(&resp.payload).map_err(SerializationError::from)?;
        match body.result {
            DeleteResult::Error => Err(ClientError::ApiError(body.message)),
            DeleteResult::Noop => Ok(false),
            DeleteResult::Deleted => Ok(true),
        }
    }

    /// Gets a list of all versions of a manifest in the lattice
    pub async fn list_versions(&self, name: &str) -> Result<Vec<VersionInfo>> {
        let topic = self.topics.model_versions_topic(name);
        let resp = self
            .client
            .request(topic, Vec::with_capacity(0).into())
            .await?;
        let body: VersionResponse =
            serde_json::from_slice(&resp.payload).map_err(SerializationError::from)?;
        match body.result {
            GetResult::Error => Err(ClientError::ApiError(body.message)),
            GetResult::NotFound => Err(ClientError::NotFound(name.to_string())),
            GetResult::Success => Ok(body.versions),
        }
    }

    /// Deploys a manifest to the lattice. The optional version parameter can be used to deploy a
    /// specific version of a manifest. If no version is set, the latest version will be deployed
    ///
    /// Please note that an OK response does not necessarily mean that the manifest was deployed
    /// successfully, just that the server accepted the deployment request.
    ///
    /// Returns a tuple of the name and version of the manifest that was deployed
    pub async fn deploy_manifest(
        &self,
        name: &str,
        version: Option<&str>,
    ) -> Result<(String, Option<String>)> {
        let topic = self.topics.model_deploy_topic(name);
        // An empty payload deploys the latest version
        let body = if let Some(version) = version {
            serde_json::to_vec(&DeployModelRequest {
                version: Some(version.to_string()),
            })
            .map_err(SerializationError::from)?
        } else {
            Vec::with_capacity(0)
        };
        let resp = self.client.request(topic, body.into()).await?;
        let body: DeployModelResponse =
            serde_json::from_slice(&resp.payload).map_err(SerializationError::from)?;
        match body.result {
            DeployResult::Error => Err(ClientError::ApiError(body.message)),
            DeployResult::NotFound => Err(ClientError::NotFound(name.to_string())),
            DeployResult::Acknowledged => Ok((body.name, body.version)),
        }
    }

    /// A shorthand method that is the equivalent of calling [`put_manifest`](Self::put_manifest)
    /// and then [`deploy_manifest`](Self::deploy_manifest)
    ///
    /// Returns the name and version of the manifest that was deployed. Note that this will always
    /// deploy the latest version of the manifest (i.e. the one that was just put)
    pub async fn put_and_deploy_manifest(
        &self,
        manifest: impl ManifestLoader,
    ) -> Result<(String, String)> {
        let (name, version) = self.put_manifest(manifest).await?;
        // Passing the version explicitly isn't strictly required (the version just put is the
        // latest), but we pin it anyway so the behavior stays deterministic just in case
        self.deploy_manifest(&name, Some(&version)).await?;
        Ok((name, version))
    }

    /// Undeploys the given manifest from the lattice
    ///
    /// Returns Ok(manifest_name) if the manifest undeploy request was acknowledged
    pub async fn undeploy_manifest(&self, name: &str) -> Result<String> {
        let topic = self.topics.model_undeploy_topic(name);
        let resp = self
            .client
            .request(topic, Vec::with_capacity(0).into())
            .await?;
        let body: DeployModelResponse =
            serde_json::from_slice(&resp.payload).map_err(SerializationError::from)?;
        match body.result {
            DeployResult::Error => Err(ClientError::ApiError(body.message)),
            DeployResult::NotFound => Err(ClientError::NotFound(name.to_string())),
            DeployResult::Acknowledged => Ok(body.name),
        }
    }

    /// Gets the status of the given manifest
    pub async fn get_manifest_status(&self, name: &str) -> Result<Status> {
        let topic = self.topics.model_status_topic(name);
        let resp = self
            .client
            .request(topic, Vec::with_capacity(0).into())
            .await?;
        let body: StatusResponse =
            serde_json::from_slice(&resp.payload).map_err(SerializationError::from)?;
        match body.result {
            StatusResult::Error => Err(ClientError::ApiError(body.message)),
            StatusResult::NotFound => Err(ClientError::NotFound(name.to_string())),
            StatusResult::Ok => body.status.ok_or_else(|| {
                ClientError::ApiError("API returned success but didn't set a status".to_string())
            }),
        }
    }

    /// Subscribes to the status of a given manifest
    ///
    /// Returns a stream of raw NATS messages published on the status topic for the named app
    pub async fn subscribe_to_status(&self, name: &str) -> Result<impl Stream<Item = Message>> {
        let subject = self.topics.wadm_status_topic(name);
        let subscriber = self
            .client
            .subscribe(subject)
            .await
            .map_err(|e| ClientError::ApiError(e.to_string()))?;

        Ok(subscriber)
    }
}
|
||||
|
|
@ -0,0 +1,68 @@
|
|||
//! Various helpers and traits for loading and parsing manifests
|
||||
|
||||
use std::{
|
||||
future::Future,
|
||||
path::{Path, PathBuf},
|
||||
};
|
||||
|
||||
use wadm_types::Manifest;
|
||||
|
||||
use crate::{error::ClientError, Result};
|
||||
|
||||
/// A trait for loading a [`Manifest`] from a variety of sources. This is also used as a convenience
/// trait in the client for easily passing in any type of Manifest
pub trait ManifestLoader {
    /// Consumes the loader and produces a parsed [`Manifest`]
    fn load_manifest(self) -> impl Future<Output = Result<Manifest>>;
}

// A borrowed manifest is cloned so the loader can hand back an owned value
impl ManifestLoader for &Manifest {
    async fn load_manifest(self) -> Result<Manifest> {
        Ok(self.clone())
    }
}

// An already-parsed manifest is passed through unchanged
impl ManifestLoader for Manifest {
    async fn load_manifest(self) -> Result<Manifest> {
        Ok(self)
    }
}

// Raw bytes are parsed as YAML first, then JSON
impl ManifestLoader for Vec<u8> {
    async fn load_manifest(self) -> Result<Manifest> {
        parse_yaml_or_json(self).map_err(Into::into)
    }
}

impl ManifestLoader for &[u8] {
    async fn load_manifest(self) -> Result<Manifest> {
        parse_yaml_or_json(self).map_err(Into::into)
    }
}

// Helper macro for implementing `ManifestLoader` for anything that implements `AsRef<Path>` (which
// results in a compiler error if we do it generically)
macro_rules! impl_manifest_loader_for_path {
    ($($ty:ty),*) => {
        $(
            impl ManifestLoader for $ty {
                async fn load_manifest(self) -> Result<Manifest> {
                    // Read the file from disk, surfacing any I/O failure as ManifestLoad
                    let raw = tokio::fs::read(self).await.map_err(|e| ClientError::ManifestLoad(e.into()))?;
                    parse_yaml_or_json(raw).map_err(Into::into)
                }
            }
        )*
    };
}

impl_manifest_loader_for_path!(&Path, &str, &String, String, PathBuf, &PathBuf);
|
||||
|
||||
/// A simple function that attempts to parse the given bytes as YAML or JSON. This is used in the
/// implementations of `ManifestLoader`
///
/// Note: if both attempts fail, the returned error is the JSON error from the fallback attempt;
/// the original YAML error is discarded.
pub fn parse_yaml_or_json(
    raw: impl AsRef<[u8]>,
) -> std::result::Result<Manifest, crate::error::SerializationError> {
    // Attempt to parse as YAML first, then JSON
    serde_yaml::from_slice(raw.as_ref())
        .or_else(|_| serde_json::from_slice(raw.as_ref()))
        .map_err(Into::into)
}
|
||||
|
|
@ -0,0 +1,75 @@
|
|||
//! Helpers for creating a NATS client without exposing the NATS client in the API
|
||||
use std::path::PathBuf;
|
||||
|
||||
use anyhow::{Context, Result};
|
||||
use async_nats::{Client, ConnectOptions};
|
||||
|
||||
const DEFAULT_NATS_ADDR: &str = "nats://127.0.0.1:4222";
|
||||
|
||||
/// Creates a NATS client from the given options
///
/// Authentication is chosen by which arguments are set: a `seed` + `jwt` pair, a credentials
/// file, or nothing at all (anonymous). Any other combination is rejected with an error. When
/// `ca_path` is given, those certificates are added as roots and TLS is required. When `url` is
/// not given, the default localhost address is used.
pub async fn get_client(
    url: Option<String>,
    seed: Option<String>,
    jwt: Option<String>,
    creds_path: Option<PathBuf>,
    ca_path: Option<PathBuf>,
) -> Result<Client> {
    let mut opts = ConnectOptions::new();
    opts = match (seed, jwt, creds_path) {
        (Some(seed), Some(jwt), None) => {
            // Both values may be raw strings or paths to files on disk
            let jwt = resolve_jwt(jwt).await?;
            let kp = std::sync::Arc::new(get_seed(seed).await?);

            // Sign the server-provided nonce with the seed's key pair on each (re)connect
            opts.jwt(jwt, move |nonce| {
                let key_pair = kp.clone();
                async move { key_pair.sign(&nonce).map_err(async_nats::AuthError::new) }
            })
        }
        (None, None, Some(creds)) => opts.credentials_file(creds).await?,
        (None, None, None) => opts,
        _ => {
            // We shouldn't ever get here due to the requirements on the flags, but return a helpful error just in case
            return Err(anyhow::anyhow!(
                "Got incorrect combination of connection options. Should either have nothing set, a seed, a jwt, or a credentials file"
            ));
        }
    };
    if let Some(ca) = ca_path {
        opts = opts.add_root_certificates(ca).require_tls(true);
    }
    opts.connect(url.unwrap_or_else(|| DEFAULT_NATS_ADDR.to_string()))
        .await
        .map_err(Into::into)
}
|
||||
|
||||
/// Takes a string that could be a raw seed, or a path and does all the necessary loading and parsing steps
|
||||
async fn get_seed(seed: String) -> Result<nkeys::KeyPair> {
|
||||
// MAGIC NUMBER: Length of a seed key
|
||||
let raw_seed = if seed.len() == 58 && seed.starts_with('S') {
|
||||
seed
|
||||
} else {
|
||||
tokio::fs::read_to_string(seed)
|
||||
.await
|
||||
.context("Unable to read seed file")?
|
||||
};
|
||||
|
||||
nkeys::KeyPair::from_seed(&raw_seed).map_err(anyhow::Error::from)
|
||||
}
|
||||
|
||||
/// Resolves a JWT value by either returning the string itself if it's a valid JWT
|
||||
/// or by loading the contents of a file specified by the JWT value.
|
||||
async fn resolve_jwt(jwt_or_file: String) -> Result<String> {
|
||||
if tokio::fs::metadata(&jwt_or_file)
|
||||
.await
|
||||
.map(|metadata| metadata.is_file())
|
||||
.unwrap_or(false)
|
||||
{
|
||||
tokio::fs::read_to_string(jwt_or_file)
|
||||
.await
|
||||
.map_err(|e| anyhow::anyhow!("Error loading JWT from file: {e}"))
|
||||
} else {
|
||||
// We could do more validation on the JWT here, but if the JWT is invalid then
|
||||
// connecting will fail anyways
|
||||
Ok(jwt_or_file)
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,81 @@
|
|||
use wadm_types::api::{DEFAULT_WADM_TOPIC_PREFIX, WADM_STATUS_API_PREFIX};
|
||||
|
||||
/// A generator that uses various config options to generate the proper topic names for the wadm API
|
||||
pub struct TopicGenerator {
|
||||
topic_prefix: String,
|
||||
model_prefix: String,
|
||||
}
|
||||
|
||||
impl TopicGenerator {
|
||||
/// Creates a new topic generator with a lattice ID and an optional API prefix
|
||||
pub fn new(lattice: &str, prefix: Option<&str>) -> TopicGenerator {
|
||||
let topic_prefix = format!(
|
||||
"{}.{}",
|
||||
prefix.unwrap_or(DEFAULT_WADM_TOPIC_PREFIX),
|
||||
lattice
|
||||
);
|
||||
let model_prefix = format!("{}.model", topic_prefix);
|
||||
TopicGenerator {
|
||||
topic_prefix,
|
||||
model_prefix,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the full prefix for the topic, including the API prefix and the lattice ID
|
||||
pub fn prefix(&self) -> &str {
|
||||
&self.topic_prefix
|
||||
}
|
||||
|
||||
/// Returns the full prefix for model operations (currently the only operations supported in the
|
||||
/// API)
|
||||
pub fn model_prefix(&self) -> &str {
|
||||
&self.model_prefix
|
||||
}
|
||||
|
||||
/// Returns the full topic for a model put operation
|
||||
pub fn model_put_topic(&self) -> String {
|
||||
format!("{}.put", self.model_prefix())
|
||||
}
|
||||
|
||||
/// Returns the full topic for a model get operation
|
||||
pub fn model_get_topic(&self, model_name: &str) -> String {
|
||||
format!("{}.get.{model_name}", self.model_prefix())
|
||||
}
|
||||
|
||||
/// Returns the full topic for a model delete operation
|
||||
pub fn model_delete_topic(&self, model_name: &str) -> String {
|
||||
format!("{}.del.{model_name}", self.model_prefix())
|
||||
}
|
||||
|
||||
/// Returns the full topic for a model list operation
|
||||
pub fn model_list_topic(&self) -> String {
|
||||
format!("{}.list", self.model_prefix())
|
||||
}
|
||||
|
||||
/// Returns the full topic for listing the versions of a model
|
||||
pub fn model_versions_topic(&self, model_name: &str) -> String {
|
||||
format!("{}.versions.{model_name}", self.model_prefix())
|
||||
}
|
||||
|
||||
/// Returns the full topic for a model deploy operation
|
||||
pub fn model_deploy_topic(&self, model_name: &str) -> String {
|
||||
format!("{}.deploy.{model_name}", self.model_prefix())
|
||||
}
|
||||
|
||||
/// Returns the full topic for a model undeploy operation
|
||||
pub fn model_undeploy_topic(&self, model_name: &str) -> String {
|
||||
format!("{}.undeploy.{model_name}", self.model_prefix())
|
||||
}
|
||||
|
||||
/// Returns the full topic for getting a model status
|
||||
pub fn model_status_topic(&self, model_name: &str) -> String {
|
||||
format!("{}.status.{model_name}", self.model_prefix())
|
||||
}
|
||||
|
||||
/// Returns the full topic for WADM status subscriptions
|
||||
pub fn wadm_status_topic(&self, app_name: &str) -> String {
|
||||
// Extract just the lattice name from topic_prefix
|
||||
let lattice = self.topic_prefix.split('.').last().unwrap_or("default");
|
||||
format!("{}.{}.{}", WADM_STATUS_API_PREFIX, lattice, app_name)
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,28 @@
|
|||
[package]
|
||||
name = "wadm-types"
|
||||
description = "Types and validators for the wadm API"
|
||||
version = "0.8.3"
|
||||
edition = "2021"
|
||||
authors = ["wasmCloud Team"]
|
||||
keywords = ["webassembly", "wasmcloud", "wadm"]
|
||||
license = "Apache-2.0"
|
||||
repository = "https://github.com/wasmcloud/wadm"
|
||||
|
||||
[features]
|
||||
wit = []
|
||||
|
||||
[dependencies]
|
||||
anyhow = { workspace = true }
|
||||
regex = { workspace = true }
|
||||
schemars = { workspace = true }
|
||||
serde = { workspace = true, features = ["derive"] }
|
||||
serde_json = { workspace = true }
|
||||
serde_yaml = { workspace = true }
|
||||
utoipa = { workspace = true }
|
||||
|
||||
[target.'cfg(not(target_family = "wasm"))'.dependencies]
|
||||
tokio = { workspace = true, features = ["full"] }
|
||||
wit-bindgen-wrpc = { workspace = true }
|
||||
|
||||
[target.'cfg(target_family = "wasm")'.dependencies]
|
||||
wit-bindgen = { workspace = true, features = ["macros"] }
|
||||
|
|
@ -1,6 +1,10 @@
|
|||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::model::Manifest;
|
||||
use crate::Manifest;
|
||||
|
||||
/// The default topic prefix for the wadm API;
|
||||
pub const DEFAULT_WADM_TOPIC_PREFIX: &str = "wadm.api";
|
||||
pub const WADM_STATUS_API_PREFIX: &str = "wadm.status";
|
||||
|
||||
/// The request body for getting a manifest
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
|
|
@ -19,6 +23,14 @@ pub struct GetModelResponse {
|
|||
pub manifest: Option<Manifest>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct ListModelsResponse {
|
||||
pub result: GetResult,
|
||||
#[serde(default)]
|
||||
pub message: String,
|
||||
pub models: Vec<ModelSummary>,
|
||||
}
|
||||
|
||||
/// Possible outcomes of a get request
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
|
|
@ -52,13 +64,17 @@ pub enum PutResult {
|
|||
}
|
||||
|
||||
/// Summary of a given model returned when listing
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[derive(Debug, Serialize, Deserialize, Clone)]
|
||||
pub struct ModelSummary {
|
||||
pub name: String,
|
||||
pub version: String,
|
||||
pub description: Option<String>,
|
||||
pub deployed_version: Option<String>,
|
||||
#[serde(default)]
|
||||
pub detailed_status: Status,
|
||||
#[deprecated(since = "0.14.0", note = "Use detailed_status instead")]
|
||||
pub status: StatusType,
|
||||
#[deprecated(since = "0.14.0", note = "Use detailed_status instead")]
|
||||
pub status_message: Option<String>,
|
||||
}
|
||||
|
||||
|
|
@ -72,7 +88,7 @@ pub struct VersionResponse {
|
|||
}
|
||||
|
||||
/// Information about a given version of a model, returned as part of a list of all versions
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[derive(Debug, Serialize, Deserialize, Clone)]
|
||||
pub struct VersionInfo {
|
||||
pub version: String,
|
||||
pub deployed: bool,
|
||||
|
|
@ -82,9 +98,7 @@ pub struct VersionInfo {
|
|||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct DeleteModelRequest {
|
||||
#[serde(default)]
|
||||
pub version: String,
|
||||
#[serde(default)]
|
||||
pub delete_all: bool,
|
||||
pub version: Option<String>,
|
||||
}
|
||||
|
||||
/// A response from a delete request
|
||||
|
|
@ -98,7 +112,7 @@ pub struct DeleteModelResponse {
|
|||
}
|
||||
|
||||
/// All possible outcomes of a delete operation
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum DeleteResult {
|
||||
Deleted,
|
||||
|
|
@ -115,12 +129,16 @@ pub struct DeployModelRequest {
|
|||
pub version: Option<String>,
|
||||
}
|
||||
|
||||
/// A response from a deploy request
|
||||
/// A response from a deploy or undeploy request
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct DeployModelResponse {
|
||||
pub result: DeployResult,
|
||||
#[serde(default)]
|
||||
pub message: String,
|
||||
#[serde(default)]
|
||||
pub name: String,
|
||||
#[serde(default)]
|
||||
pub version: Option<String>,
|
||||
}
|
||||
|
||||
/// All possible outcomes of a deploy operation
|
||||
|
|
@ -133,10 +151,10 @@ pub enum DeployResult {
|
|||
}
|
||||
|
||||
/// A request to undeploy a model
|
||||
///
|
||||
/// Right now this is just an empty struct, but it is reserved for future use
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct UndeployModelRequest {
|
||||
pub non_destructive: bool,
|
||||
}
|
||||
pub struct UndeployModelRequest {}
|
||||
|
||||
/// A response to a status request
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
|
|
@ -158,27 +176,46 @@ pub enum StatusResult {
|
|||
}
|
||||
|
||||
/// The current status of a model
|
||||
#[derive(Debug, Serialize, Deserialize, Default, Clone)]
|
||||
#[derive(Debug, Serialize, Deserialize, Default, Clone, PartialEq, Eq)]
|
||||
pub struct Status {
|
||||
pub version: String,
|
||||
#[serde(rename = "status")]
|
||||
pub info: StatusInfo,
|
||||
#[serde(skip_serializing_if = "Vec::is_empty", default)]
|
||||
pub scalers: Vec<ScalerStatus>,
|
||||
#[serde(default)]
|
||||
#[deprecated(since = "0.14.0")]
|
||||
pub version: String,
|
||||
#[serde(default)]
|
||||
#[deprecated(since = "0.14.0")]
|
||||
pub components: Vec<ComponentStatus>,
|
||||
}
|
||||
|
||||
impl Status {
|
||||
pub fn new(info: StatusInfo, scalers: Vec<ScalerStatus>) -> Self {
|
||||
#[allow(deprecated)]
|
||||
Status {
|
||||
info,
|
||||
scalers,
|
||||
version: String::with_capacity(0),
|
||||
components: Vec::with_capacity(0),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The current status of a component
|
||||
#[derive(Debug, Serialize, Deserialize, Default, Clone)]
|
||||
#[derive(Debug, Serialize, Deserialize, Default, Clone, Eq, PartialEq)]
|
||||
pub struct ComponentStatus {
|
||||
pub name: String,
|
||||
#[serde(rename = "type")]
|
||||
pub component_type: String,
|
||||
#[serde(rename = "status")]
|
||||
pub info: StatusInfo,
|
||||
#[serde(skip_serializing_if = "Vec::is_empty", default)]
|
||||
pub traits: Vec<TraitStatus>,
|
||||
}
|
||||
|
||||
/// The current status of a trait
|
||||
#[derive(Debug, Serialize, Deserialize, Default, Clone)]
|
||||
#[derive(Debug, Serialize, Deserialize, Default, Clone, Eq, PartialEq)]
|
||||
pub struct TraitStatus {
|
||||
#[serde(rename = "type")]
|
||||
pub trait_type: String,
|
||||
|
|
@ -186,6 +223,22 @@ pub struct TraitStatus {
|
|||
pub info: StatusInfo,
|
||||
}
|
||||
|
||||
/// The current status of a scaler
|
||||
#[derive(Debug, Serialize, Deserialize, Default, Clone, Eq, PartialEq)]
|
||||
pub struct ScalerStatus {
|
||||
/// The id of the scaler
|
||||
#[serde(default)]
|
||||
pub id: String,
|
||||
/// The kind of scaler
|
||||
#[serde(default)]
|
||||
pub kind: String,
|
||||
/// The human-readable name of the scaler
|
||||
#[serde(default)]
|
||||
pub name: String,
|
||||
#[serde(rename = "status")]
|
||||
pub info: StatusInfo,
|
||||
}
|
||||
|
||||
/// Common high-level status information
|
||||
#[derive(Debug, Serialize, Deserialize, Default, Clone, Eq, PartialEq)]
|
||||
pub struct StatusInfo {
|
||||
|
|
@ -203,9 +256,9 @@ impl StatusInfo {
|
|||
}
|
||||
}
|
||||
|
||||
pub fn ready(message: &str) -> Self {
|
||||
pub fn deployed(message: &str) -> Self {
|
||||
StatusInfo {
|
||||
status_type: StatusType::Ready,
|
||||
status_type: StatusType::Deployed,
|
||||
message: message.to_owned(),
|
||||
}
|
||||
}
|
||||
|
|
@ -217,9 +270,23 @@ impl StatusInfo {
|
|||
}
|
||||
}
|
||||
|
||||
pub fn compensating(message: &str) -> Self {
|
||||
pub fn reconciling(message: &str) -> Self {
|
||||
StatusInfo {
|
||||
status_type: StatusType::Compensating,
|
||||
status_type: StatusType::Reconciling,
|
||||
message: message.to_owned(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn waiting(message: &str) -> Self {
|
||||
StatusInfo {
|
||||
status_type: StatusType::Waiting,
|
||||
message: message.to_owned(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn unhealthy(message: &str) -> Self {
|
||||
StatusInfo {
|
||||
status_type: StatusType::Unhealthy,
|
||||
message: message.to_owned(),
|
||||
}
|
||||
}
|
||||
|
|
@ -229,11 +296,15 @@ impl StatusInfo {
|
|||
#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Clone, Copy, Default)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum StatusType {
|
||||
Waiting,
|
||||
#[default]
|
||||
Undeployed,
|
||||
Compensating,
|
||||
Ready,
|
||||
#[serde(alias = "compensating")]
|
||||
Reconciling,
|
||||
#[serde(alias = "ready")]
|
||||
Deployed,
|
||||
Failed,
|
||||
Unhealthy,
|
||||
}
|
||||
|
||||
// Implementing add makes it easy for use to get an aggregate status by summing all of them together
|
||||
|
|
@ -256,9 +327,15 @@ impl std::ops::Add for StatusType {
|
|||
// If anything is undeployed, the whole thing is
|
||||
(Self::Undeployed, _) => Self::Undeployed,
|
||||
(_, Self::Undeployed) => Self::Undeployed,
|
||||
(Self::Compensating, _) => Self::Compensating,
|
||||
(_, Self::Compensating) => Self::Compensating,
|
||||
_ => unreachable!("aggregating StatusType failure. This is programmer error"),
|
||||
// If anything is waiting, the whole thing is
|
||||
(Self::Waiting, _) => Self::Waiting,
|
||||
(_, Self::Waiting) => Self::Waiting,
|
||||
(Self::Reconciling, _) => Self::Reconciling,
|
||||
(_, Self::Reconciling) => Self::Reconciling,
|
||||
(Self::Unhealthy, _) => Self::Unhealthy,
|
||||
(_, Self::Unhealthy) => Self::Unhealthy,
|
||||
// This is technically covered in the first comparison, but we'll be explicit
|
||||
(Self::Deployed, Self::Deployed) => Self::Deployed,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -278,8 +355,10 @@ mod test {
|
|||
#[test]
|
||||
fn test_status_aggregate() {
|
||||
assert!(matches!(
|
||||
[StatusType::Ready, StatusType::Ready].into_iter().sum(),
|
||||
StatusType::Ready
|
||||
[StatusType::Deployed, StatusType::Deployed]
|
||||
.into_iter()
|
||||
.sum(),
|
||||
StatusType::Deployed
|
||||
));
|
||||
|
||||
assert!(matches!(
|
||||
|
|
@ -297,14 +376,14 @@ mod test {
|
|||
));
|
||||
|
||||
assert!(matches!(
|
||||
[StatusType::Compensating, StatusType::Undeployed]
|
||||
[StatusType::Reconciling, StatusType::Undeployed]
|
||||
.into_iter()
|
||||
.sum(),
|
||||
StatusType::Undeployed
|
||||
));
|
||||
|
||||
assert!(matches!(
|
||||
[StatusType::Ready, StatusType::Undeployed]
|
||||
[StatusType::Deployed, StatusType::Undeployed]
|
||||
.into_iter()
|
||||
.sum(),
|
||||
StatusType::Undeployed
|
||||
|
|
@ -312,8 +391,8 @@ mod test {
|
|||
|
||||
assert!(matches!(
|
||||
[
|
||||
StatusType::Ready,
|
||||
StatusType::Compensating,
|
||||
StatusType::Deployed,
|
||||
StatusType::Reconciling,
|
||||
StatusType::Undeployed,
|
||||
StatusType::Failed
|
||||
]
|
||||
|
|
@ -322,6 +401,20 @@ mod test {
|
|||
StatusType::Failed
|
||||
));
|
||||
|
||||
assert!(matches!(
|
||||
[StatusType::Deployed, StatusType::Unhealthy]
|
||||
.into_iter()
|
||||
.sum(),
|
||||
StatusType::Unhealthy
|
||||
));
|
||||
|
||||
assert!(matches!(
|
||||
[StatusType::Reconciling, StatusType::Unhealthy]
|
||||
.into_iter()
|
||||
.sum(),
|
||||
StatusType::Reconciling
|
||||
));
|
||||
|
||||
let empty: Vec<StatusType> = Vec::new();
|
||||
assert!(matches!(empty.into_iter().sum(), StatusType::Undeployed));
|
||||
}
|
||||
|
|
@ -0,0 +1,621 @@
|
|||
use crate::{
|
||||
api::{
|
||||
ComponentStatus, DeleteResult, GetResult, ModelSummary, PutResult, Status, StatusInfo,
|
||||
StatusResult, StatusType, TraitStatus, VersionInfo,
|
||||
},
|
||||
CapabilityProperties, Component, ComponentProperties, ConfigDefinition, ConfigProperty,
|
||||
LinkProperty, Manifest, Metadata, Policy, Properties, SecretProperty, SecretSourceProperty,
|
||||
SharedApplicationComponentProperties, Specification, Spread, SpreadScalerProperty,
|
||||
TargetConfig, Trait, TraitProperty,
|
||||
};
|
||||
use wasmcloud::wadm;
|
||||
|
||||
#[cfg(all(feature = "wit", target_family = "wasm"))]
|
||||
wit_bindgen::generate!({
|
||||
path: "wit",
|
||||
additional_derives: [
|
||||
serde::Serialize,
|
||||
serde::Deserialize,
|
||||
],
|
||||
with: {
|
||||
"wasmcloud:wadm/types@0.2.0": generate,
|
||||
"wasmcloud:wadm/client@0.2.0": generate,
|
||||
"wasmcloud:wadm/handler@0.2.0": generate
|
||||
}
|
||||
});
|
||||
|
||||
#[cfg(all(feature = "wit", not(target_family = "wasm")))]
|
||||
wit_bindgen_wrpc::generate!({
|
||||
generate_unused_types: true,
|
||||
additional_derives: [
|
||||
serde::Serialize,
|
||||
serde::Deserialize,
|
||||
],
|
||||
with: {
|
||||
"wasmcloud:wadm/types@0.2.0": generate,
|
||||
"wasmcloud:wadm/client@0.2.0": generate,
|
||||
"wasmcloud:wadm/handler@0.2.0": generate
|
||||
}
|
||||
});
|
||||
|
||||
// Trait implementations for converting types in the API module to the generated types
|
||||
|
||||
impl From<Manifest> for wadm::types::OamManifest {
|
||||
fn from(manifest: Manifest) -> Self {
|
||||
wadm::types::OamManifest {
|
||||
api_version: manifest.api_version.to_string(),
|
||||
kind: manifest.kind.to_string(),
|
||||
metadata: manifest.metadata.into(),
|
||||
spec: manifest.spec.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Metadata> for wadm::types::Metadata {
|
||||
fn from(metadata: Metadata) -> Self {
|
||||
wadm::types::Metadata {
|
||||
name: metadata.name,
|
||||
annotations: metadata.annotations.into_iter().collect(),
|
||||
labels: metadata.labels.into_iter().collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Specification> for wadm::types::Specification {
|
||||
fn from(spec: Specification) -> Self {
|
||||
wadm::types::Specification {
|
||||
components: spec.components.into_iter().map(|c| c.into()).collect(),
|
||||
policies: spec.policies.into_iter().map(|c| c.into()).collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Component> for wadm::types::Component {
|
||||
fn from(component: Component) -> Self {
|
||||
wadm::types::Component {
|
||||
name: component.name,
|
||||
properties: component.properties.into(),
|
||||
traits: component
|
||||
.traits
|
||||
.map(|traits| traits.into_iter().map(|t| t.into()).collect()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Policy> for wadm::types::Policy {
|
||||
fn from(policy: Policy) -> Self {
|
||||
wadm::types::Policy {
|
||||
name: policy.name,
|
||||
properties: policy.properties.into_iter().collect(),
|
||||
type_: policy.policy_type,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Properties> for wadm::types::Properties {
|
||||
fn from(properties: Properties) -> Self {
|
||||
match properties {
|
||||
Properties::Component { properties } => {
|
||||
wadm::types::Properties::Component(properties.into())
|
||||
}
|
||||
Properties::Capability { properties } => {
|
||||
wadm::types::Properties::Capability(properties.into())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ComponentProperties> for wadm::types::ComponentProperties {
|
||||
fn from(properties: ComponentProperties) -> Self {
|
||||
wadm::types::ComponentProperties {
|
||||
application: properties.application.map(Into::into),
|
||||
image: properties.image,
|
||||
id: properties.id,
|
||||
config: properties.config.into_iter().map(|c| c.into()).collect(),
|
||||
secrets: properties.secrets.into_iter().map(|c| c.into()).collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<CapabilityProperties> for wadm::types::CapabilityProperties {
|
||||
fn from(properties: CapabilityProperties) -> Self {
|
||||
wadm::types::CapabilityProperties {
|
||||
application: properties.application.map(Into::into),
|
||||
image: properties.image,
|
||||
id: properties.id,
|
||||
config: properties.config.into_iter().map(|c| c.into()).collect(),
|
||||
secrets: properties.secrets.into_iter().map(|c| c.into()).collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ConfigProperty> for wadm::types::ConfigProperty {
|
||||
fn from(property: ConfigProperty) -> Self {
|
||||
wadm::types::ConfigProperty {
|
||||
name: property.name,
|
||||
properties: property.properties.map(|props| props.into_iter().collect()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<SecretProperty> for wadm::types::SecretProperty {
|
||||
fn from(property: SecretProperty) -> Self {
|
||||
wadm::types::SecretProperty {
|
||||
name: property.name,
|
||||
properties: property.properties.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<SecretSourceProperty> for wadm::types::SecretSourceProperty {
|
||||
fn from(property: SecretSourceProperty) -> Self {
|
||||
wadm::types::SecretSourceProperty {
|
||||
policy: property.policy,
|
||||
key: property.key,
|
||||
field: property.field,
|
||||
version: property.version,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<SharedApplicationComponentProperties>
|
||||
for wadm::types::SharedApplicationComponentProperties
|
||||
{
|
||||
fn from(properties: SharedApplicationComponentProperties) -> Self {
|
||||
wadm::types::SharedApplicationComponentProperties {
|
||||
name: properties.name,
|
||||
component: properties.component,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Trait> for wadm::types::Trait {
|
||||
fn from(trait_: Trait) -> Self {
|
||||
wadm::types::Trait {
|
||||
trait_type: trait_.trait_type,
|
||||
properties: trait_.properties.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<TraitProperty> for wadm::types::TraitProperty {
|
||||
fn from(property: TraitProperty) -> Self {
|
||||
match property {
|
||||
TraitProperty::Link(link) => wadm::types::TraitProperty::Link(link.into()),
|
||||
TraitProperty::SpreadScaler(spread) => {
|
||||
wadm::types::TraitProperty::Spreadscaler(spread.into())
|
||||
}
|
||||
TraitProperty::Custom(custom) => wadm::types::TraitProperty::Custom(custom.to_string()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<LinkProperty> for wadm::types::LinkProperty {
|
||||
fn from(property: LinkProperty) -> Self {
|
||||
wadm::types::LinkProperty {
|
||||
source: property.source.map(|c| c.into()),
|
||||
target: property.target.into(),
|
||||
namespace: property.namespace,
|
||||
package: property.package,
|
||||
interfaces: property.interfaces,
|
||||
name: property.name,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ConfigDefinition> for wadm::types::ConfigDefinition {
|
||||
fn from(definition: ConfigDefinition) -> Self {
|
||||
wadm::types::ConfigDefinition {
|
||||
config: definition.config.into_iter().map(|c| c.into()).collect(),
|
||||
secrets: definition.secrets.into_iter().map(|s| s.into()).collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<TargetConfig> for wadm::types::TargetConfig {
|
||||
fn from(config: TargetConfig) -> Self {
|
||||
wadm::types::TargetConfig {
|
||||
name: config.name,
|
||||
config: config.config.into_iter().map(|c| c.into()).collect(),
|
||||
secrets: config.secrets.into_iter().map(|s| s.into()).collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<SpreadScalerProperty> for wadm::types::SpreadscalerProperty {
|
||||
fn from(property: SpreadScalerProperty) -> Self {
|
||||
wadm::types::SpreadscalerProperty {
|
||||
instances: property.instances as u32,
|
||||
spread: property.spread.into_iter().map(|s| s.into()).collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Spread> for wadm::types::Spread {
|
||||
fn from(spread: Spread) -> Self {
|
||||
wadm::types::Spread {
|
||||
name: spread.name,
|
||||
requirements: spread.requirements.into_iter().collect(),
|
||||
weight: spread.weight.map(|w| w as u32),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ModelSummary> for wadm::types::ModelSummary {
|
||||
fn from(summary: ModelSummary) -> Self {
|
||||
wadm::types::ModelSummary {
|
||||
name: summary.name,
|
||||
version: summary.version,
|
||||
description: summary.description,
|
||||
deployed_version: summary.deployed_version,
|
||||
status: summary.status.into(),
|
||||
status_message: summary.status_message,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<DeleteResult> for wadm::types::DeleteResult {
|
||||
fn from(result: DeleteResult) -> Self {
|
||||
match result {
|
||||
DeleteResult::Deleted => wadm::types::DeleteResult::Deleted,
|
||||
DeleteResult::Error => wadm::types::DeleteResult::Error,
|
||||
DeleteResult::Noop => wadm::types::DeleteResult::Noop,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<GetResult> for wadm::types::GetResult {
|
||||
fn from(result: GetResult) -> Self {
|
||||
match result {
|
||||
GetResult::Error => wadm::types::GetResult::Error,
|
||||
GetResult::Success => wadm::types::GetResult::Success,
|
||||
GetResult::NotFound => wadm::types::GetResult::NotFound,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<PutResult> for wadm::types::PutResult {
|
||||
fn from(result: PutResult) -> Self {
|
||||
match result {
|
||||
PutResult::Error => wadm::types::PutResult::Error,
|
||||
PutResult::Created => wadm::types::PutResult::Created,
|
||||
PutResult::NewVersion => wadm::types::PutResult::NewVersion,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<StatusType> for wadm::types::StatusType {
|
||||
fn from(status: StatusType) -> Self {
|
||||
match status {
|
||||
StatusType::Undeployed => wadm::types::StatusType::Undeployed,
|
||||
StatusType::Reconciling => wadm::types::StatusType::Reconciling,
|
||||
StatusType::Deployed => wadm::types::StatusType::Deployed,
|
||||
StatusType::Failed => wadm::types::StatusType::Failed,
|
||||
StatusType::Waiting => wadm::types::StatusType::Waiting,
|
||||
StatusType::Unhealthy => wadm::types::StatusType::Unhealthy,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Trait implementations for converting generated types to the types in the API module
|
||||
|
||||
impl From<wadm::types::StatusType> for StatusType {
|
||||
fn from(status: wadm::types::StatusType) -> Self {
|
||||
match status {
|
||||
wadm::types::StatusType::Undeployed => StatusType::Undeployed,
|
||||
wadm::types::StatusType::Reconciling => StatusType::Reconciling,
|
||||
wadm::types::StatusType::Deployed => StatusType::Deployed,
|
||||
wadm::types::StatusType::Failed => StatusType::Failed,
|
||||
wadm::types::StatusType::Waiting => StatusType::Waiting,
|
||||
wadm::types::StatusType::Unhealthy => StatusType::Unhealthy,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<wadm::types::StatusInfo> for StatusInfo {
|
||||
fn from(info: wadm::types::StatusInfo) -> Self {
|
||||
StatusInfo {
|
||||
status_type: info.status_type.into(),
|
||||
message: info.message,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<wadm::types::ComponentStatus> for ComponentStatus {
|
||||
fn from(status: wadm::types::ComponentStatus) -> Self {
|
||||
ComponentStatus {
|
||||
name: status.name,
|
||||
component_type: status.component_type,
|
||||
info: status.info.into(),
|
||||
traits: status
|
||||
.traits
|
||||
.into_iter()
|
||||
.map(|t| TraitStatus {
|
||||
trait_type: t.trait_type,
|
||||
info: t.info.into(),
|
||||
})
|
||||
.collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<wadm::types::TraitStatus> for TraitStatus {
|
||||
fn from(status: wadm::types::TraitStatus) -> Self {
|
||||
TraitStatus {
|
||||
trait_type: status.trait_type,
|
||||
info: status.info.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<wadm::types::StatusResult> for StatusResult {
|
||||
fn from(result: wadm::types::StatusResult) -> Self {
|
||||
match result {
|
||||
wadm::types::StatusResult::Error => StatusResult::Error,
|
||||
wadm::types::StatusResult::Ok => StatusResult::Ok,
|
||||
wadm::types::StatusResult::NotFound => StatusResult::NotFound,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<wadm::types::OamManifest> for Manifest {
|
||||
fn from(manifest: wadm::types::OamManifest) -> Self {
|
||||
Manifest {
|
||||
api_version: manifest.api_version,
|
||||
kind: manifest.kind,
|
||||
metadata: manifest.metadata.into(),
|
||||
spec: manifest.spec.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<wadm::types::Metadata> for Metadata {
|
||||
fn from(metadata: wadm::types::Metadata) -> Self {
|
||||
Metadata {
|
||||
name: metadata.name,
|
||||
annotations: metadata.annotations.into_iter().collect(),
|
||||
labels: metadata.labels.into_iter().collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<wadm::types::Specification> for Specification {
|
||||
fn from(spec: wadm::types::Specification) -> Self {
|
||||
Specification {
|
||||
components: spec.components.into_iter().map(|c| c.into()).collect(),
|
||||
policies: spec.policies.into_iter().map(|c| c.into()).collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<wadm::types::Component> for Component {
|
||||
fn from(component: wadm::types::Component) -> Self {
|
||||
Component {
|
||||
name: component.name,
|
||||
properties: component.properties.into(),
|
||||
traits: component
|
||||
.traits
|
||||
.map(|traits| traits.into_iter().map(|t| t.into()).collect()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<wadm::types::Policy> for Policy {
|
||||
fn from(policy: wadm::types::Policy) -> Self {
|
||||
Policy {
|
||||
name: policy.name,
|
||||
properties: policy.properties.into_iter().collect(),
|
||||
policy_type: policy.type_,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<wadm::types::Properties> for Properties {
|
||||
fn from(properties: wadm::types::Properties) -> Self {
|
||||
match properties {
|
||||
wadm::types::Properties::Component(properties) => Properties::Component {
|
||||
properties: properties.into(),
|
||||
},
|
||||
wadm::types::Properties::Capability(properties) => Properties::Capability {
|
||||
properties: properties.into(),
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<wadm::types::ComponentProperties> for ComponentProperties {
|
||||
fn from(properties: wadm::types::ComponentProperties) -> Self {
|
||||
ComponentProperties {
|
||||
image: properties.image,
|
||||
application: properties.application.map(Into::into),
|
||||
id: properties.id,
|
||||
config: properties.config.into_iter().map(|c| c.into()).collect(),
|
||||
secrets: properties.secrets.into_iter().map(|c| c.into()).collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<wadm::types::CapabilityProperties> for CapabilityProperties {
|
||||
fn from(properties: wadm::types::CapabilityProperties) -> Self {
|
||||
CapabilityProperties {
|
||||
image: properties.image,
|
||||
application: properties.application.map(Into::into),
|
||||
id: properties.id,
|
||||
config: properties.config.into_iter().map(|c| c.into()).collect(),
|
||||
secrets: properties.secrets.into_iter().map(|c| c.into()).collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<wadm::types::ConfigProperty> for ConfigProperty {
|
||||
fn from(property: wadm::types::ConfigProperty) -> Self {
|
||||
ConfigProperty {
|
||||
name: property.name,
|
||||
properties: property.properties.map(|props| props.into_iter().collect()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<wadm::types::SecretProperty> for SecretProperty {
|
||||
fn from(property: wadm::types::SecretProperty) -> Self {
|
||||
SecretProperty {
|
||||
name: property.name,
|
||||
properties: property.properties.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<wadm::types::SecretSourceProperty> for SecretSourceProperty {
|
||||
fn from(property: wadm::types::SecretSourceProperty) -> Self {
|
||||
SecretSourceProperty {
|
||||
policy: property.policy,
|
||||
key: property.key,
|
||||
field: property.field,
|
||||
version: property.version,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<wadm::types::SharedApplicationComponentProperties>
|
||||
for SharedApplicationComponentProperties
|
||||
{
|
||||
fn from(properties: wadm::types::SharedApplicationComponentProperties) -> Self {
|
||||
SharedApplicationComponentProperties {
|
||||
name: properties.name,
|
||||
component: properties.component,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<wadm::types::Trait> for Trait {
|
||||
fn from(trait_: wadm::types::Trait) -> Self {
|
||||
Trait {
|
||||
trait_type: trait_.trait_type,
|
||||
properties: trait_.properties.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<wadm::types::TraitProperty> for TraitProperty {
|
||||
fn from(property: wadm::types::TraitProperty) -> Self {
|
||||
match property {
|
||||
wadm::types::TraitProperty::Link(link) => TraitProperty::Link(link.into()),
|
||||
wadm::types::TraitProperty::Spreadscaler(spread) => {
|
||||
TraitProperty::SpreadScaler(spread.into())
|
||||
}
|
||||
wadm::types::TraitProperty::Custom(custom) => {
|
||||
TraitProperty::Custom(serde_json::value::Value::String(custom))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<wadm::types::LinkProperty> for LinkProperty {
|
||||
fn from(property: wadm::types::LinkProperty) -> Self {
|
||||
#[allow(deprecated)]
|
||||
LinkProperty {
|
||||
source: property.source.map(|c| c.into()),
|
||||
target: property.target.into(),
|
||||
namespace: property.namespace,
|
||||
package: property.package,
|
||||
interfaces: property.interfaces,
|
||||
name: property.name,
|
||||
source_config: None,
|
||||
target_config: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<wadm::types::ConfigDefinition> for ConfigDefinition {
|
||||
fn from(definition: wadm::types::ConfigDefinition) -> Self {
|
||||
ConfigDefinition {
|
||||
config: definition.config.into_iter().map(|c| c.into()).collect(),
|
||||
secrets: definition.secrets.into_iter().map(|s| s.into()).collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<wadm::types::TargetConfig> for TargetConfig {
|
||||
fn from(config: wadm::types::TargetConfig) -> Self {
|
||||
TargetConfig {
|
||||
name: config.name,
|
||||
config: config.config.into_iter().map(|c| c.into()).collect(),
|
||||
secrets: config.secrets.into_iter().map(|s| s.into()).collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<wadm::types::SpreadscalerProperty> for SpreadScalerProperty {
|
||||
fn from(property: wadm::types::SpreadscalerProperty) -> Self {
|
||||
SpreadScalerProperty {
|
||||
instances: property.instances as usize,
|
||||
spread: property.spread.into_iter().map(|s| s.into()).collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<wadm::types::Spread> for Spread {
|
||||
fn from(spread: wadm::types::Spread) -> Self {
|
||||
Spread {
|
||||
name: spread.name,
|
||||
requirements: spread.requirements.into_iter().collect(),
|
||||
weight: spread.weight.map(|w| w as usize),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<VersionInfo> for wadm::types::VersionInfo {
|
||||
fn from(info: VersionInfo) -> Self {
|
||||
wasmcloud::wadm::types::VersionInfo {
|
||||
version: info.version,
|
||||
deployed: info.deployed,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Implement the From trait for StatusInfo
|
||||
impl From<StatusInfo> for wadm::types::StatusInfo {
|
||||
fn from(info: StatusInfo) -> Self {
|
||||
wadm::types::StatusInfo {
|
||||
status_type: info.status_type.into(),
|
||||
message: info.message,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Implement the From trait for Status
|
||||
impl From<Status> for wadm::types::Status {
|
||||
fn from(status: Status) -> Self {
|
||||
wadm::types::Status {
|
||||
version: status.version,
|
||||
info: status.info.into(),
|
||||
components: status.components.into_iter().map(|c| c.into()).collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Implement the From trait for ComponentStatus
|
||||
impl From<ComponentStatus> for wadm::types::ComponentStatus {
|
||||
fn from(component_status: ComponentStatus) -> Self {
|
||||
wadm::types::ComponentStatus {
|
||||
name: component_status.name,
|
||||
component_type: component_status.component_type,
|
||||
info: component_status.info.into(),
|
||||
traits: component_status
|
||||
.traits
|
||||
.into_iter()
|
||||
.map(|t| t.into())
|
||||
.collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Implement the From trait for TraitStatus
|
||||
impl From<TraitStatus> for wadm::types::TraitStatus {
|
||||
fn from(trait_status: TraitStatus) -> Self {
|
||||
wadm::types::TraitStatus {
|
||||
trait_type: trait_status.trait_type,
|
||||
info: trait_status.info.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,987 @@
|
|||
use std::collections::{BTreeMap, HashMap};
|
||||
|
||||
use schemars::JsonSchema;
|
||||
use serde::{de, Deserialize, Serialize};
|
||||
use utoipa::ToSchema;
|
||||
|
||||
pub mod api;
|
||||
#[cfg(feature = "wit")]
|
||||
pub mod bindings;
|
||||
#[cfg(feature = "wit")]
|
||||
pub use bindings::*;
|
||||
pub mod validation;
|
||||
|
||||
/// The default weight for a spread
|
||||
pub const DEFAULT_SPREAD_WEIGHT: usize = 100;
|
||||
/// The expected OAM api version
|
||||
pub const OAM_VERSION: &str = "core.oam.dev/v1beta1";
|
||||
/// The currently supported kind for OAM manifests.
|
||||
// NOTE(thomastaylor312): If we ever end up supporting more than one kind, we should use an enum for
|
||||
// this
|
||||
pub const APPLICATION_KIND: &str = "Application";
|
||||
/// The version key, as predefined by the [OAM
|
||||
/// spec](https://github.com/oam-dev/spec/blob/master/metadata.md#annotations-format)
|
||||
pub const VERSION_ANNOTATION_KEY: &str = "version";
|
||||
/// The description key, as predefined by the [OAM
|
||||
/// spec](https://github.com/oam-dev/spec/blob/master/metadata.md#annotations-format)
|
||||
pub const DESCRIPTION_ANNOTATION_KEY: &str = "description";
|
||||
/// The annotation key for shared applications
|
||||
pub const SHARED_ANNOTATION_KEY: &str = "experimental.wasmcloud.dev/shared";
|
||||
/// The identifier for the builtin spreadscaler trait type
|
||||
pub const SPREADSCALER_TRAIT: &str = "spreadscaler";
|
||||
/// The identifier for the builtin daemonscaler trait type
|
||||
pub const DAEMONSCALER_TRAIT: &str = "daemonscaler";
|
||||
/// The identifier for the builtin linkdef trait type
|
||||
pub const LINK_TRAIT: &str = "link";
|
||||
/// The string used for indicating a latest version. It is explicitly forbidden to use as a version
|
||||
/// for a manifest
|
||||
pub const LATEST_VERSION: &str = "latest";
|
||||
/// The default link name
|
||||
pub const DEFAULT_LINK_NAME: &str = "default";
|
||||
|
||||
/// Manifest file based on the Open Application Model (OAM) specification for declaratively managing wasmCloud applications
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
|
||||
#[serde(deny_unknown_fields)]
|
||||
pub struct Manifest {
|
||||
/// The OAM version of the manifest
|
||||
#[serde(rename = "apiVersion")]
|
||||
pub api_version: String,
|
||||
/// The kind or type of manifest described by the spec
|
||||
pub kind: String,
|
||||
/// Metadata describing the manifest
|
||||
pub metadata: Metadata,
|
||||
/// The specification for this manifest
|
||||
pub spec: Specification,
|
||||
}
|
||||
|
||||
impl Manifest {
|
||||
/// Returns a reference to the current version
|
||||
pub fn version(&self) -> &str {
|
||||
self.metadata
|
||||
.annotations
|
||||
.get(VERSION_ANNOTATION_KEY)
|
||||
.map(|v| v.as_str())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
|
||||
/// Returns a reference to the current description if it exists
|
||||
pub fn description(&self) -> Option<&str> {
|
||||
self.metadata
|
||||
.annotations
|
||||
.get(DESCRIPTION_ANNOTATION_KEY)
|
||||
.map(|v| v.as_str())
|
||||
}
|
||||
|
||||
/// Indicates if the manifest is shared, meaning it can be used by multiple applications
|
||||
pub fn shared(&self) -> bool {
|
||||
self.metadata
|
||||
.annotations
|
||||
.get(SHARED_ANNOTATION_KEY)
|
||||
.is_some_and(|v| v.parse::<bool>().unwrap_or(false))
|
||||
}
|
||||
|
||||
/// Returns the components in the manifest
|
||||
pub fn components(&self) -> impl Iterator<Item = &Component> {
|
||||
self.spec.components.iter()
|
||||
}
|
||||
|
||||
/// Helper function to find shared components that are missing from the given list of
|
||||
/// deployed applications
|
||||
pub fn missing_shared_components(&self, deployed_apps: &[&Manifest]) -> Vec<&Component> {
|
||||
self.spec
|
||||
.components
|
||||
.iter()
|
||||
.filter(|shared_component| {
|
||||
match &shared_component.properties {
|
||||
Properties::Capability {
|
||||
properties:
|
||||
CapabilityProperties {
|
||||
image: None,
|
||||
application: Some(shared_app),
|
||||
..
|
||||
},
|
||||
}
|
||||
| Properties::Component {
|
||||
properties:
|
||||
ComponentProperties {
|
||||
image: None,
|
||||
application: Some(shared_app),
|
||||
..
|
||||
},
|
||||
} => {
|
||||
if deployed_apps.iter().filter(|a| a.shared()).any(|m| {
|
||||
m.metadata.name == shared_app.name
|
||||
&& m.components().any(|c| {
|
||||
c.name == shared_app.component
|
||||
// This compares just the enum variant, not the actual properties
|
||||
// For example, if we reference a shared component that's a capability,
|
||||
// we want to make sure the deployed component is a capability.
|
||||
&& std::mem::discriminant(&c.properties)
|
||||
== std::mem::discriminant(&shared_component.properties)
|
||||
})
|
||||
}) {
|
||||
false
|
||||
} else {
|
||||
true
|
||||
}
|
||||
}
|
||||
_ => false,
|
||||
}
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Returns only the WebAssembly components in the manifest
|
||||
pub fn wasm_components(&self) -> impl Iterator<Item = &Component> {
|
||||
self.components()
|
||||
.filter(|c| matches!(c.properties, Properties::Component { .. }))
|
||||
}
|
||||
|
||||
/// Returns only the provider components in the manifest
|
||||
pub fn capability_providers(&self) -> impl Iterator<Item = &Component> {
|
||||
self.components()
|
||||
.filter(|c| matches!(c.properties, Properties::Capability { .. }))
|
||||
}
|
||||
|
||||
/// Returns a map of component names to components in the manifest
|
||||
pub fn component_lookup(&self) -> HashMap<&String, &Component> {
|
||||
self.components()
|
||||
.map(|c| (&c.name, c))
|
||||
.collect::<HashMap<&String, &Component>>()
|
||||
}
|
||||
|
||||
/// Returns only links in the manifest
|
||||
pub fn links(&self) -> impl Iterator<Item = &Trait> {
|
||||
self.components()
|
||||
.flat_map(|c| c.traits.as_ref())
|
||||
.flatten()
|
||||
.filter(|t| t.is_link())
|
||||
}
|
||||
|
||||
/// Returns only policies in the manifest
|
||||
pub fn policies(&self) -> impl Iterator<Item = &Policy> {
|
||||
self.spec.policies.iter()
|
||||
}
|
||||
|
||||
/// Returns a map of policy names to policies in the manifest
|
||||
pub fn policy_lookup(&self) -> HashMap<&String, &Policy> {
|
||||
self.spec
|
||||
.policies
|
||||
.iter()
|
||||
.map(|p| (&p.name, p))
|
||||
.collect::<HashMap<&String, &Policy>>()
|
||||
}
|
||||
}
|
||||
|
||||
/// The metadata describing the manifest
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
|
||||
pub struct Metadata {
|
||||
/// The name of the manifest. This must be unique per lattice
|
||||
pub name: String,
|
||||
/// Optional data for annotating this manifest see <https://github.com/oam-dev/spec/blob/master/metadata.md#annotations-format>
|
||||
#[serde(skip_serializing_if = "BTreeMap::is_empty")]
|
||||
pub annotations: BTreeMap<String, String>,
|
||||
/// Optional data for labeling this manifest, see <https://github.com/oam-dev/spec/blob/master/metadata.md#label-format>
|
||||
#[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
|
||||
pub labels: BTreeMap<String, String>,
|
||||
}
|
||||
|
||||
/// A representation of an OAM specification
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
pub struct Specification {
    /// The list of components for describing an application
    pub components: Vec<Component>,

    /// The list of policies describing an application. This is for providing application-wide
    /// settings such as configuration for a secrets backend, how to render Kubernetes services,
    /// etc. It can be omitted if no policies are needed for an application.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub policies: Vec<Policy>,
}
|
||||
|
||||
/// A policy definition
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
pub struct Policy {
    /// The name of this policy
    pub name: String,
    /// The properties for this policy
    pub properties: BTreeMap<String, String>,
    /// The type of the policy (appears as the `type` key on the wire)
    #[serde(rename = "type")]
    pub policy_type: String,
}
|
||||
|
||||
/// A component definition
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
// TODO: figure out why this can't be uncommented
// #[serde(deny_unknown_fields)]
pub struct Component {
    /// The name of this component
    pub name: String,
    /// The properties for this component. The `type` key of the serialized form is
    /// flattened into this enum's tag (see [`Properties`]).
    // NOTE(thomastaylor312): It would probably be better for us to implement a custom deserialze
    // and serialize that combines this and the component type. This is good enough for first draft
    #[serde(flatten)]
    pub properties: Properties,
    /// A list of various traits assigned to this component. Omitted from the
    /// serialized form entirely when absent.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub traits: Option<Vec<Trait>>,
}
|
||||
|
||||
impl Component {
|
||||
fn secrets(&self) -> Vec<SecretProperty> {
|
||||
let mut secrets = Vec::new();
|
||||
if let Some(traits) = self.traits.as_ref() {
|
||||
let l: Vec<SecretProperty> = traits
|
||||
.iter()
|
||||
.filter_map(|t| {
|
||||
if let TraitProperty::Link(link) = &t.properties {
|
||||
let mut tgt_iter = link.target.secrets.clone();
|
||||
if let Some(src) = &link.source {
|
||||
tgt_iter.extend(src.secrets.clone());
|
||||
}
|
||||
Some(tgt_iter)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.flatten()
|
||||
.collect();
|
||||
secrets.extend(l);
|
||||
};
|
||||
|
||||
match &self.properties {
|
||||
Properties::Component { properties } => {
|
||||
secrets.extend(properties.secrets.clone());
|
||||
}
|
||||
Properties::Capability { properties } => secrets.extend(properties.secrets.clone()),
|
||||
};
|
||||
secrets
|
||||
}
|
||||
|
||||
/// Returns only links in the component
|
||||
fn links(&self) -> impl Iterator<Item = &Trait> {
|
||||
self.traits.iter().flatten().filter(|t| t.is_link())
|
||||
}
|
||||
}
|
||||
|
||||
/// Properties that can be defined for a component
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
#[serde(tag = "type")]
pub enum Properties {
    /// A WebAssembly component (serialized as `type: component`; `actor` is
    /// accepted as a legacy alias on input).
    #[serde(rename = "component", alias = "actor")]
    Component { properties: ComponentProperties },
    /// A capability provider (serialized as `type: capability`).
    #[serde(rename = "capability")]
    Capability { properties: CapabilityProperties },
}
|
||||
|
||||
/// Properties specific to a WebAssembly component.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
#[serde(deny_unknown_fields)]
pub struct ComponentProperties {
    /// The image reference to use. Required unless the component is a shared component
    /// that is defined in another shared application.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub image: Option<String>,
    /// Information to locate a component within a shared application. Cannot be specified
    /// if the image is specified.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub application: Option<SharedApplicationComponentProperties>,
    /// The component ID to use for this component. If not supplied, it will be generated
    /// as a combination of the [Metadata::name] and the image reference.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// Named configuration to pass to the component. The component will be able to retrieve
    /// these values at runtime using `wasi:runtime/config.`
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub config: Vec<ConfigProperty>,
    /// Named secret references to pass to the component. The component will be able to retrieve
    /// these values at runtime using `wasmcloud:secrets/store`.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub secrets: Vec<SecretProperty>,
}
|
||||
|
||||
/// A bundle of named configuration and secret references. Used, for example, as
/// the `source` side of a link (see [`LinkProperty::source`]).
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Default, ToSchema, JsonSchema)]
pub struct ConfigDefinition {
    /// Named configuration entries; omitted from the serialized form when empty.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub config: Vec<ConfigProperty>,
    /// Named secret references; omitted from the serialized form when empty.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub secrets: Vec<SecretProperty>,
}
|
||||
|
||||
/// A named secret reference together with instructions for resolving its value.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash, ToSchema, JsonSchema)]
pub struct SecretProperty {
    /// The name of the secret. This is used as a reference by the component or capability to
    /// get the secret value as a resource.
    pub name: String,
    /// The properties of the secret that indicate how to retrieve the secret value from a secrets
    /// backend and which backend to actually query.
    pub properties: SecretSourceProperty,
}
|
||||
|
||||
/// Instructions for fetching one secret value from a secrets backend.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash, ToSchema, JsonSchema)]
pub struct SecretSourceProperty {
    /// The policy to use for retrieving the secret.
    pub policy: String,
    /// The key to use for retrieving the secret from the backend.
    pub key: String,
    /// The field to use for retrieving the secret from the backend. This is optional and can be
    /// used to retrieve a specific field from a secret.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub field: Option<String>,
    /// The version of the secret to retrieve. If not supplied, the latest version will be used.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub version: Option<String>,
}
|
||||
|
||||
/// Properties specific to a capability provider.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
#[serde(deny_unknown_fields)]
pub struct CapabilityProperties {
    /// The image reference to use. Required unless the component is a shared component
    /// that is defined in another shared application.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub image: Option<String>,
    /// Information to locate a component within a shared application. Cannot be specified
    /// if the image is specified.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub application: Option<SharedApplicationComponentProperties>,
    /// The component ID to use for this provider. If not supplied, it will be generated
    /// as a combination of the [Metadata::name] and the image reference.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// Named configuration to pass to the provider. The merged set of configuration will be passed
    /// to the provider at runtime using the provider SDK's `init()` function.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub config: Vec<ConfigProperty>,
    /// Named secret references to pass to the provider. The provider will be able to retrieve
    /// these values at runtime using `wasmcloud:secrets/store`.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub secrets: Vec<SecretProperty>,
}
|
||||
|
||||
/// A reference to a component defined inside another (shared) application.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
pub struct SharedApplicationComponentProperties {
    /// The name of the shared application
    pub name: String,
    /// The name of the component in the shared application
    pub component: String,
}
|
||||
|
||||
/// A trait attached to a component, such as a link or a scaler.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
#[serde(deny_unknown_fields)]
pub struct Trait {
    /// The type of trait specified. This should be a unique string for the type of scaler. As we
    /// plan on supporting custom scalers, these traits are not enumerated
    #[serde(rename = "type")]
    pub trait_type: String,
    /// The properties of this trait
    pub properties: TraitProperty,
}
|
||||
|
||||
impl Trait {
|
||||
/// Helper that creates a new linkdef type trait with the given properties
|
||||
pub fn new_link(props: LinkProperty) -> Trait {
|
||||
Trait {
|
||||
trait_type: LINK_TRAIT.to_owned(),
|
||||
properties: TraitProperty::Link(props),
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if a trait is a link
|
||||
pub fn is_link(&self) -> bool {
|
||||
self.trait_type == LINK_TRAIT
|
||||
}
|
||||
|
||||
/// Check if a trait is a scaler
|
||||
pub fn is_scaler(&self) -> bool {
|
||||
self.trait_type == SPREADSCALER_TRAIT || self.trait_type == DAEMONSCALER_TRAIT
|
||||
}
|
||||
|
||||
/// Helper that creates a new spreadscaler type trait with the given properties
|
||||
pub fn new_spreadscaler(props: SpreadScalerProperty) -> Trait {
|
||||
Trait {
|
||||
trait_type: SPREADSCALER_TRAIT.to_owned(),
|
||||
properties: TraitProperty::SpreadScaler(props),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_daemonscaler(props: SpreadScalerProperty) -> Trait {
|
||||
Trait {
|
||||
trait_type: DAEMONSCALER_TRAIT.to_owned(),
|
||||
properties: TraitProperty::SpreadScaler(props),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Properties for defining traits
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
#[serde(untagged)]
#[allow(clippy::large_enum_variant)]
pub enum TraitProperty {
    /// Link configuration between a source and a target component
    Link(LinkProperty),
    /// Scaling configuration (used by both spreadscaler and daemonscaler traits)
    SpreadScaler(SpreadScalerProperty),
    /// Catch-all for custom (non-built-in) trait properties
    // TODO(thomastaylor312): This is still broken right now with deserializing. If the incoming
    // type specifies instances, it matches with spreadscaler first. So we need to implement a custom
    // parser here
    Custom(serde_json::Value),
}
|
||||
|
||||
impl From<LinkProperty> for TraitProperty {
|
||||
fn from(value: LinkProperty) -> Self {
|
||||
Self::Link(value)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<SpreadScalerProperty> for TraitProperty {
|
||||
fn from(value: SpreadScalerProperty) -> Self {
|
||||
Self::SpreadScaler(value)
|
||||
}
|
||||
}
|
||||
|
||||
// impl From<serde_json::Value> for TraitProperty {
|
||||
// fn from(value: serde_json::Value) -> Self {
|
||||
// Self::Custom(value)
|
||||
// }
|
||||
// }
|
||||
|
||||
/// Properties for the config list associated with components, providers, and links
///
/// ## Usage
/// Defining a config block, like so:
/// ```yaml
/// source_config:
/// - name: "external-secret-kv"
/// - name: "default-port"
///   properties:
///     port: "8080"
/// ```
///
/// Will result in two config scalers being created, one with the name `external-secret-kv` and
/// one with the name `default-port`. Wadm will not resolve collisions with configuration names
/// between manifests.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
#[serde(deny_unknown_fields)]
pub struct ConfigProperty {
    /// Name of the config to ensure exists
    pub name: String,
    /// Optional properties to put with the configuration. If the properties are
    /// omitted in the manifest, wadm will assume that the configuration is externally managed
    /// and will not attempt to create it, only reporting the status as failed if not found.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub properties: Option<HashMap<String, String>>,
}
|
||||
|
||||
/// This impl is a helper to help compare a `Vec<String>` to a `Vec<ConfigProperty>`
|
||||
impl PartialEq<ConfigProperty> for String {
|
||||
fn eq(&self, other: &ConfigProperty) -> bool {
|
||||
self == &other.name
|
||||
}
|
||||
}
|
||||
|
||||
/// Properties for links
#[derive(Debug, Serialize, Clone, PartialEq, Eq, ToSchema, JsonSchema, Default)]
#[serde(deny_unknown_fields)]
pub struct LinkProperty {
    /// WIT namespace for the link
    pub namespace: String,
    /// WIT package for the link
    pub package: String,
    /// WIT interfaces for the link
    pub interfaces: Vec<String>,
    /// Configuration to apply to the source of the link
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub source: Option<ConfigDefinition>,
    /// Configuration to apply to the target of the link
    pub target: TargetConfig,
    /// The name of this link
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,

    /// Deprecated pre-0.13 field; accepted on input (translated into `source` by the
    /// custom deserializer below) but never serialized.
    #[serde(default, skip_serializing)]
    #[deprecated(since = "0.13.0")]
    pub source_config: Option<Vec<ConfigProperty>>,

    /// Deprecated pre-0.13 field; accepted on input (translated into `target` by the
    /// custom deserializer below) but never serialized.
    #[serde(default, skip_serializing)]
    #[deprecated(since = "0.13.0")]
    pub target_config: Option<Vec<ConfigProperty>>,
}
|
||||
|
||||
impl<'de> Deserialize<'de> for LinkProperty {
|
||||
fn deserialize<D>(d: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: serde::Deserializer<'de>,
|
||||
{
|
||||
let json = serde_json::value::Value::deserialize(d)?;
|
||||
let mut target = TargetConfig::default();
|
||||
let mut source = None;
|
||||
|
||||
// Handling the old configuration -- translate to a TargetConfig
|
||||
if let Some(t) = json.get("target") {
|
||||
if t.is_string() {
|
||||
let name = t.as_str().unwrap();
|
||||
let mut tgt = vec![];
|
||||
if let Some(tgt_config) = json.get("target_config") {
|
||||
tgt = serde_json::from_value(tgt_config.clone()).map_err(de::Error::custom)?;
|
||||
}
|
||||
target = TargetConfig {
|
||||
name: name.to_string(),
|
||||
config: tgt,
|
||||
secrets: vec![],
|
||||
};
|
||||
} else {
|
||||
// Otherwise handle normally
|
||||
target =
|
||||
serde_json::from_value(json["target"].clone()).map_err(de::Error::custom)?;
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(s) = json.get("source_config") {
|
||||
let src: Vec<ConfigProperty> =
|
||||
serde_json::from_value(s.clone()).map_err(de::Error::custom)?;
|
||||
source = Some(ConfigDefinition {
|
||||
config: src,
|
||||
secrets: vec![],
|
||||
});
|
||||
}
|
||||
|
||||
// If the source block is present then it takes priority
|
||||
if let Some(s) = json.get("source") {
|
||||
source = Some(serde_json::from_value(s.clone()).map_err(de::Error::custom)?);
|
||||
}
|
||||
|
||||
// Validate that the required keys are all present
|
||||
if json.get("namespace").is_none() {
|
||||
return Err(de::Error::custom("namespace is required"));
|
||||
}
|
||||
|
||||
if json.get("package").is_none() {
|
||||
return Err(de::Error::custom("package is required"));
|
||||
}
|
||||
|
||||
if json.get("interfaces").is_none() {
|
||||
return Err(de::Error::custom("interfaces is required"));
|
||||
}
|
||||
|
||||
Ok(LinkProperty {
|
||||
namespace: json["namespace"].as_str().unwrap().to_string(),
|
||||
package: json["package"].as_str().unwrap().to_string(),
|
||||
interfaces: json["interfaces"]
|
||||
.as_array()
|
||||
.unwrap()
|
||||
.iter()
|
||||
.map(|v| v.as_str().unwrap().to_string())
|
||||
.collect(),
|
||||
source,
|
||||
target,
|
||||
name: json.get("name").map(|v| v.as_str().unwrap().to_string()),
|
||||
..Default::default()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// The target side of a link: which component the link points at, plus any
/// configuration and secrets to apply on that side.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Default, ToSchema, JsonSchema)]
pub struct TargetConfig {
    /// The target this link applies to. This should be the name of a component in the manifest
    pub name: String,
    /// Named configuration for the target side; omitted from the serialized form when empty.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub config: Vec<ConfigProperty>,
    /// Named secret references for the target side; omitted from the serialized form when empty.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub secrets: Vec<SecretProperty>,
}
|
||||
|
||||
/// Helper impl so a string can be compared against a [`TargetConfig`] by
/// matching it against the target's name.
impl PartialEq<TargetConfig> for String {
    fn eq(&self, other: &TargetConfig) -> bool {
        other.name == *self
    }
}
|
||||
|
||||
/// Properties for spread scalers
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
#[serde(deny_unknown_fields)]
pub struct SpreadScalerProperty {
    /// Number of instances to spread across matching requirements
    /// (`replicas` is accepted as an alias on input)
    #[serde(alias = "replicas")]
    pub instances: usize,
    /// Requirements for spreading those instances; omitted from the serialized
    /// form when empty
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub spread: Vec<Spread>,
}
|
||||
|
||||
/// Configuration for various spreading requirements
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
#[serde(deny_unknown_fields)]
pub struct Spread {
    /// The name of this spread requirement
    pub name: String,
    /// An arbitrary map of labels to match on for scaling requirements
    #[serde(skip_serializing_if = "BTreeMap::is_empty")]
    pub requirements: BTreeMap<String, String>,
    /// An optional weight for this spread. Higher weights are given more precedence
    #[serde(skip_serializing_if = "Option::is_none")]
    pub weight: Option<usize>,
}
|
||||
|
||||
impl Default for Spread {
|
||||
fn default() -> Self {
|
||||
Spread {
|
||||
name: "default".to_string(),
|
||||
requirements: BTreeMap::default(),
|
||||
weight: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use std::io::BufReader;
|
||||
use std::path::Path;
|
||||
|
||||
use anyhow::Result;
|
||||
|
||||
use super::*;
|
||||
|
||||
pub(crate) fn deserialize_yaml(filepath: impl AsRef<Path>) -> Result<Manifest> {
|
||||
let file = std::fs::File::open(filepath)?;
|
||||
let reader = BufReader::new(file);
|
||||
let yaml_string: Manifest = serde_yaml::from_reader(reader)?;
|
||||
Ok(yaml_string)
|
||||
}
|
||||
|
||||
pub(crate) fn deserialize_json(filepath: impl AsRef<Path>) -> Result<Manifest> {
|
||||
let file = std::fs::File::open(filepath)?;
|
||||
let reader = BufReader::new(file);
|
||||
let json_string: Manifest = serde_json::from_reader(reader)?;
|
||||
Ok(json_string)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_oam_deserializer() {
|
||||
let res = deserialize_json("../../oam/simple1.json");
|
||||
match res {
|
||||
Ok(parse_results) => parse_results,
|
||||
Err(error) => panic!("Error {:?}", error),
|
||||
};
|
||||
|
||||
let res = deserialize_yaml("../../oam/simple1.yaml");
|
||||
match res {
|
||||
Ok(parse_results) => parse_results,
|
||||
Err(error) => panic!("Error {:?}", error),
|
||||
};
|
||||
}
|
||||
|
||||
    /// Verifies that an unrecognized trait deserializes into the
    /// `TraitProperty::Custom` variant rather than being mis-matched to a
    /// built-in variant. Ignored until the untagged-enum parsing issue is
    /// fixed (see the TODO on `TraitProperty`).
    #[test]
    #[ignore] // see TODO in TraitProperty enum
    fn test_custom_traits() {
        let manifest = deserialize_yaml("../../oam/custom.yaml").expect("Should be able to parse");
        // Find the (wasm) component in the manifest; its traits should include a custom one
        let component = manifest
            .spec
            .components
            .into_iter()
            .find(|comp| matches!(comp.properties, Properties::Component { .. }))
            .expect("Should be able to find component");
        let traits = component.traits.expect("Should have Vec of traits");
        assert!(
            traits
                .iter()
                .any(|t| matches!(t.properties, TraitProperty::Custom(_))),
            "Should have found custom property trait: {traits:?}"
        );
    }
|
||||
|
||||
#[test]
|
||||
fn test_config() {
|
||||
let manifest = deserialize_yaml("../../oam/config.yaml").expect("Should be able to parse");
|
||||
let props = match &manifest.spec.components[0].properties {
|
||||
Properties::Component { properties } => properties,
|
||||
_ => panic!("Should have found capability component"),
|
||||
};
|
||||
|
||||
assert_eq!(props.config.len(), 1, "Should have found a config property");
|
||||
let config_property = props.config.first().expect("Should have a config property");
|
||||
assert!(config_property.name == "component_config");
|
||||
assert!(config_property
|
||||
.properties
|
||||
.as_ref()
|
||||
.is_some_and(|p| p.get("lang").is_some_and(|v| v == "EN-US")));
|
||||
|
||||
let props = match &manifest.spec.components[1].properties {
|
||||
Properties::Capability { properties } => properties,
|
||||
_ => panic!("Should have found capability component"),
|
||||
};
|
||||
|
||||
assert_eq!(props.config.len(), 1, "Should have found a config property");
|
||||
let config_property = props.config.first().expect("Should have a config property");
|
||||
assert!(config_property.name == "provider_config");
|
||||
assert!(config_property
|
||||
.properties
|
||||
.as_ref()
|
||||
.is_some_and(|p| p.get("default-port").is_some_and(|v| v == "8080")));
|
||||
assert!(config_property.properties.as_ref().is_some_and(|p| p
|
||||
.get("cache_file")
|
||||
.is_some_and(|v| v == "/tmp/mycache.json")));
|
||||
}
|
||||
|
||||
    /// Verifies that `oam/simple2.yaml` parses into exactly one wasm component
    /// and two capability providers, i.e. the tagged `Properties` enum matches
    /// the `type` field correctly.
    #[test]
    fn test_component_matching() {
        let manifest = deserialize_yaml("../../oam/simple2.yaml").expect("Should be able to parse");
        assert_eq!(
            manifest
                .spec
                .components
                .iter()
                .filter(|component| matches!(component.properties, Properties::Component { .. }))
                .count(),
            1,
            "Should have found 1 component property"
        );
        assert_eq!(
            manifest
                .spec
                .components
                .iter()
                .filter(|component| matches!(component.properties, Properties::Capability { .. }))
                .count(),
            2,
            "Should have found 2 capability properties"
        );
    }
|
||||
|
||||
    /// Verifies trait parsing against `oam/simple2.yaml`: the wasm component
    /// should carry a spreadscaler trait, and the httpserver capability should
    /// carry a link trait whose target is the `userinfo` component.
    #[test]
    fn test_trait_matching() {
        let manifest = deserialize_yaml("../../oam/simple2.yaml").expect("Should be able to parse");
        // Validate component traits
        let traits = manifest
            .spec
            .components
            .clone()
            .into_iter()
            .find(|component| matches!(component.properties, Properties::Component { .. }))
            .expect("Should find component component")
            .traits
            .expect("Should have traits object");
        assert_eq!(traits.len(), 1, "Should have 1 trait");
        assert!(
            matches!(traits[0].properties, TraitProperty::SpreadScaler(_)),
            "Should have spreadscaler properties"
        );
        // Validate capability component traits, selecting the capability by its
        // image reference since there are two capability components in the manifest
        let traits = manifest
            .spec
            .components
            .into_iter()
            .find(|component| {
                matches!(
                    &component.properties,
                    Properties::Capability {
                        properties: CapabilityProperties { image, .. }
                    } if image.clone().expect("image to be present") == "wasmcloud.azurecr.io/httpserver:0.13.1"
                )
            })
            .expect("Should find capability component")
            .traits
            .expect("Should have traits object");
        assert_eq!(traits.len(), 1, "Should have 1 trait");
        assert!(
            matches!(traits[0].properties, TraitProperty::Link(_)),
            "Should have link property"
        );
        // Inspect the link itself: empty source config, target pointing at userinfo
        if let TraitProperty::Link(ld) = &traits[0].properties {
            assert_eq!(ld.source.as_ref().unwrap().config, vec![]);
            assert_eq!(ld.target.name, "userinfo".to_string());
        } else {
            panic!("trait property was not a link definition");
        }
    }
|
||||
|
||||
#[test]
|
||||
fn test_oam_serializer() {
|
||||
let mut spread_vec: Vec<Spread> = Vec::new();
|
||||
let spread_item = Spread {
|
||||
name: "eastcoast".to_string(),
|
||||
requirements: BTreeMap::from([("zone".to_string(), "us-east-1".to_string())]),
|
||||
weight: Some(80),
|
||||
};
|
||||
spread_vec.push(spread_item);
|
||||
let spread_item = Spread {
|
||||
name: "westcoast".to_string(),
|
||||
requirements: BTreeMap::from([("zone".to_string(), "us-west-1".to_string())]),
|
||||
weight: Some(20),
|
||||
};
|
||||
spread_vec.push(spread_item);
|
||||
let mut trait_vec: Vec<Trait> = Vec::new();
|
||||
let spreadscalerprop = SpreadScalerProperty {
|
||||
instances: 4,
|
||||
spread: spread_vec,
|
||||
};
|
||||
let trait_item = Trait::new_spreadscaler(spreadscalerprop);
|
||||
trait_vec.push(trait_item);
|
||||
let linkdefprop = LinkProperty {
|
||||
target: TargetConfig {
|
||||
name: "webcap".to_string(),
|
||||
..Default::default()
|
||||
},
|
||||
namespace: "wasi".to_string(),
|
||||
package: "http".to_string(),
|
||||
interfaces: vec!["incoming-handler".to_string()],
|
||||
source: Some(ConfigDefinition {
|
||||
config: {
|
||||
vec![ConfigProperty {
|
||||
name: "http".to_string(),
|
||||
properties: Some(HashMap::from([("port".to_string(), "8080".to_string())])),
|
||||
}]
|
||||
},
|
||||
..Default::default()
|
||||
}),
|
||||
name: Some("default".to_string()),
|
||||
..Default::default()
|
||||
};
|
||||
let trait_item = Trait::new_link(linkdefprop);
|
||||
trait_vec.push(trait_item);
|
||||
let mut component_vec: Vec<Component> = Vec::new();
|
||||
let component_item = Component {
|
||||
name: "userinfo".to_string(),
|
||||
properties: Properties::Component {
|
||||
properties: ComponentProperties {
|
||||
image: Some("wasmcloud.azurecr.io/fake:1".to_string()),
|
||||
application: None,
|
||||
id: None,
|
||||
config: vec![],
|
||||
secrets: vec![],
|
||||
},
|
||||
},
|
||||
traits: Some(trait_vec),
|
||||
};
|
||||
component_vec.push(component_item);
|
||||
let component_item = Component {
|
||||
name: "webcap".to_string(),
|
||||
properties: Properties::Capability {
|
||||
properties: CapabilityProperties {
|
||||
image: Some("wasmcloud.azurecr.io/httpserver:0.13.1".to_string()),
|
||||
application: None,
|
||||
id: None,
|
||||
config: vec![],
|
||||
secrets: vec![],
|
||||
},
|
||||
},
|
||||
traits: None,
|
||||
};
|
||||
component_vec.push(component_item);
|
||||
|
||||
let mut spread_vec: Vec<Spread> = Vec::new();
|
||||
let spread_item = Spread {
|
||||
name: "haslights".to_string(),
|
||||
requirements: BTreeMap::from([("zone".to_string(), "enabled".to_string())]),
|
||||
weight: Some(DEFAULT_SPREAD_WEIGHT),
|
||||
};
|
||||
spread_vec.push(spread_item);
|
||||
let spreadscalerprop = SpreadScalerProperty {
|
||||
instances: 1,
|
||||
spread: spread_vec,
|
||||
};
|
||||
let mut trait_vec: Vec<Trait> = Vec::new();
|
||||
let trait_item = Trait::new_spreadscaler(spreadscalerprop);
|
||||
trait_vec.push(trait_item);
|
||||
let component_item = Component {
|
||||
name: "ledblinky".to_string(),
|
||||
properties: Properties::Capability {
|
||||
properties: CapabilityProperties {
|
||||
image: Some("wasmcloud.azurecr.io/ledblinky:0.0.1".to_string()),
|
||||
application: None,
|
||||
id: None,
|
||||
config: vec![],
|
||||
secrets: vec![],
|
||||
},
|
||||
},
|
||||
traits: Some(trait_vec),
|
||||
};
|
||||
component_vec.push(component_item);
|
||||
|
||||
let spec = Specification {
|
||||
components: component_vec,
|
||||
policies: vec![],
|
||||
};
|
||||
let metadata = Metadata {
|
||||
name: "my-example-app".to_string(),
|
||||
annotations: BTreeMap::from([
|
||||
(VERSION_ANNOTATION_KEY.to_string(), "v0.0.1".to_string()),
|
||||
(
|
||||
DESCRIPTION_ANNOTATION_KEY.to_string(),
|
||||
"This is my app".to_string(),
|
||||
),
|
||||
]),
|
||||
labels: BTreeMap::from([(
|
||||
"prefix.dns.prefix/name-for_a.123".to_string(),
|
||||
"this is a valid label".to_string(),
|
||||
)]),
|
||||
};
|
||||
let manifest = Manifest {
|
||||
api_version: OAM_VERSION.to_owned(),
|
||||
kind: APPLICATION_KIND.to_owned(),
|
||||
metadata,
|
||||
spec,
|
||||
};
|
||||
let serialized_json =
|
||||
serde_json::to_vec(&manifest).expect("Should be able to serialize JSON");
|
||||
|
||||
let serialized_yaml = serde_yaml::to_string(&manifest)
|
||||
.expect("Should be able to serialize YAML")
|
||||
.into_bytes();
|
||||
|
||||
// Test the round trip back in
|
||||
let json_manifest: Manifest = serde_json::from_slice(&serialized_json)
|
||||
.expect("Should be able to deserialize JSON roundtrip");
|
||||
let yaml_manifest: Manifest = serde_yaml::from_slice(&serialized_yaml)
|
||||
.expect("Should be able to deserialize YAML roundtrip");
|
||||
|
||||
// Make sure the manifests don't contain any custom traits (to test that we aren't parsing
|
||||
// the tagged enum poorly)
|
||||
assert!(
|
||||
!json_manifest
|
||||
.spec
|
||||
.components
|
||||
.into_iter()
|
||||
.any(|component| component
|
||||
.traits
|
||||
.unwrap_or_default()
|
||||
.into_iter()
|
||||
.any(|t| matches!(t.properties, TraitProperty::Custom(_)))),
|
||||
"Should have found custom properties"
|
||||
);
|
||||
|
||||
assert!(
|
||||
!yaml_manifest
|
||||
.spec
|
||||
.components
|
||||
.into_iter()
|
||||
.any(|component| component
|
||||
.traits
|
||||
.unwrap_or_default()
|
||||
.into_iter()
|
||||
.any(|t| matches!(t.properties, TraitProperty::Custom(_)))),
|
||||
"Should have found custom properties"
|
||||
);
|
||||
}
|
||||
|
||||
    /// Verifies that the deprecated `source_config` field is left unset after
    /// deserialization: the custom deserializer translates any legacy
    /// configuration into the structured `source` block instead.
    #[test]
    fn test_deprecated_fields_not_set() {
        let manifest = deserialize_yaml("../../oam/simple2.yaml").expect("Should be able to parse");
        // Validate component traits on the webcap capability component
        let traits = manifest
            .spec
            .components
            .clone()
            .into_iter()
            .filter(|component| matches!(component.name.as_str(), "webcap"))
            .find(|component| matches!(component.properties, Properties::Capability { .. }))
            .expect("Should find component component")
            .traits
            .expect("Should have traits object");
        assert_eq!(traits.len(), 1, "Should have 1 trait");
        if let TraitProperty::Link(ld) = &traits[0].properties {
            // The structured source block exists (with empty config)...
            assert_eq!(ld.source.as_ref().unwrap().config, vec![]);
            // ...while the deprecated field stays None
            #[allow(deprecated)]
            let source_config = &ld.source_config;
            assert_eq!(source_config, &None);
        } else {
            panic!("trait property was not a link definition");
        };
    }
|
||||
}
|
||||
|
|
@ -0,0 +1,908 @@
|
|||
//! Logic for model ([`Manifest`]) validation
|
||||
//!
|
||||
|
||||
use std::collections::{HashMap, HashSet};
|
||||
#[cfg(not(target_family = "wasm"))]
|
||||
use std::path::Path;
|
||||
use std::sync::OnceLock;
|
||||
|
||||
use anyhow::{Context as _, Result};
|
||||
use regex::Regex;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::{
|
||||
CapabilityProperties, ComponentProperties, LinkProperty, Manifest, Properties, Trait,
|
||||
TraitProperty, DEFAULT_LINK_NAME, LATEST_VERSION,
|
||||
};
|
||||
|
||||
/// A namespace -> package -> interface lookup
type KnownInterfaceLookup = HashMap<String, HashMap<String, HashMap<String, ()>>>;

/// Hard-coded list of known namespaces/packages and the interfaces they contain.
///
/// Using an interface that is *not* on this list is not an error --
/// custom interfaces are expected to not be on this list, but when using
/// a known namespace and package, interfaces should generally be well known.
static KNOWN_INTERFACE_LOOKUP: OnceLock<KnownInterfaceLookup> = OnceLock::new();

/// The policy `type` value that identifies a secrets policy.
const SECRET_POLICY_TYPE: &str = "policy.secret.wasmcloud.dev/v1alpha1";
|
||||
|
||||
/// Get the static list of known interfaces
///
/// Built once on first use and cached in [`KNOWN_INTERFACE_LOOKUP`].
/// The structure is namespace -> package -> interface -> ().
fn get_known_interface_lookup() -> &'static KnownInterfaceLookup {
    KNOWN_INTERFACE_LOOKUP.get_or_init(|| {
        HashMap::from([
            (
                "wrpc".into(),
                HashMap::from([
                    (
                        "blobstore".into(),
                        HashMap::from([("blobstore".into(), ())]),
                    ),
                    (
                        "keyvalue".into(),
                        HashMap::from([("atomics".into(), ()), ("store".into(), ())]),
                    ),
                    (
                        "http".into(),
                        HashMap::from([
                            ("incoming-handler".into(), ()),
                            ("outgoing-handler".into(), ()),
                        ]),
                    ),
                ]),
            ),
            (
                "wasi".into(),
                HashMap::from([
                    (
                        "blobstore".into(),
                        HashMap::from([("blobstore".into(), ())]),
                    ),
                    ("config".into(), HashMap::from([("runtime".into(), ())])),
                    (
                        "keyvalue".into(),
                        HashMap::from([
                            ("atomics".into(), ()),
                            ("store".into(), ()),
                            ("batch".into(), ()),
                            ("watch".into(), ()),
                        ]),
                    ),
                    (
                        "http".into(),
                        HashMap::from([
                            ("incoming-handler".into(), ()),
                            ("outgoing-handler".into(), ()),
                        ]),
                    ),
                    ("logging".into(), HashMap::from([("logging".into(), ())])),
                ]),
            ),
            (
                "wasmcloud".into(),
                HashMap::from([(
                    "messaging".into(),
                    HashMap::from([("consumer".into(), ()), ("handler".into(), ())]),
                )]),
            ),
        ])
    })
}
|
||||
|
||||
static MANIFEST_NAME_REGEX_STR: &str = r"^[-\w]+$";
|
||||
static MANIFEST_NAME_REGEX: OnceLock<Regex> = OnceLock::new();
|
||||
|
||||
/// Retrieve regular expression which manifest names must match, compiled to a usable [`Regex`]
///
/// Compiled once on first use. The pattern is static, so compilation is
/// expected to succeed and the `unwrap` is effectively infallible.
fn get_manifest_name_regex() -> &'static Regex {
    MANIFEST_NAME_REGEX.get_or_init(|| {
        Regex::new(MANIFEST_NAME_REGEX_STR)
            .context("failed to parse manifest name regex")
            .unwrap()
    })
}
|
||||
|
||||
/// Check whether a manifest name matches requirements, returning all validation errors
|
||||
pub fn validate_manifest_name(name: &str) -> impl ValidationOutput {
|
||||
let mut errors = Vec::new();
|
||||
if !get_manifest_name_regex().is_match(name) {
|
||||
errors.push(ValidationFailure::new(
|
||||
ValidationFailureLevel::Error,
|
||||
format!("manifest name [{name}] is not allowed (should match regex [{MANIFEST_NAME_REGEX_STR}])"),
|
||||
))
|
||||
}
|
||||
errors
|
||||
}
|
||||
|
||||
/// Check whether a manifest name matches requirements
///
/// Returns `true` only when [`validate_manifest_name`] produced no errors.
pub fn is_valid_manifest_name(name: &str) -> bool {
    validate_manifest_name(name).valid()
}
|
||||
|
||||
/// Check whether a manifest version is valid, returning all validation errors
|
||||
pub fn validate_manifest_version(version: &str) -> impl ValidationOutput {
|
||||
let mut errors = Vec::new();
|
||||
if version == LATEST_VERSION {
|
||||
errors.push(ValidationFailure::new(
|
||||
ValidationFailureLevel::Error,
|
||||
format!("{LATEST_VERSION} is not allowed in wadm"),
|
||||
))
|
||||
}
|
||||
errors
|
||||
}
|
||||
|
||||
/// Check whether a manifest version meets validity requirements
///
/// Returns `true` only when [`validate_manifest_version`] produced no errors.
pub fn is_valid_manifest_version(version: &str) -> bool {
    validate_manifest_version(version).valid()
}
|
||||
|
||||
/// Check whether a known grouping of namespace, package and interface are valid.
|
||||
/// A grouping must be both known/expected and invalid to fail this test (ex. a typo).
|
||||
///
|
||||
/// NOTE: what is considered a valid interface known to the host depends explicitly on
|
||||
/// the wasmCloud host and wasmCloud project goals/implementation. This information is
|
||||
/// subject to change.
|
||||
fn is_invalid_known_interface(
|
||||
namespace: &str,
|
||||
package: &str,
|
||||
interface: &str,
|
||||
) -> Vec<ValidationFailure> {
|
||||
let known_interfaces = get_known_interface_lookup();
|
||||
let Some(pkg_lookup) = known_interfaces.get(namespace) else {
|
||||
// This namespace isn't known, so it may be a custom interface
|
||||
return vec![];
|
||||
};
|
||||
let Some(iface_lookup) = pkg_lookup.get(package) else {
|
||||
// Unknown package inside a known interface we control is probably a bug
|
||||
return vec![ValidationFailure::new(
|
||||
ValidationFailureLevel::Warning,
|
||||
format!("unrecognized interface [{namespace}:{package}/{interface}]"),
|
||||
)];
|
||||
};
|
||||
// Unknown interface inside known namespace and package is probably a bug
|
||||
if !iface_lookup.contains_key(interface) {
|
||||
// Unknown package inside a known interface we control is probably a bug, but may be
|
||||
// a new interface we don't know about yet
|
||||
return vec![ValidationFailure::new(
|
||||
ValidationFailureLevel::Warning,
|
||||
format!("unrecognized interface [{namespace}:{package}/{interface}]"),
|
||||
)];
|
||||
}
|
||||
|
||||
Vec::new()
|
||||
}
|
||||
|
||||
/// Level of a failure related to validation
#[derive(Debug, Default, Clone, Eq, PartialEq, Serialize, Deserialize)]
#[non_exhaustive]
pub enum ValidationFailureLevel {
    /// Non-fatal finding; does not make the output invalid
    #[default]
    Warning,
    /// Fatal finding; any error makes the output invalid
    Error,
}
|
||||
|
||||
impl core::fmt::Display for ValidationFailureLevel {
|
||||
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"{}",
|
||||
match self {
|
||||
Self::Warning => "warning",
|
||||
Self::Error => "error",
|
||||
}
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/// A single validation finding, pairing a severity level with a
/// human-readable message
#[derive(Debug, Default, Clone, Eq, PartialEq, Serialize, Deserialize)]
#[non_exhaustive]
pub struct ValidationFailure {
    // Severity of the finding (warning or error)
    pub level: ValidationFailureLevel,
    // Human-readable description of the finding
    pub msg: String,
}
|
||||
|
||||
impl ValidationFailure {
    /// Construct a new failure from a severity level and a message
    fn new(level: ValidationFailureLevel, msg: String) -> Self {
        ValidationFailure { level, msg }
    }
}
|
||||
|
||||
impl core::fmt::Display for ValidationFailure {
    /// Formats the failure as `[<level>] <message>`
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        write!(f, "[{}] {}", self.level, self.msg)
    }
}
|
||||
|
||||
/// Things that support output validation
pub trait ValidationOutput {
    /// Whether the object is valid (i.e. contains no error-level failures)
    fn valid(&self) -> bool;
    /// Warnings returned (if any) during validation
    fn warnings(&self) -> Vec<&ValidationFailure>;
    /// The errors returned by the validation
    fn errors(&self) -> Vec<&ValidationFailure>;
}
|
||||
|
||||
/// Default implementation for a list of concrete [`ValidationFailure`]s
|
||||
impl ValidationOutput for [ValidationFailure] {
|
||||
fn valid(&self) -> bool {
|
||||
self.errors().is_empty()
|
||||
}
|
||||
fn warnings(&self) -> Vec<&ValidationFailure> {
|
||||
self.iter()
|
||||
.filter(|m| m.level == ValidationFailureLevel::Warning)
|
||||
.collect()
|
||||
}
|
||||
fn errors(&self) -> Vec<&ValidationFailure> {
|
||||
self.iter()
|
||||
.filter(|m| m.level == ValidationFailureLevel::Error)
|
||||
.collect()
|
||||
}
|
||||
}
|
||||
|
||||
/// Default implementation for a list of concrete [`ValidationFailure`]s
|
||||
impl ValidationOutput for Vec<ValidationFailure> {
|
||||
fn valid(&self) -> bool {
|
||||
self.as_slice().valid()
|
||||
}
|
||||
fn warnings(&self) -> Vec<&ValidationFailure> {
|
||||
self.iter()
|
||||
.filter(|m| m.level == ValidationFailureLevel::Warning)
|
||||
.collect()
|
||||
}
|
||||
fn errors(&self) -> Vec<&ValidationFailure> {
|
||||
self.iter()
|
||||
.filter(|m| m.level == ValidationFailureLevel::Error)
|
||||
.collect()
|
||||
}
|
||||
}
|
||||
|
||||
/// Validate a WADM application manifest, returning a list of validation failures
///
/// At present this can check for:
/// - unsupported interfaces (i.e. typos, etc)
/// - unknown packages under known namespaces
/// - "dangling" links (missing components)
///
/// Since `[ValidationFailure]` implements `ValidationOutput`, you can call `valid()` and other
/// trait methods on it:
///
/// ```rust,ignore
/// let (_manifest, messages) = validate_manifest_file(some_path).await?;
/// let valid = messages.valid();
/// ```
///
/// # Arguments
///
/// * `path` - Path to the Manifest that will be read into memory and validated
#[cfg(not(target_family = "wasm"))]
pub async fn validate_manifest_file(
    path: impl AsRef<Path>,
) -> Result<(Manifest, Vec<ValidationFailure>)> {
    // Read the whole file up front so parse errors can cite the path
    let content = tokio::fs::read_to_string(path.as_ref())
        .await
        .with_context(|| format!("failed to read manifest @ [{}]", path.as_ref().display()))?;

    // Delegate the actual parsing + validation to the byte-level entry point
    validate_manifest_bytes(&content).await.with_context(|| {
        format!(
            "failed to parse YAML manifest [{}]",
            path.as_ref().display()
        )
    })
}
|
||||
|
||||
/// Validate a lsit of bytes that represents a WADM application manifest
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `content` - YAML content to the Manifest that will be read into memory and validated
|
||||
pub async fn validate_manifest_bytes(
|
||||
content: impl AsRef<[u8]>,
|
||||
) -> Result<(Manifest, Vec<ValidationFailure>)> {
|
||||
let raw_yaml_content = content.as_ref();
|
||||
let manifest =
|
||||
serde_yaml::from_slice(content.as_ref()).context("failed to parse manifest content")?;
|
||||
let mut failures = validate_manifest(&manifest).await?;
|
||||
let mut yaml_issues = validate_raw_yaml(raw_yaml_content)?;
|
||||
failures.append(&mut yaml_issues);
|
||||
Ok((manifest, failures))
|
||||
}
|
||||
|
||||
/// Validate a WADM application manifest, returning a list of validation failures
///
/// At present this can check for:
/// - invalid manifest names and versions
/// - unsupported interfaces (i.e. typos, etc)
/// - unknown packages under known namespaces
/// - "dangling" links (missing components)
/// - secrets mapped to unknown policies
/// - traits that deserialized as custom instead of link/scaler
/// - components missing (or doubly specifying) image/application properties
/// - duplicate links and duplicate link config names
///
/// Since `[ValidationFailure]` implements `ValidationOutput`, you can call `valid()` and other
/// trait methods on it:
///
/// ```rust,ignore
/// let messages = validate_manifest(&manifest).await?;
/// let valid = messages.valid();
/// ```
///
/// # Arguments
///
/// * `manifest` - The [`Manifest`] that should be validated
pub async fn validate_manifest(manifest: &Manifest) -> Result<Vec<ValidationFailure>> {
    // Check for known failures with the manifest
    let mut failures = Vec::new();
    // Name/version checks: only their error-level findings are folded in
    failures.extend(
        validate_manifest_name(&manifest.metadata.name)
            .errors()
            .into_iter()
            .cloned(),
    );
    failures.extend(
        validate_manifest_version(manifest.version())
            .errors()
            .into_iter()
            .cloned(),
    );
    // Structural and semantic checks; each helper returns its own failures
    failures.extend(core_validation(manifest));
    failures.extend(check_misnamed_interfaces(manifest));
    failures.extend(check_dangling_links(manifest));
    failures.extend(validate_policies(manifest));
    failures.extend(ensure_no_custom_traits(manifest));
    failures.extend(validate_component_properties(manifest));
    failures.extend(check_duplicate_links(manifest));
    failures.extend(validate_link_configs(manifest));
    Ok(failures)
}
|
||||
|
||||
pub fn validate_raw_yaml(content: &[u8]) -> Result<Vec<ValidationFailure>> {
|
||||
let mut failures = Vec::new();
|
||||
let raw_content: serde_yaml::Value =
|
||||
serde_yaml::from_slice(content).context("failed read raw yaml content")?;
|
||||
failures.extend(validate_components_configs(&raw_content));
|
||||
Ok(failures)
|
||||
}
|
||||
|
||||
/// Core structural validation: OAM label/annotation validity, unique component
/// names and identifiers, and link targets that resolve to declared components.
fn core_validation(manifest: &Manifest) -> Vec<ValidationFailure> {
    let mut failures = Vec::new();
    // Names of all components seen so far (used to resolve link targets too)
    let mut name_registry: HashSet<String> = HashSet::new();
    // Explicit `id`s seen so far, shared across component and capability kinds
    let mut id_registry: HashSet<String> = HashSet::new();
    // Every link target name referenced anywhere in the manifest
    let mut required_capability_components: HashSet<String> = HashSet::new();

    for label in manifest.metadata.labels.iter() {
        if !valid_oam_label(label) {
            failures.push(ValidationFailure::new(
                ValidationFailureLevel::Error,
                format!("Invalid OAM label: {:?}", label),
            ));
        }
    }

    for annotation in manifest.metadata.annotations.iter() {
        if !valid_oam_label(annotation) {
            failures.push(ValidationFailure::new(
                ValidationFailureLevel::Error,
                format!("Invalid OAM annotation: {:?}", annotation),
            ));
        }
    }

    for component in manifest.spec.components.iter() {
        // Component name validation : each component (components or providers) should have a unique name
        if !name_registry.insert(component.name.clone()) {
            failures.push(ValidationFailure::new(
                ValidationFailureLevel::Error,
                format!("Duplicate component name in manifest: {}", component.name),
            ));
        }
        // Provider validation :
        // Provider config should be serializable [For all components that have JSON config, validate that it can serialize.
        // We need this so it doesn't trigger an error when sending a command down the line]
        // Providers should have a unique image ref and link name
        if let Properties::Capability {
            properties:
                CapabilityProperties {
                    id: Some(component_id),
                    config: _capability_config,
                    ..
                },
        } = &component.properties
        {
            if !id_registry.insert(component_id.to_string()) {
                failures.push(ValidationFailure::new(
                    ValidationFailureLevel::Error,
                    format!(
                        "Duplicate component identifier in manifest: {}",
                        component_id
                    ),
                ));
            }
        }

        // Component validation : Components should have a unique identifier per manifest
        if let Properties::Component {
            properties: ComponentProperties { id: Some(id), .. },
        } = &component.properties
        {
            if !id_registry.insert(id.to_string()) {
                failures.push(ValidationFailure::new(
                    ValidationFailureLevel::Error,
                    format!("Duplicate component identifier in manifest: {}", id),
                ));
            }
        }

        // Linkdef validation : A linkdef from a component should have a unique target and reference
        if let Some(traits_vec) = &component.traits {
            for trait_item in traits_vec.iter() {
                if let Trait {
                    // TODO : add trait type validation after custom types are done. See TraitProperty enum.
                    properties: TraitProperty::Link(LinkProperty { target, .. }),
                    ..
                } = &trait_item
                {
                    // Multiple components{ with type != 'capability'} can declare the same target, so we don't need to check for duplicates on insert
                    required_capability_components.insert(target.name.to_string());
                }
            }
        }
    }

    // Any referenced link target that is not a declared component name is an error
    let missing_capability_components = required_capability_components
        .difference(&name_registry)
        .collect::<Vec<&String>>();

    if !missing_capability_components.is_empty() {
        failures.push(ValidationFailure::new(
            ValidationFailureLevel::Error,
            format!(
                "The following capability component(s) are missing from the manifest: {:?}",
                missing_capability_components
            ),
        ));
    };
    failures
}
|
||||
|
||||
/// Check for misnamed host-supported interfaces in the manifest
|
||||
fn check_misnamed_interfaces(manifest: &Manifest) -> Vec<ValidationFailure> {
|
||||
let mut failures = Vec::new();
|
||||
for link_trait in manifest.links() {
|
||||
if let TraitProperty::Link(LinkProperty {
|
||||
namespace,
|
||||
package,
|
||||
interfaces,
|
||||
target: _target,
|
||||
source: _source,
|
||||
..
|
||||
}) = &link_trait.properties
|
||||
{
|
||||
for interface in interfaces {
|
||||
failures.extend(is_invalid_known_interface(namespace, package, interface))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
failures
|
||||
}
|
||||
|
||||
/// This validation rule should eventually be removed, but at this time (as of wadm 0.14.0)
/// custom traits are not supported. We technically deserialize the custom trait, but 99%
/// of the time this is just a poorly formatted spread or link scaler which is incredibly
/// frustrating to debug.
fn ensure_no_custom_traits(manifest: &Manifest) -> Vec<ValidationFailure> {
    let mut failures = Vec::new();
    for component in manifest.components() {
        if let Some(traits) = &component.traits {
            for trait_item in traits {
                // A trait reported as a link/scaler whose properties fell back
                // to `Custom` indicates a malformed properties block
                match &trait_item.properties {
                    TraitProperty::Custom(trt) if trait_item.is_link() => failures.push(ValidationFailure::new(
                        ValidationFailureLevel::Error,
                        format!("Link trait deserialized as custom trait, ensure fields are correct: {}", trt),
                    )),
                    TraitProperty::Custom(trt) if trait_item.is_scaler() => failures.push(ValidationFailure::new(
                        ValidationFailureLevel::Error,
                        format!("Scaler trait deserialized as custom trait, ensure fields are correct: {}", trt),
                    )),
                    // Anything else is fine here
                    _ => (),
                }
            }
        }
    }
    failures
}
|
||||
|
||||
/// Check for "dangling" links, which contain targets that are not specified elsewhere in the
/// WADM manifest.
///
/// A problem of this type only constitutes a warning, because it is possible that the manifest
/// does not *completely* specify targets (they may be deployed/managed external to WADM or in a
/// separate manifest).
fn check_dangling_links(manifest: &Manifest) -> Vec<ValidationFailure> {
    let lookup = manifest.component_lookup();
    let mut failures = Vec::new();
    for link_trait in manifest.links() {
        match &link_trait.properties {
            // Custom-shaped links are inspected as raw objects
            TraitProperty::Custom(obj) => {
                if obj.get("target").is_none() {
                    failures.push(ValidationFailure::new(
                        ValidationFailureLevel::Error,
                        "custom link is missing 'target' property".into(),
                    ));
                    continue;
                }

                // Ensure target property is present
                match obj["target"]["name"].as_str() {
                    // If target is present, ensure it's pointing to a known component
                    Some(target) if !lookup.contains_key(&String::from(target)) => {
                        failures.push(ValidationFailure::new(
                            ValidationFailureLevel::Warning,
                            format!("custom link target [{target}] is not a listed component"),
                        ))
                    }
                    // For all keys where the component is in the lookup we can do nothing
                    Some(_) => {}
                    // if target property is not present, note that it is missing
                    None => failures.push(ValidationFailure::new(
                        ValidationFailureLevel::Error,
                        "custom link is missing 'target' name property".into(),
                    )),
                }
            }
            TraitProperty::Link(LinkProperty { name, target, .. }) => {
                // Identify the link by its name when present, by target otherwise
                let link_identifier = name
                    .as_ref()
                    .map(|n| format!("(name [{n}])"))
                    .unwrap_or_else(|| format!("(target [{}])", target.name));
                if !lookup.contains_key(&target.name) {
                    failures.push(ValidationFailure::new(
                        ValidationFailureLevel::Warning,
                        format!(
                            "link {link_identifier} target [{}] is not a listed component",
                            target.name
                        ),
                    ))
                }
            }

            _ => unreachable!("manifest.links() should only return links"),
        }
    }

    failures
}
|
||||
|
||||
/// Ensure that a manifest has secrets that are mapped to known policies
/// and that those policies have the expected type and properties.
fn validate_policies(manifest: &Manifest) -> Vec<ValidationFailure> {
    let policies = manifest.policy_lookup();
    let mut failures = Vec::new();
    for c in manifest.components() {
        // Ensure policies meant for secrets are valid
        for secret in c.secrets() {
            match policies.get(&secret.properties.policy) {
                // Policy exists but is not a secret policy
                Some(policy) if policy.policy_type != SECRET_POLICY_TYPE => {
                    failures.push(ValidationFailure::new(
                        ValidationFailureLevel::Error,
                        format!(
                            "secret '{}' is mapped to policy '{}' which is not a secret policy. Expected type '{SECRET_POLICY_TYPE}'",
                            secret.name, secret.properties.policy
                        ),
                    ))
                }
                // Secret policies must declare which backend serves them
                Some(policy) => {
                    if !policy.properties.contains_key("backend") {
                        failures.push(ValidationFailure::new(
                            ValidationFailureLevel::Error,
                            format!(
                                "secret '{}' is mapped to policy '{}' which does not include a 'backend' property",
                                secret.name, secret.properties.policy
                            ),
                        ))
                    }
                }
                // Secret references a policy that isn't defined in the manifest
                None => failures.push(ValidationFailure::new(
                    ValidationFailureLevel::Error,
                    format!(
                        "secret '{}' is mapped to unknown policy '{}'",
                        secret.name, secret.properties.policy
                    ),
                )),
            }
        }
    }
    failures
}
|
||||
|
||||
/// Ensure that all components in a manifest either specify an image reference or a shared
/// component in a different manifest. Note that this does not validate that the image reference
/// is valid or that the shared component is valid, only that one of the two properties is set.
pub fn validate_component_properties(application: &Manifest) -> Vec<ValidationFailure> {
    let mut failures = Vec::new();
    for component in application.spec.components.iter() {
        match &component.properties {
            // Components and capabilities share the same image/application rules
            Properties::Component {
                properties:
                    ComponentProperties {
                        image,
                        application,
                        config,
                        secrets,
                        ..
                    },
            }
            | Properties::Capability {
                properties:
                    CapabilityProperties {
                        image,
                        application,
                        config,
                        secrets,
                        ..
                    },
            } => match (image, application) {
                // Exactly one of the two must be present
                (Some(_), Some(_)) => {
                    failures.push(ValidationFailure::new(
                        ValidationFailureLevel::Error,
                        "Component cannot have both 'image' and 'application' properties".into(),
                    ));
                }
                (None, None) => {
                    failures.push(ValidationFailure::new(
                        ValidationFailureLevel::Error,
                        "Component must have either 'image' or 'application' property".into(),
                    ));
                }
                // This is a problem because of our left-folding config implementation. A shared application
                // could specify additional config and actually overwrite the original manifest's config.
                (None, Some(shared_properties)) if !config.is_empty() => {
                    failures.push(ValidationFailure::new(
                        ValidationFailureLevel::Error,
                        format!(
                            "Shared component '{}' cannot specify additional 'config'",
                            shared_properties.name
                        ),
                    ));
                }
                // Same reasoning as above, but for secrets
                (None, Some(shared_properties)) if !secrets.is_empty() => {
                    failures.push(ValidationFailure::new(
                        ValidationFailureLevel::Error,
                        format!(
                            "Shared component '{}' cannot specify additional 'secrets'",
                            shared_properties.name
                        ),
                    ));
                }
                // Shared application components already have scale properties defined in their original manifest
                (None, Some(shared_properties))
                    if component
                        .traits
                        .as_ref()
                        .is_some_and(|traits| traits.iter().any(|trt| trt.is_scaler())) =>
                {
                    failures.push(ValidationFailure::new(
                        ValidationFailureLevel::Error,
                        format!(
                            "Shared component '{}' cannot include a scaler trait",
                            shared_properties.name
                        ),
                    ));
                }
                // Remaining cases (e.g. image-only) are valid; nothing to check
                _ => {}
            },
        }
    }
    failures
}
|
||||
|
||||
/// Validates link configs in a WADM application manifest.
|
||||
///
|
||||
/// At present this can check for:
|
||||
/// - all configs that declare `properties` have unique names
|
||||
/// (configs without properties refer to existing configs)
|
||||
///
|
||||
pub fn validate_link_configs(manifest: &Manifest) -> Vec<ValidationFailure> {
|
||||
let mut failures = Vec::new();
|
||||
let mut link_config_names = HashSet::new();
|
||||
for link_trait in manifest.links() {
|
||||
if let TraitProperty::Link(LinkProperty { target, source, .. }) = &link_trait.properties {
|
||||
for config in &target.config {
|
||||
// we only need to check for uniqueness of configs with properties
|
||||
if config.properties.is_none() {
|
||||
continue;
|
||||
}
|
||||
// Check if config name is unique
|
||||
if !link_config_names.insert(config.name.clone()) {
|
||||
failures.push(ValidationFailure::new(
|
||||
ValidationFailureLevel::Error,
|
||||
format!("Duplicate link config name found: '{}'", config.name),
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(source) = source {
|
||||
for config in &source.config {
|
||||
// we only need to check for uniqueness of configs with properties
|
||||
if config.properties.is_none() {
|
||||
continue;
|
||||
}
|
||||
// Check if config name is unique
|
||||
if !link_config_names.insert(config.name.clone()) {
|
||||
failures.push(ValidationFailure::new(
|
||||
ValidationFailureLevel::Error,
|
||||
format!("Duplicate link config name found: '{}'", config.name),
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
failures
|
||||
}
|
||||
|
||||
/// Funtion to validate the component configs
|
||||
/// from 0.13.0 source_config is deprecated and replaced with source:config:
|
||||
/// this function validates the raw yaml to check for deprecated source_config and target_config
|
||||
pub fn validate_components_configs(application: &serde_yaml::Value) -> Vec<ValidationFailure> {
|
||||
let mut failures = Vec::new();
|
||||
|
||||
if let Some(specs) = application.get("spec") {
|
||||
if let Some(components) = specs.get("components") {
|
||||
if let Some(components_sequence) = components.as_sequence() {
|
||||
for component in components_sequence.iter() {
|
||||
failures.extend(get_deprecated_configs(component));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
failures
|
||||
}
|
||||
|
||||
/// Scan a single raw-YAML component for deprecated link config keys
/// (`source_config` / `target_config`), returning a warning for each one found.
fn get_deprecated_configs(component: &serde_yaml::Value) -> Vec<ValidationFailure> {
    let mut failures = vec![];
    if let Some(traits) = component.get("traits") {
        if let Some(traits_sequence) = traits.as_sequence() {
            for trait_ in traits_sequence.iter() {
                // Skip traits explicitly typed as something other than "link"
                if let Some(trait_type) = trait_.get("type") {
                    if trait_type.ne("link") {
                        continue;
                    }
                }
                if let Some(trait_properties) = trait_.get("properties") {
                    if trait_properties.get("source_config").is_some() {
                        failures.push(ValidationFailure {
                            level: ValidationFailureLevel::Warning,
                            msg: "one of the components' link trait contains a source_config key, please use source:config: rather".to_string(),
                        });
                    }
                    if trait_properties.get("target_config").is_some() {
                        failures.push(ValidationFailure {
                            level: ValidationFailureLevel::Warning,
                            msg: "one of the components' link trait contains a target_config key, please use target:config: rather".to_string(),
                        });
                    }
                }
            }
        }
    }
    failures
}
|
||||
|
||||
/// This function validates that a key/value pair is a valid OAM label. It's using fairly
|
||||
/// basic validation rules to ensure that the manifest isn't doing anything horribly wrong. Keeping
|
||||
/// this function free of regex is intentional to keep this code functional but simple.
|
||||
///
|
||||
/// See <https://github.com/oam-dev/spec/blob/master/metadata.md#metadata> for details
|
||||
pub fn valid_oam_label(label: (&String, &String)) -> bool {
|
||||
let (key, _) = label;
|
||||
match key.split_once('/') {
|
||||
Some((prefix, name)) => is_valid_dns_subdomain(prefix) && is_valid_label_name(name),
|
||||
None => is_valid_label_name(key),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns whether `s` is a valid DNS subdomain: 1..=253 bytes overall, with
/// every dot-separated label being 1..=63 bytes, starting with an ASCII
/// alphabetic character, ending with an ASCII alphanumeric character, and
/// containing only ASCII alphanumerics or hyphens.
pub fn is_valid_dns_subdomain(s: &str) -> bool {
    // Overall length bound first
    if s.is_empty() || s.len() > 253 {
        return false;
    }

    for label in s.split('.') {
        let label_ok = !label.is_empty()
            && label.len() <= 63
            && label.starts_with(|c: char| c.is_ascii_alphabetic())
            && label.ends_with(|c: char| c.is_ascii_alphanumeric())
            && label.chars().all(|c| c.is_ascii_alphanumeric() || c == '-');
        if !label_ok {
            return false;
        }
    }
    true
}
|
||||
|
||||
/// Returns whether `name` is a valid label name: 1..=63 bytes, starting and
/// ending with an ASCII alphanumeric character, and containing only ASCII
/// alphanumerics, hyphens, underscores, or periods.
pub fn is_valid_label_name(name: &str) -> bool {
    if name.is_empty() || name.len() > 63 {
        return false;
    }

    let first_ok = name.starts_with(|c: char| c.is_ascii_alphanumeric());
    let last_ok = name.ends_with(|c: char| c.is_ascii_alphanumeric());
    let body_ok = name
        .chars()
        .all(|c| c.is_ascii_alphanumeric() || matches!(c, '-' | '_' | '.'));
    first_ok && last_ok && body_ok
}
|
||||
|
||||
/// Checks whether a manifest contains "duplicate" links.
///
/// Multiple links from the same source with the same name, namespace, package and interface
/// are considered duplicate links.
fn check_duplicate_links(manifest: &Manifest) -> Vec<ValidationFailure> {
    let mut failures = Vec::new();
    for component in manifest.components() {
        // Uniqueness is scoped per component (the link's source side)
        let mut link_ids = HashSet::new();
        for link in component.links() {
            if let TraitProperty::Link(LinkProperty {
                name,
                namespace,
                package,
                interfaces,
                ..
            }) = &link.properties
            {
                for interface in interfaces {
                    // An unnamed link counts under the default link name
                    if !link_ids.insert((
                        name.clone()
                            .unwrap_or_else(|| DEFAULT_LINK_NAME.to_string()),
                        namespace,
                        package,
                        interface,
                    )) {
                        failures.push(ValidationFailure::new(
                            ValidationFailureLevel::Error,
                            format!(
                                "Duplicate link found inside component '{}': {} ({}:{}/{})",
                                component.name,
                                name.clone()
                                    .unwrap_or_else(|| DEFAULT_LINK_NAME.to_string()),
                                namespace,
                                package,
                                interface
                            ),
                        ));
                    };
                }
            }
        }
    }
    failures
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::is_valid_manifest_name;

    // Names that should pass manifest-name validation
    const VALID_MANIFEST_NAMES: [&str; 4] = [
        "mymanifest",
        "my-manifest",
        "my_manifest",
        "mymanifest-v2-v3-final",
    ];

    // Names that should fail manifest-name validation (dots, spaces)
    const INVALID_MANIFEST_NAMES: [&str; 2] = ["my.manifest", "my manifest"];

    /// Ensure valid manifest names pass
    #[test]
    fn manifest_names_valid() {
        // Acceptable manifest names
        for valid in VALID_MANIFEST_NAMES {
            assert!(is_valid_manifest_name(valid));
        }
    }

    /// Ensure invalid manifest names fail
    #[test]
    fn manifest_names_invalid() {
        for invalid in INVALID_MANIFEST_NAMES {
            assert!(!is_valid_manifest_name(invalid))
        }
    }
}
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
[wadm]
|
||||
path = "../../../wit/wadm"
|
||||
sha256 = "9795ab1a83023da07da2dc28d930004bd913b9dbf07d68d9ef9207a44348a169"
|
||||
sha512 = "9a94f33fd861912c81efd441cd19cc8066dbb2df5c2236d0472b66294bddc20ec5ad569484be18334d8c104ae9647b2c81c9878210ac35694ad8ba4a5b3780be"
|
||||
|
|
@ -0,0 +1 @@
|
|||
wadm = "../../../wit/wadm"
|
||||
|
|
@ -0,0 +1,48 @@
|
|||
package wasmcloud:wadm@0.2.0;
|
||||
|
||||
/// A Wadm client which interacts with the wadm api
interface client {
    use types.{
        version-info,
        status,
        model-summary,
        oam-manifest
    };

    /// Deploys a model to the WADM system.
    /// If no lattice is provided, the default lattice name 'default' is used.
    deploy-model: func(model-name: string, version: option<string>, lattice: option<string>) -> result<string, string>;

    /// Undeploys a model from the WADM system.
    /// If no lattice is provided, the default lattice name 'default' is used.
    undeploy-model: func(model-name: string, lattice: option<string>, non-destructive: bool) -> result<_, string>;

    /// Stores the application manifest for later deploys.
    /// Model is the full YAML or JSON string in this case.
    /// Returns the model name and version respectively.
    put-model: func(model: string, lattice: option<string>) -> result<tuple<string, string>, string>;

    /// Store an oam manifest directly for later deploys.
    /// Returns the model name and version respectively.
    put-manifest: func(manifest: oam-manifest, lattice: option<string>) -> result<tuple<string, string>, string>;

    /// Retrieves the history of a given model name.
    get-model-history: func(model-name: string, lattice: option<string>) -> result<list<version-info>, string>;

    /// Retrieves the status of a given model by name.
    get-model-status: func(model-name: string, lattice: option<string>) -> result<status, string>;

    /// Retrieves details on a given model.
    get-model-details: func(model-name: string, version: option<string>, lattice: option<string>) -> result<oam-manifest, string>;

    /// Deletes a model version from the WADM system.
    delete-model-version: func(model-name: string, version: option<string>, lattice: option<string>) -> result<bool, string>;

    /// Retrieves all application manifests.
    get-models: func(lattice: option<string>) -> result<list<model-summary>, string>;
}
|
||||
|
||||
/// Handler interface implemented by guests that subscribe to app status updates
interface handler {
    use types.{status-update};

    /// Callback invoked to handle an update received from an app status subscription
    handle-status-update: func(msg: status-update) -> result<_, string>;
}
|
||||
|
|
@ -0,0 +1,218 @@
|
|||
package wasmcloud:wadm@0.2.0;
|
||||
|
||||
/// Shared data types used by the wadm client and handler interfaces
interface types {
    /// Summary of a stored model: identity plus current deployment state
    record model-summary {
        name: string,
        version: string,
        description: option<string>,
        deployed-version: option<string>,
        status: status-type,
        status-message: option<string>
    }

    /// A single stored version of a model and whether it is currently deployed
    record version-info {
        version: string,
        deployed: bool
    }

    /// A status notification for a named application
    record status-update {
        app: string,
        status: status
    }

    /// Full status of an application at a given version
    record status {
        version: string,
        info: status-info,
        components: list<component-status>
    }

    /// Status of a single component within an application
    record component-status {
        name: string,
        component-type: string,
        info: status-info,
        traits: list<trait-status>
    }

    /// Status of a single trait attached to a component
    record trait-status {
        trait-type: string,
        info: status-info
    }

    /// A status type paired with a human-readable message
    record status-info {
        status-type: status-type,
        message: string
    }

    /// Result codes for a put operation
    enum put-result {
        error,
        created,
        new-version
    }

    /// Result codes for a get operation
    enum get-result {
        error,
        success,
        not-found
    }

    /// Result codes for a status query
    enum status-result {
        error,
        ok,
        not-found
    }

    /// Result codes for a delete operation
    enum delete-result {
        deleted,
        error,
        noop
    }

    /// Deployment state of an application, component, or trait
    enum status-type {
        undeployed,
        reconciling,
        deployed,
        failed,
        waiting,
        unhealthy
    }

    /// Result codes for a deploy operation
    enum deploy-result {
        error,
        acknowledged,
        not-found
    }

    /// The overall structure of an OAM manifest
    record oam-manifest {
        api-version: string,
        kind: string,
        metadata: metadata,
        spec: specification,
    }

    /// Metadata describing the manifest
    record metadata {
        name: string,
        annotations: list<tuple<string, string>>,
        labels: list<tuple<string, string>>,
    }

    /// The specification for this manifest
    record specification {
        components: list<component>,
        policies: list<policy>
    }

    /// A component definition
    record component {
        name: string,
        properties: properties,
        traits: option<list<trait>>,
    }

    /// Properties that can be defined for a component
    variant properties {
        component(component-properties),
        capability(capability-properties),
    }

    /// Properties for a component
    record component-properties {
        image: option<string>,
        application: option<shared-application-component-properties>,
        id: option<string>,
        config: list<config-property>,
        secrets: list<secret-property>,
    }

    /// Properties for a capability
    record capability-properties {
        image: option<string>,
        application: option<shared-application-component-properties>,
        id: option<string>,
        config: list<config-property>,
        secrets: list<secret-property>,
    }

    /// A policy definition
    record policy {
        name: string,
        properties: list<tuple<string, string>>,
        // `%` escapes the `type` keyword so it can be used as a field name
        %type: string,
    }

    /// A trait definition
    record trait {
        trait-type: string,
        properties: trait-property,
    }

    /// Properties for defining traits
    variant trait-property {
        link(link-property),
        spreadscaler(spreadscaler-property),
        custom(string),
    }

    /// Properties for links
    record link-property {
        namespace: string,
        // `%` escapes the `package` keyword so it can be used as a field name
        %package: string,
        interfaces: list<string>,
        source: option<config-definition>,
        target: target-config,
        name: option<string>,
    }

    /// Configuration definition: named config plus secrets
    record config-definition {
        config: list<config-property>,
        secrets: list<secret-property>,
    }

    /// A named configuration and its optional key/value properties
    record config-property {
        name: string,
        properties: option<list<tuple<string, string>>>,
    }

    /// A named secret and its source
    record secret-property {
        name: string,
        properties: secret-source-property,
    }

    /// Where a secret's value comes from
    record secret-source-property {
        policy: string,
        key: string,
        field: option<string>,
        version: option<string>,
    }

    /// Reference to a component shared from another application
    record shared-application-component-properties {
        name: string,
        component: string
    }

    /// Target configuration for the receiving side of a link
    record target-config {
        name: string,
        config: list<config-property>,
        secrets: list<secret-property>,
    }

    /// Properties for spread scalers
    record spreadscaler-property {
        instances: u32,
        spread: list<spread>,
    }

    /// Configuration for various spreading requirements
    record spread {
        name: string,
        requirements: list<tuple<string, string>>,
        weight: option<u32>,
    }
}
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
package wasmcloud:wadm-types@0.2.0;
|
||||
|
||||
/// World collecting all wadm API interfaces for code generation
world interfaces {
    import wasmcloud:wadm/types@0.2.0;
    import wasmcloud:wadm/client@0.2.0;
    import wasmcloud:wadm/handler@0.2.0;
}
|
||||
|
|
@ -0,0 +1,51 @@
|
|||
[package]
|
||||
name = "wadm"
|
||||
description = "wasmCloud Application Deployment Manager: A tool for running Wasm applications in wasmCloud"
|
||||
version.workspace = true
|
||||
edition = "2021"
|
||||
authors = ["wasmCloud Team"]
|
||||
keywords = ["webassembly", "wasmcloud", "wadm"]
|
||||
license = "Apache-2.0"
|
||||
readme = "../../README.md"
|
||||
repository = "https://github.com/wasmcloud/wadm"
|
||||
|
||||
[features]
|
||||
# Enables clap attributes on the wadm configuration struct
|
||||
cli = ["clap"]
|
||||
http_admin = ["http", "http-body-util", "hyper", "hyper-util"]
|
||||
default = []
|
||||
|
||||
[package.metadata.cargo-machete]
|
||||
ignored = ["cloudevents-sdk"]
|
||||
|
||||
[dependencies]
|
||||
anyhow = { workspace = true }
|
||||
async-nats = { workspace = true }
|
||||
async-trait = { workspace = true }
|
||||
chrono = { workspace = true }
|
||||
clap = { workspace = true, optional = true, features = ["derive", "cargo", "env"]}
|
||||
cloudevents-sdk = { workspace = true }
|
||||
http = { workspace = true, features = ["std"], optional = true }
|
||||
http-body-util = { workspace = true, optional = true }
|
||||
hyper = { workspace = true, optional = true }
|
||||
hyper-util = { workspace = true, features = ["server"], optional = true }
|
||||
futures = { workspace = true }
|
||||
indexmap = { workspace = true, features = ["serde"] }
|
||||
nkeys = { workspace = true }
|
||||
semver = { workspace = true, features = ["serde"] }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
serde_yaml = { workspace = true }
|
||||
sha2 = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
tokio = { workspace = true, features = ["full"] }
|
||||
tracing = { workspace = true, features = ["log"] }
|
||||
tracing-futures = { workspace = true }
|
||||
ulid = { workspace = true, features = ["serde"] }
|
||||
uuid = { workspace = true }
|
||||
wadm-types = { workspace = true }
|
||||
wasmcloud-control-interface = { workspace = true }
|
||||
wasmcloud-secrets-types = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
serial_test = "3"
|
||||
|
|
@ -0,0 +1,293 @@
|
|||
//! Type implementations for commands issued to compensate for state changes
|
||||
|
||||
use std::{
|
||||
collections::{BTreeMap, HashMap},
|
||||
error::Error,
|
||||
hash::{Hash, Hasher},
|
||||
};
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use wasmcloud_control_interface::Link;
|
||||
|
||||
use crate::{
|
||||
events::{ComponentScaleFailed, ComponentScaled, Event, ProviderStartFailed, ProviderStarted},
|
||||
workers::insert_managed_annotations,
|
||||
};
|
||||
|
||||
/// Generates a `From<$t> for Command` impl that wraps the value in the
/// same-named `Command` variant (relies on each variant sharing its
/// identifier with the wrapped command type).
macro_rules! from_impl {
    ($t:ident) => {
        impl From<$t> for Command {
            fn from(value: $t) -> Command {
                Command::$t(value)
            }
        }
    };
}
|
||||
|
||||
/// All possible compensatory commands for a lattice
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
pub enum Command {
    /// Scale a component to a desired instance count
    ScaleComponent(ScaleComponent),
    /// Start a capability provider on a host
    StartProvider(StartProvider),
    /// Stop a capability provider on a host
    StopProvider(StopProvider),
    /// Create or update a link definition
    PutLink(PutLink),
    /// Remove a link definition
    DeleteLink(DeleteLink),
    /// Create or update a named configuration
    PutConfig(PutConfig),
    /// Delete a named configuration
    DeleteConfig(DeleteConfig),
}
|
||||
|
||||
impl Command {
    /// Generates the corresponding event for a [Command](Command) in the form of a two-tuple
    /// ([Event](Event), Option<Event>)
    ///
    /// The model name used to compute the proper "managed" annotations is taken from the
    /// command's own `model_name` field (this method takes no arguments besides `self`).
    ///
    /// # Return
    /// - `None` for command variants with no modeled host event (stop provider, links, config)
    /// - Otherwise `Some((success, failure))`, where:
    ///   - `success` is the event a host would output after completing this command
    ///   - `failure` is an optional event that a host could output if processing fails
    pub fn corresponding_event(&self) -> Option<(Event, Option<Event>)> {
        match self {
            Command::StartProvider(StartProvider {
                annotations,
                reference,
                host_id,
                provider_id,
                model_name,
                ..
            }) => {
                // Tag the annotations so the resulting event is recognizable as wadm-managed
                let mut annotations = annotations.to_owned();
                insert_managed_annotations(&mut annotations, model_name);
                Some((
                    Event::ProviderStarted(ProviderStarted {
                        provider_id: provider_id.to_owned(),
                        annotations: annotations.to_owned(),
                        // We don't know this field from the command
                        claims: None,
                        image_ref: reference.to_owned(),
                        host_id: host_id.to_owned(),
                    }),
                    Some(Event::ProviderStartFailed(ProviderStartFailed {
                        provider_id: provider_id.to_owned(),
                        provider_ref: reference.to_owned(),
                        host_id: host_id.to_owned(),
                        // We don't know this field from the command
                        error: String::with_capacity(0),
                    })),
                ))
            }
            Command::ScaleComponent(ScaleComponent {
                component_id,
                host_id,
                count,
                reference,
                annotations,
                model_name,
                ..
            }) => {
                // Tag the annotations so the resulting event is recognizable as wadm-managed
                let mut annotations = annotations.to_owned();
                insert_managed_annotations(&mut annotations, model_name);
                Some((
                    Event::ComponentScaled(ComponentScaled {
                        component_id: component_id.to_owned(),
                        host_id: host_id.to_owned(),
                        max_instances: *count as usize,
                        image_ref: reference.to_owned(),
                        annotations: annotations.to_owned(),
                        // We don't know this field from the command
                        claims: None,
                    }),
                    Some(Event::ComponentScaleFailed(ComponentScaleFailed {
                        component_id: component_id.to_owned(),
                        host_id: host_id.to_owned(),
                        max_instances: *count as usize,
                        image_ref: reference.to_owned(),
                        annotations: annotations.to_owned(),
                        // We don't know these fields from the command
                        error: String::with_capacity(0),
                        claims: None,
                    })),
                ))
            }
            // Remaining variants (StopProvider, links, config) have no event pair modeled here
            _ => None,
        }
    }
}
|
||||
|
||||
/// Struct for the ScaleComponent command
#[derive(Clone, Debug, Serialize, Deserialize, Default, Eq)]
pub struct ScaleComponent {
    /// The ID of the component to scale. This should be computed by wadm as a combination
    /// of the manifest name and the component name.
    pub component_id: String,
    /// The host id on which to scale the components
    pub host_id: String,
    /// The number of components to scale to
    pub count: u32,
    /// The OCI or bindle reference to scale
    /// (not considered by the manual `PartialEq` impl below)
    pub reference: String,
    /// The name of the model/manifest that generated this command
    pub model_name: String,
    /// Additional annotations to attach on this command
    pub annotations: BTreeMap<String, String>,
    /// Named configuration to pass to the component.
    /// (not considered by the manual `PartialEq` impl below)
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub config: Vec<String>,
}

from_impl!(ScaleComponent);
|
||||
|
||||
impl PartialEq for ScaleComponent {
    // Manual impl: `reference` and `config` are excluded from equality, so two
    // scale commands for the same component/host/count/model/annotations compare
    // equal even if the image reference or config names differ.
    // NOTE(review): exclusion looks deliberate — confirm against command dedup logic.
    fn eq(&self, other: &Self) -> bool {
        self.component_id == other.component_id
            && self.host_id == other.host_id
            && self.count == other.count
            && self.model_name == other.model_name
            && self.annotations == other.annotations
    }
}
|
||||
|
||||
/// Struct for the StartProvider command
#[derive(Clone, Debug, Eq, Serialize, Deserialize, Default)]
pub struct StartProvider {
    /// The OCI or bindle reference to start
    pub reference: String,
    /// The ID of the provider to scale. This should be computed by wadm as a combination
    /// of the manifest name and the provider name.
    /// (not considered by the manual `PartialEq`/`Hash` impls below)
    pub provider_id: String,
    /// The host id on which to start the provider
    pub host_id: String,
    /// The name of the model/manifest that generated this command
    pub model_name: String,
    /// Named configuration to pass to the provider.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub config: Vec<String>,
    /// Additional annotations to attach on this command
    pub annotations: BTreeMap<String, String>,
}

from_impl!(StartProvider);
|
||||
|
||||
impl PartialEq for StartProvider {
    // Manual impl comparing only `reference`, `host_id`, and `model_name`;
    // `provider_id`, `config`, and `annotations` are excluded.
    fn eq(&self, other: &StartProvider) -> bool {
        self.reference == other.reference
            && self.host_id == other.host_id
            && self.model_name == other.model_name
    }
}
|
||||
|
||||
impl Hash for StartProvider {
    // Hashes a subset (`reference`, `host_id`) of the fields compared by the
    // manual `PartialEq`, preserving the `a == b ⇒ hash(a) == hash(b)` invariant.
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.reference.hash(state);
        self.host_id.hash(state);
    }
}
|
||||
|
||||
/// Struct for the StopProvider command
#[derive(Clone, Debug, Eq, Serialize, Deserialize, Default)]
pub struct StopProvider {
    /// The ID of the provider to stop
    pub provider_id: String,
    /// The host ID on which to stop the provider
    pub host_id: String,
    /// The name of the model/manifest that generated this command
    pub model_name: String,
    /// Additional annotations to attach on this command
    /// (not considered by the manual `PartialEq`/`Hash` impls below)
    pub annotations: BTreeMap<String, String>,
}

from_impl!(StopProvider);
|
||||
|
||||
impl PartialEq for StopProvider {
    // Manual impl comparing `provider_id`, `host_id`, and `model_name`;
    // `annotations` is excluded.
    fn eq(&self, other: &StopProvider) -> bool {
        self.provider_id == other.provider_id
            && self.host_id == other.host_id
            && self.model_name == other.model_name
    }
}
|
||||
|
||||
impl Hash for StopProvider {
    // Hashes a subset (`provider_id`, `host_id`) of the fields compared by the
    // manual `PartialEq`, preserving the `a == b ⇒ hash(a) == hash(b)` invariant.
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.provider_id.hash(state);
        self.host_id.hash(state);
    }
}
|
||||
|
||||
/// Struct for the PutLink command
#[derive(Clone, Debug, Eq, Serialize, Deserialize, Default, PartialEq, Hash)]
pub struct PutLink {
    /// Source identifier for the link
    pub source_id: String,
    /// Target for the link, which can be a unique identifier or (future) a routing group
    pub target: String,
    /// Name of the link. Not providing this is equivalent to specifying "default"
    pub name: String,
    /// WIT namespace of the link operation, e.g. `wasi` in `wasi:keyvalue/readwrite.get`
    pub wit_namespace: String,
    /// WIT package of the link operation, e.g. `keyvalue` in `wasi:keyvalue/readwrite.get`
    pub wit_package: String,
    /// WIT Interfaces to be used for the link, e.g. `readwrite`, `atomic`, etc.
    pub interfaces: Vec<String>,
    /// List of named configurations to provide to the source upon request
    #[serde(default)]
    pub source_config: Vec<String>,
    /// List of named configurations to provide to the target upon request
    #[serde(default)]
    pub target_config: Vec<String>,
    /// The name of the model/manifest that generated this command
    pub model_name: String,
}
|
||||
|
||||
impl TryFrom<PutLink> for Link {
|
||||
type Error = Box<dyn Error + Send + Sync>;
|
||||
|
||||
fn try_from(value: PutLink) -> Result<Link, Self::Error> {
|
||||
Link::builder()
|
||||
.source_id(&value.source_id)
|
||||
.target(&value.target)
|
||||
.name(&value.name)
|
||||
.wit_namespace(&value.wit_namespace)
|
||||
.wit_package(&value.wit_package)
|
||||
.interfaces(value.interfaces)
|
||||
.source_config(value.source_config)
|
||||
.target_config(value.target_config)
|
||||
.build()
|
||||
}
|
||||
}
|
||||
|
||||
from_impl!(PutLink);
|
||||
|
||||
/// Struct for the DeleteLink command
#[derive(Clone, Debug, Eq, PartialEq, Hash, Serialize, Deserialize, Default)]
pub struct DeleteLink {
    /// The ID of the component to unlink
    pub source_id: String,
    /// The WIT namespace of the component to unlink
    pub wit_namespace: String,
    /// The WIT package of the component to unlink
    pub wit_package: String,
    /// The link name to unlink
    pub link_name: String,
    /// The name of the model/manifest that generated this command
    pub model_name: String,
}

from_impl!(DeleteLink);
|
||||
|
||||
/// Struct for the PutConfig command
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize, Default)]
pub struct PutConfig {
    /// The name of the configuration to put
    pub config_name: String,
    /// The configuration properties (key/value pairs) to put
    pub config: HashMap<String, String>,
}

from_impl!(PutConfig);
|
||||
|
||||
/// Struct for the DeleteConfig command
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize, Default)]
pub struct DeleteConfig {
    /// The name of the configuration to delete
    pub config_name: String,
}

from_impl!(DeleteConfig);
|
||||
|
|
@ -0,0 +1,306 @@
|
|||
#[cfg(feature = "http_admin")]
|
||||
use core::net::SocketAddr;
|
||||
use std::path::PathBuf;
|
||||
|
||||
#[cfg(feature = "cli")]
|
||||
use clap::Parser;
|
||||
use wadm_types::api::DEFAULT_WADM_TOPIC_PREFIX;
|
||||
|
||||
use crate::nats::StreamPersistence;
|
||||
|
||||
/// Top-level configuration for a wadm process. With the `cli` feature enabled,
/// this doubles as the clap argument parser; every field's CLI flag and env var
/// are declared via `cfg_attr` so the struct stays usable as a plain config
/// type when the feature is off.
#[derive(Clone, Debug)]
#[cfg_attr(feature = "cli", derive(Parser))]
#[cfg_attr(feature = "cli", command(name = clap::crate_name!(), version = clap::crate_version!(), about = "wasmCloud Application Deployment Manager", long_about = None))]
pub struct WadmConfig {
    /// The ID for this wadm process. Defaults to a random UUIDv4 if none is provided. This is used
    /// to help with debugging when identifying which process is doing the work
    #[cfg_attr(
        feature = "cli",
        arg(short = 'i', long = "host-id", env = "WADM_HOST_ID")
    )]
    pub host_id: Option<String>,

    /// Whether or not to use structured log output (as JSON)
    #[cfg_attr(
        feature = "cli",
        arg(
            short = 'l',
            long = "structured-logging",
            default_value = "false",
            env = "WADM_STRUCTURED_LOGGING"
        )
    )]
    pub structured_logging: bool,

    /// Whether or not to enable opentelemetry tracing
    #[cfg_attr(
        feature = "cli",
        arg(
            short = 't',
            long = "tracing",
            default_value = "false",
            env = "WADM_TRACING_ENABLED"
        )
    )]
    pub tracing_enabled: bool,

    /// The endpoint to use for tracing. Setting this flag enables tracing, even if --tracing is set
    /// to false. Defaults to http://localhost:4318/v1/traces if not set and tracing is enabled
    #[cfg_attr(
        feature = "cli",
        arg(short = 'e', long = "tracing-endpoint", env = "WADM_TRACING_ENDPOINT")
    )]
    pub tracing_endpoint: Option<String>,

    /// The NATS JetStream domain to connect to
    #[cfg_attr(feature = "cli", arg(short = 'd', env = "WADM_JETSTREAM_DOMAIN"))]
    pub domain: Option<String>,

    /// (Advanced) Tweak the maximum number of jobs to run for handling events and commands. Be
    /// careful how you use this as it can affect performance
    #[cfg_attr(
        feature = "cli",
        arg(short = 'j', long = "max-jobs", env = "WADM_MAX_JOBS")
    )]
    pub max_jobs: Option<usize>,

    /// The URL of the nats server you want to connect to
    #[cfg_attr(
        feature = "cli",
        arg(
            short = 's',
            long = "nats-server",
            env = "WADM_NATS_SERVER",
            default_value = "127.0.0.1:4222"
        )
    )]
    pub nats_server: String,

    /// Use the specified nkey file or seed literal for authentication. Must be used in conjunction with --nats-jwt
    #[cfg_attr(
        feature = "cli",
        arg(
            long = "nats-seed",
            env = "WADM_NATS_NKEY",
            conflicts_with = "nats_creds",
            requires = "nats_jwt"
        )
    )]
    pub nats_seed: Option<String>,

    /// Use the specified jwt file or literal for authentication. Must be used in conjunction with --nats-nkey
    #[cfg_attr(
        feature = "cli",
        arg(
            long = "nats-jwt",
            env = "WADM_NATS_JWT",
            conflicts_with = "nats_creds",
            requires = "nats_seed"
        )
    )]
    pub nats_jwt: Option<String>,

    /// (Optional) NATS credential file to use when authenticating
    #[cfg_attr(
        feature = "cli", arg(
            long = "nats-creds-file",
            env = "WADM_NATS_CREDS_FILE",
            conflicts_with_all = ["nats_seed", "nats_jwt"],
        ))]
    pub nats_creds: Option<PathBuf>,

    /// (Optional) NATS TLS certificate file to use when authenticating
    #[cfg_attr(
        feature = "cli",
        arg(long = "nats-tls-ca-file", env = "WADM_NATS_TLS_CA_FILE")
    )]
    pub nats_tls_ca_file: Option<PathBuf>,

    /// Name of the bucket used for storage of lattice state
    #[cfg_attr(
        feature = "cli",
        arg(
            long = "state-bucket-name",
            env = "WADM_STATE_BUCKET_NAME",
            default_value = "wadm_state"
        )
    )]
    pub state_bucket: String,

    /// The amount of time in seconds to give for hosts to fail to heartbeat and be removed from the
    /// store. By default, this is 70s because it is 2x the host heartbeat interval plus a little padding
    #[cfg_attr(
        feature = "cli",
        arg(
            long = "cleanup-interval",
            env = "WADM_CLEANUP_INTERVAL",
            default_value = "70"
        )
    )]
    pub cleanup_interval: u64,

    /// The API topic prefix to use. This is an advanced setting that should only be used if you
    /// know what you are doing
    #[cfg_attr(
        feature = "cli", arg(
            long = "api-prefix",
            env = "WADM_API_PREFIX",
            default_value = DEFAULT_WADM_TOPIC_PREFIX
        ))]
    pub api_prefix: String,

    /// The prefix to use for the internal streams. When running in a multitenant environment,
    /// clients share the same JS domain (since messages need to come from lattices).
    /// Setting a stream prefix makes it possible to have a separate stream for different wadms running in a multitenant environment.
    /// This is an advanced setting that should only be used if you know what you are doing.
    #[cfg_attr(
        feature = "cli",
        arg(long = "stream-prefix", env = "WADM_STREAM_PREFIX")
    )]
    pub stream_prefix: Option<String>,

    /// Name of the bucket used for storage of manifests
    #[cfg_attr(
        feature = "cli",
        arg(
            long = "manifest-bucket-name",
            env = "WADM_MANIFEST_BUCKET_NAME",
            default_value = "wadm_manifests"
        )
    )]
    pub manifest_bucket: String,

    /// Run wadm in multitenant mode. This is for advanced multitenant use cases with segmented NATS
    /// account traffic and not simple cases where all lattices use credentials from the same
    /// account. See the deployment guide for more information
    #[cfg_attr(
        feature = "cli",
        arg(long = "multitenant", env = "WADM_MULTITENANT", hide = true)
    )]
    pub multitenant: bool,

    //
    // Max bytes configuration for streams. Primarily configurable to enable deployment on NATS infra
    // with limited resources. A value of -1 means "unlimited" per the defaults below.
    //
    /// Maximum bytes to keep for the state bucket
    #[cfg_attr(
        feature = "cli", arg(
            long = "state-bucket-max-bytes",
            env = "WADM_STATE_BUCKET_MAX_BYTES",
            default_value_t = -1,
            hide = true
        ))]
    pub max_state_bucket_bytes: i64,
    /// Maximum bytes to keep for the manifest bucket
    #[cfg_attr(
        feature = "cli", arg(
            long = "manifest-bucket-max-bytes",
            env = "WADM_MANIFEST_BUCKET_MAX_BYTES",
            default_value_t = -1,
            hide = true
        ))]
    pub max_manifest_bucket_bytes: i64,
    /// Nats streams storage type
    #[cfg_attr(
        feature = "cli", arg(
            long = "stream-persistence",
            env = "WADM_STREAM_PERSISTENCE",
            default_value_t = StreamPersistence::File
        ))]
    pub stream_persistence: StreamPersistence,
    /// Maximum bytes to keep for the command stream
    #[cfg_attr(
        feature = "cli", arg(
            long = "command-stream-max-bytes",
            env = "WADM_COMMAND_STREAM_MAX_BYTES",
            default_value_t = -1,
            hide = true
        ))]
    pub max_command_stream_bytes: i64,
    /// Maximum bytes to keep for the event stream
    #[cfg_attr(
        feature = "cli", arg(
            long = "event-stream-max-bytes",
            env = "WADM_EVENT_STREAM_MAX_BYTES",
            default_value_t = -1,
            hide = true
        ))]
    pub max_event_stream_bytes: i64,
    /// Maximum bytes to keep for the event consumer stream
    #[cfg_attr(
        feature = "cli", arg(
            long = "event-consumer-stream-max-bytes",
            env = "WADM_EVENT_CONSUMER_STREAM_MAX_BYTES",
            default_value_t = -1,
            hide = true
        ))]
    pub max_event_consumer_stream_bytes: i64,
    /// Maximum bytes to keep for the status stream
    #[cfg_attr(
        feature = "cli", arg(
            long = "status-stream-max-bytes",
            env = "WADM_STATUS_STREAM_MAX_BYTES",
            default_value_t = -1,
            hide = true
        ))]
    pub max_status_stream_bytes: i64,
    /// Maximum bytes to keep for the notify stream
    #[cfg_attr(
        feature = "cli", arg(
            long = "notify-stream-max-bytes",
            env = "WADM_NOTIFY_STREAM_MAX_BYTES",
            default_value_t = -1,
            hide = true
        ))]
    pub max_notify_stream_bytes: i64,
    /// Maximum bytes to keep for the wasmbus event stream
    #[cfg_attr(
        feature = "cli", arg(
            long = "wasmbus-event-stream-max-bytes",
            env = "WADM_WASMBUS_EVENT_STREAM_MAX_BYTES",
            default_value_t = -1,
            hide = true
        ))]
    pub max_wasmbus_event_stream_bytes: i64,

    // NOTE(review): this field uses the `clap(...)` attribute while all other
    // fields use `arg(...)` — consider normalizing for consistency.
    #[cfg(feature = "http_admin")]
    #[cfg_attr(feature = "cli", clap(long = "http-admin", env = "WADM_HTTP_ADMIN"))]
    /// HTTP administration endpoint address
    pub http_admin: Option<SocketAddr>,
}
|
||||
|
||||
impl Default for WadmConfig {
    /// Defaults mirror the CLI `default_value`s declared on [`WadmConfig`]:
    /// local NATS at 127.0.0.1:4222, standard bucket names, a 70s cleanup
    /// interval, file-backed streams, and -1 (unlimited) max-bytes limits.
    /// Keep the two in sync when changing either.
    fn default() -> Self {
        Self {
            host_id: None,
            domain: None,
            max_jobs: None,
            nats_server: "127.0.0.1:4222".to_string(),
            nats_seed: None,
            nats_jwt: None,
            nats_creds: None,
            nats_tls_ca_file: None,
            state_bucket: "wadm_state".to_string(),
            cleanup_interval: 70,
            api_prefix: DEFAULT_WADM_TOPIC_PREFIX.to_string(),
            stream_prefix: None,
            manifest_bucket: "wadm_manifests".to_string(),
            multitenant: false,
            max_state_bucket_bytes: -1,
            max_manifest_bucket_bytes: -1,
            stream_persistence: StreamPersistence::File,
            max_command_stream_bytes: -1,
            max_event_stream_bytes: -1,
            max_event_consumer_stream_bytes: -1,
            max_status_stream_bytes: -1,
            max_notify_stream_bytes: -1,
            max_wasmbus_event_stream_bytes: -1,
            structured_logging: false,
            tracing_enabled: false,
            tracing_endpoint: None,
            #[cfg(feature = "http_admin")]
            http_admin: None,
        }
    }
}
|
||||
|
|
@ -6,61 +6,46 @@ use wasmcloud_control_interface::{Client, ClientBuilder};
|
|||
// Copied from https://github.com/wasmCloud/control-interface-client/blob/main/src/broker.rs#L1, not public
|
||||
const DEFAULT_TOPIC_PREFIX: &str = "wasmbus.ctl";
|
||||
|
||||
#[derive(Debug, Default, Clone)]
|
||||
pub struct ControlClientConfig {
|
||||
/// The jetstream domain to use for the clients
|
||||
pub js_domain: Option<String>,
|
||||
/// The topic prefix to use for operations
|
||||
pub topic_prefix: Option<String>,
|
||||
}
|
||||
|
||||
/// A client constructor for wasmCloud control interface clients, identified by a lattice ID
|
||||
// NOTE: Yes, this sounds java-y. Deal with it.
|
||||
#[derive(Clone)]
|
||||
pub struct ControlClientConstructor {
|
||||
client: async_nats::Client,
|
||||
config: ControlClientConfig,
|
||||
/// The topic prefix to use for operations
|
||||
topic_prefix: Option<String>,
|
||||
}
|
||||
|
||||
impl ControlClientConstructor {
|
||||
/// Creates a new client pool that is all backed using the same NATS client. The given NATS
|
||||
/// client should be using credentials that can access all desired lattices.
|
||||
/// Creates a new client pool that is all backed using the same NATS client and an optional
|
||||
/// topic prefix. The given NATS client should be using credentials that can access all desired
|
||||
/// lattices.
|
||||
pub fn new(
|
||||
client: async_nats::Client,
|
||||
config: ControlClientConfig,
|
||||
topic_prefix: Option<String>,
|
||||
) -> ControlClientConstructor {
|
||||
ControlClientConstructor { client, config }
|
||||
ControlClientConstructor {
|
||||
client,
|
||||
topic_prefix,
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the client for the given lattice ID
|
||||
pub async fn get_connection(
|
||||
&self,
|
||||
id: &str,
|
||||
multitenant_prefix: Option<&str>,
|
||||
) -> anyhow::Result<Client> {
|
||||
let builder = ClientBuilder::new(self.client.clone()).lattice_prefix(id);
|
||||
let builder = if let Some(domain) = self.config.js_domain.as_deref() {
|
||||
builder.js_domain(domain)
|
||||
} else {
|
||||
builder
|
||||
};
|
||||
pub fn get_connection(&self, id: &str, multitenant_prefix: Option<&str>) -> Client {
|
||||
let builder = ClientBuilder::new(self.client.clone()).lattice(id);
|
||||
|
||||
let builder = builder.topic_prefix(topic_prefix(
|
||||
multitenant_prefix,
|
||||
self.config.topic_prefix.as_deref(),
|
||||
self.topic_prefix.as_deref(),
|
||||
));
|
||||
|
||||
builder
|
||||
.build()
|
||||
.await
|
||||
.map_err(|e| anyhow::anyhow!("Error building client for {id}: {e:?}"))
|
||||
builder.build()
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the topic prefix to use for the given multitenant prefix and topic prefix. The
|
||||
/// default prefix is `wasmbus.ctl`.
|
||||
///
|
||||
/// If running in multitenant mode, we listen to events on *.wasmbus.evt and need to send commands
|
||||
/// If running in multitenant mode, we listen to events on *.wasmbus.evt.*.> and need to send commands
|
||||
/// back to the '*' account. This match takes into account custom prefixes as well to support
|
||||
/// advanced use cases.
|
||||
///
|
||||
|
|
@ -1,5 +1,6 @@
|
|||
//! A module for creating and consuming a stream of commands from NATS
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
|
|
@ -13,7 +14,7 @@ use async_nats::{
|
|||
use futures::{Stream, TryStreamExt};
|
||||
use tracing::{error, warn};
|
||||
|
||||
use super::{CreateConsumer, ScopedMessage};
|
||||
use super::{CreateConsumer, ScopedMessage, LATTICE_METADATA_KEY, MULTITENANT_METADATA_KEY};
|
||||
use crate::commands::*;
|
||||
|
||||
/// The name of the durable NATS stream and consumer that contains incoming lattice events
|
||||
|
|
@ -42,10 +43,19 @@ impl CommandConsumer {
|
|||
return Err(format!("Topic {topic} does not match for lattice ID {lattice_id}").into());
|
||||
}
|
||||
|
||||
let consumer_name = if let Some(prefix) = multitenant_prefix {
|
||||
format!("{COMMANDS_CONSUMER_PREFIX}-{lattice_id}_{prefix}")
|
||||
let (consumer_name, metadata) = if let Some(prefix) = multitenant_prefix {
|
||||
(
|
||||
format!("{COMMANDS_CONSUMER_PREFIX}-{lattice_id}_{prefix}"),
|
||||
HashMap::from([
|
||||
(LATTICE_METADATA_KEY.to_string(), lattice_id.to_string()),
|
||||
(MULTITENANT_METADATA_KEY.to_string(), prefix.to_string()),
|
||||
]),
|
||||
)
|
||||
} else {
|
||||
format!("{COMMANDS_CONSUMER_PREFIX}-{lattice_id}")
|
||||
(
|
||||
format!("{COMMANDS_CONSUMER_PREFIX}-{lattice_id}"),
|
||||
HashMap::from([(LATTICE_METADATA_KEY.to_string(), lattice_id.to_string())]),
|
||||
)
|
||||
};
|
||||
let consumer = stream
|
||||
.get_or_create_consumer(
|
||||
|
|
@ -61,6 +71,7 @@ impl CommandConsumer {
|
|||
max_deliver: 3,
|
||||
deliver_policy: async_nats::jetstream::consumer::DeliverPolicy::All,
|
||||
filter_subject: topic.to_owned(),
|
||||
metadata,
|
||||
..Default::default()
|
||||
},
|
||||
)
|
||||
|
|
@ -1,5 +1,6 @@
|
|||
//! A module for creating and consuming a stream of events from a wasmcloud lattice
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::convert::TryFrom;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
|
|
@ -14,11 +15,11 @@ use async_nats::{
|
|||
use futures::{Stream, TryStreamExt};
|
||||
use tracing::{debug, error, warn};
|
||||
|
||||
use super::{CreateConsumer, ScopedMessage};
|
||||
use super::{CreateConsumer, ScopedMessage, LATTICE_METADATA_KEY, MULTITENANT_METADATA_KEY};
|
||||
use crate::events::*;
|
||||
|
||||
/// The name of the durable NATS stream and consumer that contains incoming lattice events
|
||||
pub const EVENTS_CONSUMER_PREFIX: &str = "wadm_events";
|
||||
pub const EVENTS_CONSUMER_PREFIX: &str = "wadm_event_consumer";
|
||||
|
||||
/// A stream of all events of a lattice, consumed from a durable NATS stream and consumer
|
||||
pub struct EventConsumer {
|
||||
|
|
@ -42,10 +43,19 @@ impl EventConsumer {
|
|||
if !topic.contains(lattice_id) {
|
||||
return Err(format!("Topic {topic} does not match for lattice ID {lattice_id}").into());
|
||||
}
|
||||
let consumer_name = if let Some(prefix) = multitenant_prefix {
|
||||
format!("{EVENTS_CONSUMER_PREFIX}-{lattice_id}_{prefix}")
|
||||
let (consumer_name, metadata) = if let Some(prefix) = multitenant_prefix {
|
||||
(
|
||||
format!("{EVENTS_CONSUMER_PREFIX}-{lattice_id}_{prefix}"),
|
||||
HashMap::from([
|
||||
(LATTICE_METADATA_KEY.to_string(), lattice_id.to_string()),
|
||||
(MULTITENANT_METADATA_KEY.to_string(), prefix.to_string()),
|
||||
]),
|
||||
)
|
||||
} else {
|
||||
format!("{EVENTS_CONSUMER_PREFIX}-{lattice_id}")
|
||||
(
|
||||
format!("{EVENTS_CONSUMER_PREFIX}-{lattice_id}"),
|
||||
HashMap::from([(LATTICE_METADATA_KEY.to_string(), lattice_id.to_string())]),
|
||||
)
|
||||
};
|
||||
let consumer = stream
|
||||
.get_or_create_consumer(
|
||||
|
|
@ -61,6 +71,7 @@ impl EventConsumer {
|
|||
max_deliver: 3,
|
||||
deliver_policy: async_nats::jetstream::consumer::DeliverPolicy::All,
|
||||
filter_subject: topic.to_owned(),
|
||||
metadata,
|
||||
..Default::default()
|
||||
},
|
||||
)
|
||||
|
|
@ -9,6 +9,8 @@ use tokio::{
|
|||
};
|
||||
use tracing::{error, instrument, trace, warn, Instrument};
|
||||
|
||||
use crate::consumers::{LATTICE_METADATA_KEY, MULTITENANT_METADATA_KEY};
|
||||
|
||||
use super::{CreateConsumer, ScopedMessage};
|
||||
|
||||
/// A convenience type for returning work results
|
||||
|
|
@ -141,15 +143,24 @@ impl<C> ConsumerManager<C> {
|
|||
}
|
||||
};
|
||||
|
||||
// TODO: This is somewhat brittle as we could change naming schemes, but it is
|
||||
// good enough for now. We are just taking the name (which should be of the
|
||||
// format `<consumer_prefix>-<lattice_prefix>_<multitenant_prefix>`), but this makes sure
|
||||
// we are always getting the last thing in case of other underscores
|
||||
//
|
||||
// When NATS 2.10 is out, store this as metadata on the stream.
|
||||
let (lattice_id, multitenant_prefix) = match extract_lattice_and_multitenant(&info.name) {
|
||||
(Some(id), prefix) => (id, prefix),
|
||||
(None, _) => return None,
|
||||
// Now that wadm is using NATS 2.10, the lattice and multitenant prefix are stored in the consumer metadata
|
||||
// as a fallback for older versions, we can still extract it from the consumer name in the
|
||||
// form `<consumer_prefix>-<lattice_prefix>_<multitenant_prefix>`
|
||||
let (lattice_id, multitenant_prefix) = match (info.config.metadata.get(LATTICE_METADATA_KEY), info.config.metadata.get(MULTITENANT_METADATA_KEY)) {
|
||||
(Some(lattice), Some(multitenant_prefix)) => {
|
||||
trace!(%lattice, %multitenant_prefix, "Found lattice and multitenant prefix in consumer metadata");
|
||||
(lattice.to_owned(), Some(multitenant_prefix.to_owned()))
|
||||
}
|
||||
(Some(lattice), None) => {
|
||||
trace!(%lattice, "Found lattice in consumer metadata");
|
||||
(lattice.to_owned(), None)
|
||||
}
|
||||
_ => {
|
||||
match extract_lattice_and_multitenant(&info.name) {
|
||||
(Some(id), prefix) => (id, prefix),
|
||||
(None, _) => return None,
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// Don't create multitenant consumers if running in single tenant mode, and vice versa
|
||||
|
|
@ -16,6 +16,9 @@ pub mod manager;
|
|||
/// The default time given for a command to ack. This is longer than events due to the possible need for more processing time
|
||||
pub const DEFAULT_ACK_TIME: Duration = Duration::from_secs(2);
|
||||
|
||||
pub const LATTICE_METADATA_KEY: &str = "lattice";
|
||||
pub const MULTITENANT_METADATA_KEY: &str = "multitenant_prefix";
|
||||
|
||||
pub use commands::*;
|
||||
pub use events::*;
|
||||
|
||||
|
|
@ -1,5 +1,5 @@
|
|||
use core::hash::{Hash, Hasher};
|
||||
use std::collections::HashMap;
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
|
|
@ -7,19 +7,17 @@ use serde::{Deserialize, Serialize};
|
|||
/// and Hash since it can serve as a key
|
||||
#[derive(Debug, Serialize, Deserialize, Default, Clone, Eq)]
|
||||
pub struct ProviderInfo {
|
||||
pub contract_id: String,
|
||||
pub link_name: String,
|
||||
// TODO: Should we actually parse the nkey?
|
||||
pub public_key: String,
|
||||
#[serde(alias = "public_key")]
|
||||
pub provider_id: String,
|
||||
#[serde(default)]
|
||||
pub annotations: HashMap<String, String>,
|
||||
pub provider_ref: String,
|
||||
#[serde(default)]
|
||||
pub annotations: BTreeMap<String, String>,
|
||||
}
|
||||
|
||||
impl PartialEq for ProviderInfo {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.public_key == other.public_key
|
||||
&& self.contract_id == other.contract_id
|
||||
&& self.link_name == other.link_name
|
||||
self.provider_id == other.provider_id
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -27,9 +25,7 @@ impl PartialEq for ProviderInfo {
|
|||
// inventory where these three pieces need to be unique regardless of annotations
|
||||
impl Hash for ProviderInfo {
|
||||
fn hash<H: Hasher>(&self, state: &mut H) {
|
||||
self.public_key.hash(state);
|
||||
self.contract_id.hash(state);
|
||||
self.link_name.hash(state);
|
||||
self.provider_id.hash(state);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -50,37 +46,25 @@ pub struct ProviderClaims {
|
|||
|
||||
#[derive(Debug, Serialize, Deserialize, Default, Clone, PartialEq, Eq)]
|
||||
pub struct ProviderHealthCheckInfo {
|
||||
pub link_name: String,
|
||||
// TODO: Should we make this a parsed nkey?
|
||||
pub public_key: String,
|
||||
pub contract_id: String,
|
||||
pub provider_id: String,
|
||||
pub host_id: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Default, Clone, PartialEq, Eq)]
|
||||
pub struct ActorClaims {
|
||||
pub struct ComponentClaims {
|
||||
pub call_alias: Option<String>,
|
||||
#[serde(rename = "caps")]
|
||||
pub capabilites: Vec<String>,
|
||||
#[serde(default)]
|
||||
pub expires_human: String,
|
||||
// TODO: parse as nkey?
|
||||
#[serde(default)]
|
||||
pub issuer: String,
|
||||
#[serde(default)]
|
||||
pub name: String,
|
||||
#[serde(default)]
|
||||
pub not_before_human: String,
|
||||
pub revision: usize,
|
||||
pub revision: Option<usize>,
|
||||
// NOTE: This doesn't need a custom deserialize because unlike provider claims, these come out
|
||||
// in an array
|
||||
pub tags: Option<Vec<String>>,
|
||||
pub version: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Default, Eq, PartialEq, Clone)]
|
||||
pub struct Linkdef {
|
||||
// TODO: parse as an nkey?
|
||||
pub actor_id: String,
|
||||
pub contract_id: String,
|
||||
pub id: String,
|
||||
pub link_name: String,
|
||||
// TODO: parse as an nkey?
|
||||
pub provider_id: String,
|
||||
pub values: HashMap<String, String>,
|
||||
pub version: Option<String>,
|
||||
}
|
||||
|
|
@ -2,13 +2,18 @@
|
|||
//! attribute of a cloudevent
|
||||
// TODO: These should probably be generated from a schema which we add into the actual cloud event
|
||||
|
||||
use std::{collections::HashMap, convert::TryFrom};
|
||||
use std::{
|
||||
collections::{BTreeMap, HashMap},
|
||||
convert::TryFrom,
|
||||
fmt::Display,
|
||||
};
|
||||
|
||||
use cloudevents::{AttributesReader, Data, Event as CloudEvent, EventBuilder, EventBuilderV10};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use thiserror::Error;
|
||||
use wasmcloud_control_interface::{ComponentDescription, Link, ProviderDescription};
|
||||
|
||||
use crate::model::Manifest;
|
||||
use wadm_types::Manifest;
|
||||
|
||||
use super::data::*;
|
||||
|
||||
|
|
@ -88,11 +93,8 @@ pub trait EventType {
|
|||
/// A lattice event
|
||||
#[derive(Debug, Clone, Deserialize, PartialEq, Eq)]
|
||||
pub enum Event {
|
||||
ActorStarted(ActorStarted),
|
||||
ActorsStarted(ActorsStarted),
|
||||
ActorsStartFailed(ActorsStartFailed),
|
||||
ActorStopped(ActorStopped),
|
||||
ActorsStopped(ActorsStopped),
|
||||
ComponentScaled(ComponentScaled),
|
||||
ComponentScaleFailed(ComponentScaleFailed),
|
||||
ProviderStarted(ProviderStarted),
|
||||
ProviderStopped(ProviderStopped),
|
||||
ProviderStartFailed(ProviderStartFailed),
|
||||
|
|
@ -104,24 +106,47 @@ pub enum Event {
|
|||
HostHeartbeat(HostHeartbeat),
|
||||
LinkdefSet(LinkdefSet),
|
||||
LinkdefDeleted(LinkdefDeleted),
|
||||
ConfigSet(ConfigSet),
|
||||
ConfigDeleted(ConfigDeleted),
|
||||
// NOTE(thomastaylor312): We may change where and how these get published, but it makes sense
|
||||
// for now to have them here even though they aren't technically lattice events
|
||||
ManifestPublished(ManifestPublished),
|
||||
ManifestUnpublished(ManifestUnpublished),
|
||||
}
|
||||
|
||||
impl Display for Event {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
Event::ComponentScaled(_) => write!(f, "ComponentScaled"),
|
||||
Event::ComponentScaleFailed(_) => write!(f, "ComponentScaleFailed"),
|
||||
Event::ProviderStarted(_) => write!(f, "ProviderStarted"),
|
||||
Event::ProviderStopped(_) => write!(f, "ProviderStopped"),
|
||||
Event::ProviderStartFailed(_) => write!(f, "ProviderStartFailed"),
|
||||
Event::ProviderHealthCheckPassed(_) => write!(f, "ProviderHealthCheckPassed"),
|
||||
Event::ProviderHealthCheckFailed(_) => write!(f, "ProviderHealthCheckFailed"),
|
||||
Event::ProviderHealthCheckStatus(_) => write!(f, "ProviderHealthCheckStatus"),
|
||||
Event::HostStarted(_) => write!(f, "HostStarted"),
|
||||
Event::HostStopped(_) => write!(f, "HostStopped"),
|
||||
Event::HostHeartbeat(_) => write!(f, "HostHeartbeat"),
|
||||
Event::LinkdefSet(_) => write!(f, "LinkdefSet"),
|
||||
Event::LinkdefDeleted(_) => write!(f, "LinkdefDeleted"),
|
||||
Event::ConfigSet(_) => write!(f, "ConfigSet"),
|
||||
Event::ConfigDeleted(_) => write!(f, "ConfigDeleted"),
|
||||
Event::ManifestPublished(_) => write!(f, "ManifestPublished"),
|
||||
Event::ManifestUnpublished(_) => write!(f, "ManifestUnpublished"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<CloudEvent> for Event {
|
||||
type Error = ConversionError;
|
||||
|
||||
fn try_from(value: CloudEvent) -> Result<Self, Self::Error> {
|
||||
match value.ty() {
|
||||
ActorStarted::TYPE => ActorStarted::try_from(value).map(Event::ActorStarted),
|
||||
ActorsStarted::TYPE => ActorsStarted::try_from(value).map(Event::ActorsStarted),
|
||||
ActorsStartFailed::TYPE => {
|
||||
ActorsStartFailed::try_from(value).map(Event::ActorsStartFailed)
|
||||
ComponentScaled::TYPE => ComponentScaled::try_from(value).map(Event::ComponentScaled),
|
||||
ComponentScaleFailed::TYPE => {
|
||||
ComponentScaleFailed::try_from(value).map(Event::ComponentScaleFailed)
|
||||
}
|
||||
ActorStopped::TYPE => ActorStopped::try_from(value).map(Event::ActorStopped),
|
||||
ActorsStopped::TYPE => ActorsStopped::try_from(value).map(Event::ActorsStopped),
|
||||
ProviderStarted::TYPE => ProviderStarted::try_from(value).map(Event::ProviderStarted),
|
||||
ProviderStopped::TYPE => ProviderStopped::try_from(value).map(Event::ProviderStopped),
|
||||
ProviderStartFailed::TYPE => {
|
||||
|
|
@ -141,6 +166,8 @@ impl TryFrom<CloudEvent> for Event {
|
|||
HostHeartbeat::TYPE => HostHeartbeat::try_from(value).map(Event::HostHeartbeat),
|
||||
LinkdefSet::TYPE => LinkdefSet::try_from(value).map(Event::LinkdefSet),
|
||||
LinkdefDeleted::TYPE => LinkdefDeleted::try_from(value).map(Event::LinkdefDeleted),
|
||||
ConfigSet::TYPE => ConfigSet::try_from(value).map(Event::ConfigSet),
|
||||
ConfigDeleted::TYPE => ConfigDeleted::try_from(value).map(Event::ConfigDeleted),
|
||||
ManifestPublished::TYPE => {
|
||||
ManifestPublished::try_from(value).map(Event::ManifestPublished)
|
||||
}
|
||||
|
|
@ -157,11 +184,8 @@ impl TryFrom<Event> for CloudEvent {
|
|||
|
||||
fn try_from(value: Event) -> Result<Self, Self::Error> {
|
||||
let ty = match value {
|
||||
Event::ActorStarted(_) => ActorStarted::TYPE,
|
||||
Event::ActorsStarted(_) => ActorsStarted::TYPE,
|
||||
Event::ActorsStartFailed(_) => ActorsStartFailed::TYPE,
|
||||
Event::ActorStopped(_) => ActorStopped::TYPE,
|
||||
Event::ActorsStopped(_) => ActorsStopped::TYPE,
|
||||
Event::ComponentScaled(_) => ComponentScaled::TYPE,
|
||||
Event::ComponentScaleFailed(_) => ComponentScaleFailed::TYPE,
|
||||
Event::ProviderStarted(_) => ProviderStarted::TYPE,
|
||||
Event::ProviderStopped(_) => ProviderStopped::TYPE,
|
||||
Event::ProviderStartFailed(_) => ProviderStartFailed::TYPE,
|
||||
|
|
@ -173,6 +197,8 @@ impl TryFrom<Event> for CloudEvent {
|
|||
Event::HostHeartbeat(_) => HostHeartbeat::TYPE,
|
||||
Event::LinkdefSet(_) => LinkdefSet::TYPE,
|
||||
Event::LinkdefDeleted(_) => LinkdefDeleted::TYPE,
|
||||
Event::ConfigSet(_) => ConfigSet::TYPE,
|
||||
Event::ConfigDeleted(_) => ConfigDeleted::TYPE,
|
||||
Event::ManifestPublished(_) => ManifestPublished::TYPE,
|
||||
Event::ManifestUnpublished(_) => ManifestUnpublished::TYPE,
|
||||
};
|
||||
|
|
@ -195,11 +221,8 @@ impl Serialize for Event {
|
|||
S: serde::Serializer,
|
||||
{
|
||||
match self {
|
||||
Event::ActorStarted(evt) => evt.serialize(serializer),
|
||||
Event::ActorsStarted(evt) => evt.serialize(serializer),
|
||||
Event::ActorsStartFailed(evt) => evt.serialize(serializer),
|
||||
Event::ActorStopped(evt) => evt.serialize(serializer),
|
||||
Event::ActorsStopped(evt) => evt.serialize(serializer),
|
||||
Event::ComponentScaled(evt) => evt.serialize(serializer),
|
||||
Event::ComponentScaleFailed(evt) => evt.serialize(serializer),
|
||||
Event::ProviderStarted(evt) => evt.serialize(serializer),
|
||||
Event::ProviderStopped(evt) => evt.serialize(serializer),
|
||||
Event::ProviderStartFailed(evt) => evt.serialize(serializer),
|
||||
|
|
@ -211,6 +234,8 @@ impl Serialize for Event {
|
|||
Event::HostHeartbeat(evt) => evt.serialize(serializer),
|
||||
Event::LinkdefSet(evt) => evt.serialize(serializer),
|
||||
Event::LinkdefDeleted(evt) => evt.serialize(serializer),
|
||||
Event::ConfigSet(evt) => evt.serialize(serializer),
|
||||
Event::ConfigDeleted(evt) => evt.serialize(serializer),
|
||||
Event::ManifestPublished(evt) => evt.serialize(serializer),
|
||||
Event::ManifestUnpublished(evt) => evt.serialize(serializer),
|
||||
}
|
||||
|
|
@ -226,11 +251,8 @@ impl Event {
|
|||
/// Returns the underlying raw cloudevent type for the event
|
||||
pub fn raw_type(&self) -> &str {
|
||||
match self {
|
||||
Event::ActorStarted(_) => ActorStarted::TYPE,
|
||||
Event::ActorsStarted(_) => ActorsStarted::TYPE,
|
||||
Event::ActorsStartFailed(_) => ActorsStartFailed::TYPE,
|
||||
Event::ActorStopped(_) => ActorStopped::TYPE,
|
||||
Event::ActorsStopped(_) => ActorsStopped::TYPE,
|
||||
Event::ComponentScaled(_) => ComponentScaled::TYPE,
|
||||
Event::ComponentScaleFailed(_) => ComponentScaleFailed::TYPE,
|
||||
Event::ProviderStarted(_) => ProviderStarted::TYPE,
|
||||
Event::ProviderStopped(_) => ProviderStopped::TYPE,
|
||||
Event::ProviderStartFailed(_) => ProviderStopped::TYPE,
|
||||
|
|
@ -242,6 +264,8 @@ impl Event {
|
|||
Event::HostHeartbeat(_) => HostHeartbeat::TYPE,
|
||||
Event::LinkdefSet(_) => LinkdefSet::TYPE,
|
||||
Event::LinkdefDeleted(_) => LinkdefDeleted::TYPE,
|
||||
Event::ConfigSet(_) => ConfigSet::TYPE,
|
||||
Event::ConfigDeleted(_) => ConfigDeleted::TYPE,
|
||||
Event::ManifestPublished(_) => ManifestPublished::TYPE,
|
||||
Event::ManifestUnpublished(_) => ManifestUnpublished::TYPE,
|
||||
}
|
||||
|
|
@ -268,117 +292,55 @@ pub enum ConversionError {
|
|||
// EVENTS START HERE
|
||||
//
|
||||
|
||||
// Component Events
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
|
||||
pub struct ActorStarted {
|
||||
pub annotations: HashMap<String, String>,
|
||||
// Commented out for now because the host broken it and we actually don't use this right now
|
||||
// pub api_version: usize,
|
||||
pub claims: ActorClaims,
|
||||
pub struct ComponentScaled {
|
||||
pub annotations: BTreeMap<String, String>,
|
||||
pub claims: Option<ComponentClaims>,
|
||||
pub image_ref: String,
|
||||
// TODO: Parse as UUID?
|
||||
pub instance_id: String,
|
||||
// TODO: Parse as nkey?
|
||||
pub public_key: String,
|
||||
pub max_instances: usize,
|
||||
pub component_id: String,
|
||||
#[serde(default)]
|
||||
pub host_id: String,
|
||||
}
|
||||
|
||||
event_impl!(
|
||||
ActorStarted,
|
||||
"com.wasmcloud.lattice.actor_started",
|
||||
ComponentScaled,
|
||||
"com.wasmcloud.lattice.component_scaled",
|
||||
source,
|
||||
host_id
|
||||
);
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
|
||||
pub struct ActorsStarted {
|
||||
pub annotations: HashMap<String, String>,
|
||||
// Commented out for now because the host broken it and we actually don't use this right now
|
||||
// pub api_version: usize,
|
||||
pub claims: ActorClaims,
|
||||
pub struct ComponentScaleFailed {
|
||||
pub annotations: BTreeMap<String, String>,
|
||||
pub claims: Option<ComponentClaims>,
|
||||
pub image_ref: String,
|
||||
pub count: usize,
|
||||
// TODO: Parse as nkey?
|
||||
pub public_key: String,
|
||||
#[serde(default)]
|
||||
pub host_id: String,
|
||||
}
|
||||
|
||||
event_impl!(
|
||||
ActorsStarted,
|
||||
"com.wasmcloud.lattice.actors_started",
|
||||
source,
|
||||
host_id
|
||||
);
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
|
||||
pub struct ActorsStartFailed {
|
||||
pub annotations: HashMap<String, String>,
|
||||
pub image_ref: String,
|
||||
// TODO: Parse as nkey?
|
||||
pub public_key: String,
|
||||
pub max_instances: usize,
|
||||
pub component_id: String,
|
||||
#[serde(default)]
|
||||
pub host_id: String,
|
||||
pub error: String,
|
||||
}
|
||||
|
||||
event_impl!(
|
||||
ActorsStartFailed,
|
||||
"com.wasmcloud.lattice.actors_start_failed",
|
||||
ComponentScaleFailed,
|
||||
"com.wasmcloud.lattice.component_scale_failed",
|
||||
source,
|
||||
host_id
|
||||
);
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
|
||||
pub struct ActorStopped {
|
||||
#[serde(default)]
|
||||
pub annotations: HashMap<String, String>,
|
||||
pub instance_id: String,
|
||||
// TODO: Parse as nkey?
|
||||
pub public_key: String,
|
||||
#[serde(default)]
|
||||
pub host_id: String,
|
||||
}
|
||||
|
||||
event_impl!(
|
||||
ActorStopped,
|
||||
"com.wasmcloud.lattice.actor_stopped",
|
||||
source,
|
||||
host_id
|
||||
);
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
|
||||
pub struct ActorsStopped {
|
||||
#[serde(default)]
|
||||
pub annotations: HashMap<String, String>,
|
||||
// TODO: Parse as nkey?
|
||||
pub public_key: String,
|
||||
#[serde(default)]
|
||||
pub host_id: String,
|
||||
/// Number of actors stopped from this command
|
||||
pub count: usize,
|
||||
/// Remaining number of this actor running on the host
|
||||
pub remaining: usize,
|
||||
}
|
||||
|
||||
event_impl!(
|
||||
ActorsStopped,
|
||||
"com.wasmcloud.lattice.actors_stopped",
|
||||
source,
|
||||
host_id
|
||||
);
|
||||
// Provider Events
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
|
||||
pub struct ProviderStarted {
|
||||
pub annotations: HashMap<String, String>,
|
||||
pub claims: ProviderClaims,
|
||||
pub contract_id: String,
|
||||
pub annotations: BTreeMap<String, String>,
|
||||
#[serde(default)]
|
||||
/// Optional provider claims
|
||||
pub claims: Option<ProviderClaims>,
|
||||
pub image_ref: String,
|
||||
// TODO: parse as UUID?
|
||||
pub instance_id: String,
|
||||
pub link_name: String,
|
||||
// TODO: parse as nkey?
|
||||
pub public_key: String,
|
||||
pub provider_id: String,
|
||||
#[serde(default)]
|
||||
pub host_id: String,
|
||||
}
|
||||
|
|
@ -393,7 +355,7 @@ event_impl!(
|
|||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
|
||||
pub struct ProviderStartFailed {
|
||||
pub error: String,
|
||||
pub link_name: String,
|
||||
pub provider_id: String,
|
||||
pub provider_ref: String,
|
||||
#[serde(default)]
|
||||
pub host_id: String,
|
||||
|
|
@ -408,18 +370,8 @@ event_impl!(
|
|||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
|
||||
pub struct ProviderStopped {
|
||||
#[serde(default)]
|
||||
// TODO(thomastaylor312): Yep, there was a spelling bug in the host is 0.62.1. Revert this once
|
||||
// 0.62.2 is out
|
||||
#[serde(rename = "annotaions")]
|
||||
pub annotations: HashMap<String, String>,
|
||||
pub contract_id: String,
|
||||
// TODO: parse as UUID?
|
||||
pub instance_id: String,
|
||||
pub link_name: String,
|
||||
// TODO: parse as nkey?
|
||||
pub public_key: String,
|
||||
// We should probably do an actual enum here, but elixir definitely isn't doing it
|
||||
pub annotations: BTreeMap<String, String>,
|
||||
pub provider_id: String,
|
||||
pub reason: String,
|
||||
#[serde(default)]
|
||||
pub host_id: String,
|
||||
|
|
@ -436,68 +388,77 @@ event_impl!(
|
|||
pub struct ProviderHealthCheckPassed {
|
||||
#[serde(flatten)]
|
||||
pub data: ProviderHealthCheckInfo,
|
||||
#[serde(default)]
|
||||
pub host_id: String,
|
||||
}
|
||||
|
||||
event_impl!(
|
||||
ProviderHealthCheckPassed,
|
||||
"com.wasmcloud.lattice.health_check_passed",
|
||||
source,
|
||||
host_id
|
||||
"com.wasmcloud.lattice.health_check_passed"
|
||||
);
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
|
||||
pub struct ProviderHealthCheckFailed {
|
||||
#[serde(flatten)]
|
||||
pub data: ProviderHealthCheckInfo,
|
||||
#[serde(default)]
|
||||
pub host_id: String,
|
||||
}
|
||||
|
||||
event_impl!(
|
||||
ProviderHealthCheckFailed,
|
||||
"com.wasmcloud.lattice.health_check_failed",
|
||||
source,
|
||||
host_id
|
||||
"com.wasmcloud.lattice.health_check_failed"
|
||||
);
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
|
||||
pub struct ProviderHealthCheckStatus {
|
||||
#[serde(flatten)]
|
||||
pub data: ProviderHealthCheckInfo,
|
||||
#[serde(default)]
|
||||
pub host_id: String,
|
||||
}
|
||||
|
||||
event_impl!(
|
||||
ProviderHealthCheckStatus,
|
||||
"com.wasmcloud.lattice.health_check_status",
|
||||
source,
|
||||
host_id
|
||||
"com.wasmcloud.lattice.health_check_status"
|
||||
);
|
||||
|
||||
// Link Events
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
|
||||
pub struct LinkdefSet {
|
||||
#[serde(flatten)]
|
||||
pub linkdef: Linkdef,
|
||||
pub linkdef: Link,
|
||||
}
|
||||
|
||||
event_impl!(LinkdefSet, "com.wasmcloud.lattice.linkdef_set");
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
|
||||
pub struct LinkdefDeleted {
|
||||
#[serde(flatten)]
|
||||
pub linkdef: Linkdef,
|
||||
pub source_id: String,
|
||||
pub name: String,
|
||||
pub wit_namespace: String,
|
||||
pub wit_package: String,
|
||||
}
|
||||
|
||||
event_impl!(LinkdefDeleted, "com.wasmcloud.lattice.linkdef_deleted");
|
||||
|
||||
// Config Events
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
|
||||
pub struct ConfigSet {
|
||||
pub config_name: String,
|
||||
}
|
||||
|
||||
event_impl!(ConfigSet, "com.wasmcloud.lattice.config_set");
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
|
||||
pub struct ConfigDeleted {
|
||||
pub config_name: String,
|
||||
}
|
||||
|
||||
event_impl!(ConfigDeleted, "com.wasmcloud.lattice.config_deleted");
|
||||
|
||||
// Host Events
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
|
||||
pub struct HostStarted {
|
||||
pub labels: HashMap<String, String>,
|
||||
pub friendly_name: String,
|
||||
// TODO: Parse as nkey?
|
||||
#[serde(default)]
|
||||
pub id: String,
|
||||
}
|
||||
|
|
@ -512,7 +473,6 @@ event_impl!(
|
|||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
|
||||
pub struct HostStopped {
|
||||
pub labels: HashMap<String, String>,
|
||||
// TODO: Parse as nkey?
|
||||
#[serde(default)]
|
||||
pub id: String,
|
||||
}
|
||||
|
|
@ -526,27 +486,37 @@ event_impl!(
|
|||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
|
||||
pub struct HostHeartbeat {
|
||||
pub actors: HashMap<String, usize>,
|
||||
/// Components running on this host.
|
||||
pub components: Vec<ComponentDescription>,
|
||||
/// Providers running on this host
|
||||
pub providers: Vec<ProviderDescription>,
|
||||
/// The host's unique ID
|
||||
#[serde(default, alias = "id")]
|
||||
pub host_id: String,
|
||||
/// The host's cluster issuer public key
|
||||
#[serde(default)]
|
||||
pub issuer: String,
|
||||
/// The host's human-readable friendly name
|
||||
pub friendly_name: String,
|
||||
/// The host's labels
|
||||
pub labels: HashMap<String, String>,
|
||||
#[serde(default)]
|
||||
pub annotations: HashMap<String, String>,
|
||||
pub providers: Vec<ProviderInfo>,
|
||||
pub uptime_human: String,
|
||||
pub uptime_seconds: usize,
|
||||
/// The host version
|
||||
pub version: semver::Version,
|
||||
// TODO: Parse as nkey?
|
||||
#[serde(default)]
|
||||
pub id: String,
|
||||
/// The host uptime in human-readable form
|
||||
pub uptime_human: String,
|
||||
/// The host uptime in seconds
|
||||
pub uptime_seconds: u64,
|
||||
}
|
||||
|
||||
event_impl!(
|
||||
HostHeartbeat,
|
||||
"com.wasmcloud.lattice.host_heartbeat",
|
||||
source,
|
||||
id
|
||||
host_id
|
||||
);
|
||||
|
||||
// Manifest Events
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
|
||||
pub struct ManifestPublished {
|
||||
#[serde(flatten)]
|
||||
|
|
@ -595,7 +565,8 @@ mod test {
|
|||
|
||||
#[test]
|
||||
fn test_all_supported_events() {
|
||||
let raw = std::fs::read("./test/data/events.json").expect("Unable to load test data");
|
||||
let raw = std::fs::read("../../tests/fixtures/manifests/events.json")
|
||||
.expect("Unable to load test data");
|
||||
|
||||
let all_events: Vec<cloudevents::Event> = serde_json::from_slice(&raw).unwrap();
|
||||
|
||||
|
|
@ -0,0 +1,479 @@
|
|||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::Result;
|
||||
use async_nats::jetstream::{stream::Stream, Context};
|
||||
use config::WadmConfig;
|
||||
use tokio::{sync::Semaphore, task::JoinSet};
|
||||
use tracing::log::debug;
|
||||
|
||||
#[cfg(feature = "http_admin")]
|
||||
use anyhow::Context as _;
|
||||
#[cfg(feature = "http_admin")]
|
||||
use hyper::body::Bytes;
|
||||
#[cfg(feature = "http_admin")]
|
||||
use hyper_util::rt::{TokioExecutor, TokioIo};
|
||||
#[cfg(feature = "http_admin")]
|
||||
use tokio::net::TcpListener;
|
||||
|
||||
use crate::{
|
||||
connections::ControlClientConstructor,
|
||||
consumers::{
|
||||
manager::{ConsumerManager, WorkerCreator},
|
||||
*,
|
||||
},
|
||||
nats_utils::LatticeIdParser,
|
||||
scaler::manager::{ScalerManager, WADM_NOTIFY_PREFIX},
|
||||
server::{ManifestNotifier, Server},
|
||||
storage::{nats_kv::NatsKvStore, reaper::Reaper},
|
||||
workers::{CommandPublisher, CommandWorker, EventWorker, StatusPublisher},
|
||||
};
|
||||
|
||||
pub use nats::StreamPersistence;
|
||||
|
||||
pub mod commands;
|
||||
pub mod config;
|
||||
pub mod consumers;
|
||||
pub mod events;
|
||||
pub mod nats_utils;
|
||||
pub mod publisher;
|
||||
pub mod scaler;
|
||||
pub mod server;
|
||||
pub mod storage;
|
||||
pub mod workers;
|
||||
|
||||
mod connections;
|
||||
pub(crate) mod model;
|
||||
mod nats;
|
||||
mod observer;
|
||||
#[cfg(test)]
|
||||
pub mod test_util;
|
||||
|
||||
/// Default amount of time events should stay in the stream. This is the 2x heartbeat interval, plus
/// some wiggle room. Exported to make setting defaults easy
pub const DEFAULT_EXPIRY_TIME: Duration = Duration::from_secs(70);
/// Default topic to listen to for all lattice events
pub const DEFAULT_EVENTS_TOPIC: &str = "wasmbus.evt.*.>";
/// Default topic to listen to for all lattice events in a multitenant deployment
pub const DEFAULT_MULTITENANT_EVENTS_TOPIC: &str = "*.wasmbus.evt.*.>";
/// Default topic to listen to for all commands
pub const DEFAULT_COMMANDS_TOPIC: &str = "wadm.cmd.*";
/// Default topic to listen to for all status updates. wadm.status.<lattice_id>.<manifest_name>
pub const DEFAULT_STATUS_TOPIC: &str = "wadm.status.*.*";
/// Default topic to listen to for all wadm event updates
pub const DEFAULT_WADM_EVENTS_TOPIC: &str = "wadm.evt.*.>";
/// Default internal wadm event consumer listen topic for the merged wadm and wasmbus events stream.
pub const DEFAULT_WADM_EVENT_CONSUMER_TOPIC: &str = "wadm_event_consumer.evt.*.>";
/// Managed by annotation used for labeling things properly in wadm
pub const MANAGED_BY_ANNOTATION: &str = "wasmcloud.dev/managed-by";
/// Identifier for managed by annotation. This is the value [`MANAGED_BY_ANNOTATION`] is set to
pub const MANAGED_BY_IDENTIFIER: &str = "wadm";
/// An annotation that denotes which model a resource belongs to
pub const APP_SPEC_ANNOTATION: &str = "wasmcloud.dev/appspec";
/// An annotation that denotes which scaler is managing a resource
pub const SCALER_KEY: &str = "wasmcloud.dev/scaler";
/// The default link name. In the future, this will likely be pulled in from another crate
pub const DEFAULT_LINK_NAME: &str = "default";
/// Default stream name for wadm events
pub const DEFAULT_WADM_EVENT_STREAM_NAME: &str = "wadm_events";
/// Default stream name for wadm event consumer
pub const DEFAULT_WADM_EVENT_CONSUMER_STREAM_NAME: &str = "wadm_event_consumer";
/// Default stream name for wadm commands
pub const DEFAULT_COMMAND_STREAM_NAME: &str = "wadm_commands";
/// Default stream name for wadm status
pub const DEFAULT_STATUS_STREAM_NAME: &str = "wadm_status";
/// Default stream name for wadm notifications
pub const DEFAULT_NOTIFY_STREAM_NAME: &str = "wadm_notify";
/// Default stream name for wasmbus events
pub const DEFAULT_WASMBUS_EVENT_STREAM_NAME: &str = "wasmbus_events";
|
||||
|
||||
/// Start wadm with the provided [WadmConfig], returning [JoinSet] with two tasks:
|
||||
/// 1. The server task that listens for API requests
|
||||
/// 2. The observer task that listens for events and commands
|
||||
///
|
||||
/// When embedding wadm in another application, this function should be called to start the wadm
|
||||
/// server and observer tasks.
|
||||
///
|
||||
/// # Usage
|
||||
///
|
||||
/// ```no_run
|
||||
/// async {
|
||||
/// let config = wadm::config::WadmConfig::default();
|
||||
/// let mut wadm = wadm::start_wadm(config).await.expect("should start wadm");
|
||||
/// tokio::select! {
|
||||
/// res = wadm.join_next() => {
|
||||
/// match res {
|
||||
/// Some(Ok(_)) => {
|
||||
/// tracing::info!("WADM has exited successfully");
|
||||
/// std::process::exit(0);
|
||||
/// }
|
||||
/// Some(Err(e)) => {
|
||||
/// tracing::error!("WADM has exited with an error: {:?}", e);
|
||||
/// std::process::exit(1);
|
||||
/// }
|
||||
/// None => {
|
||||
/// tracing::info!("WADM server did not start");
|
||||
/// std::process::exit(0);
|
||||
/// }
|
||||
/// }
|
||||
/// }
|
||||
/// _ = tokio::signal::ctrl_c() => {
|
||||
/// tracing::info!("Received Ctrl+C, shutting down");
|
||||
/// std::process::exit(0);
|
||||
/// }
|
||||
/// }
|
||||
/// };
|
||||
/// ```
|
||||
pub async fn start_wadm(config: WadmConfig) -> Result<JoinSet<Result<()>>> {
|
||||
// Build storage adapter for lattice state (on by default)
|
||||
let (client, context) = nats::get_client_and_context(
|
||||
config.nats_server.clone(),
|
||||
config.domain.clone(),
|
||||
config.nats_seed.clone(),
|
||||
config.nats_jwt.clone(),
|
||||
config.nats_creds.clone(),
|
||||
config.nats_tls_ca_file.clone(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
// TODO: We will probably need to set up all the flags (like lattice prefix and topic prefix) down the line
|
||||
let connection_pool = ControlClientConstructor::new(client.clone(), None);
|
||||
|
||||
let trimmer: &[_] = &['.', '>', '*'];
|
||||
|
||||
let store = nats::ensure_kv_bucket(
|
||||
&context,
|
||||
config.state_bucket,
|
||||
1,
|
||||
config.max_state_bucket_bytes,
|
||||
config.stream_persistence.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let state_storage = NatsKvStore::new(store);
|
||||
|
||||
let manifest_storage = nats::ensure_kv_bucket(
|
||||
&context,
|
||||
config.manifest_bucket,
|
||||
1,
|
||||
config.max_manifest_bucket_bytes,
|
||||
config.stream_persistence.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let internal_stream_name = |stream_name: &str| -> String {
|
||||
match config.stream_prefix.clone() {
|
||||
Some(stream_prefix) => {
|
||||
format!(
|
||||
"{}.{}",
|
||||
stream_prefix.trim_end_matches(trimmer),
|
||||
stream_name
|
||||
)
|
||||
}
|
||||
None => stream_name.to_string(),
|
||||
}
|
||||
};
|
||||
|
||||
debug!("Ensuring wadm event stream");
|
||||
|
||||
let event_stream = nats::ensure_limits_stream(
|
||||
&context,
|
||||
internal_stream_name(DEFAULT_WADM_EVENT_STREAM_NAME),
|
||||
vec![DEFAULT_WADM_EVENTS_TOPIC.to_owned()],
|
||||
Some(
|
||||
"A stream that stores all events coming in on the wadm.evt subject in a cluster"
|
||||
.to_string(),
|
||||
),
|
||||
config.max_event_stream_bytes,
|
||||
config.stream_persistence.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
debug!("Ensuring command stream");
|
||||
|
||||
let command_stream = nats::ensure_stream(
|
||||
&context,
|
||||
internal_stream_name(DEFAULT_COMMAND_STREAM_NAME),
|
||||
vec![DEFAULT_COMMANDS_TOPIC.to_owned()],
|
||||
Some("A stream that stores all commands for wadm".to_string()),
|
||||
config.max_command_stream_bytes,
|
||||
config.stream_persistence.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let status_stream = nats::ensure_status_stream(
|
||||
&context,
|
||||
internal_stream_name(DEFAULT_STATUS_STREAM_NAME),
|
||||
vec![DEFAULT_STATUS_TOPIC.to_owned()],
|
||||
config.max_status_stream_bytes,
|
||||
config.stream_persistence.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
debug!("Ensuring wasmbus event stream");
|
||||
|
||||
// Remove the previous wadm_(multitenant)_mirror streams so that they don't
|
||||
// prevent us from creating the new wasmbus_(multitenant)_events stream
|
||||
// TODO(joonas): Remove this some time in the future once we're confident
|
||||
// enough that there are no more wadm_(multitenant)_mirror streams around.
|
||||
for mirror_stream_name in &["wadm_mirror", "wadm_multitenant_mirror"] {
|
||||
if (context.get_stream(mirror_stream_name).await).is_ok() {
|
||||
context.delete_stream(mirror_stream_name).await?;
|
||||
}
|
||||
}
|
||||
|
||||
let wasmbus_event_subjects = match config.multitenant {
|
||||
true => vec![DEFAULT_MULTITENANT_EVENTS_TOPIC.to_owned()],
|
||||
false => vec![DEFAULT_EVENTS_TOPIC.to_owned()],
|
||||
};
|
||||
|
||||
let wasmbus_event_stream = nats::ensure_limits_stream(
|
||||
&context,
|
||||
DEFAULT_WASMBUS_EVENT_STREAM_NAME.to_string(),
|
||||
wasmbus_event_subjects.clone(),
|
||||
Some(
|
||||
"A stream that stores all events coming in on the wasmbus.evt subject in a cluster"
|
||||
.to_string(),
|
||||
),
|
||||
config.max_wasmbus_event_stream_bytes,
|
||||
config.stream_persistence.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
debug!("Ensuring notify stream");
|
||||
|
||||
let notify_stream = nats::ensure_notify_stream(
|
||||
&context,
|
||||
DEFAULT_NOTIFY_STREAM_NAME.to_owned(),
|
||||
vec![format!("{WADM_NOTIFY_PREFIX}.*")],
|
||||
config.max_notify_stream_bytes,
|
||||
config.stream_persistence.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
debug!("Ensuring event consumer stream");
|
||||
|
||||
let event_consumer_stream = nats::ensure_event_consumer_stream(
|
||||
&context,
|
||||
DEFAULT_WADM_EVENT_CONSUMER_STREAM_NAME.to_owned(),
|
||||
DEFAULT_WADM_EVENT_CONSUMER_TOPIC.to_owned(),
|
||||
vec![&wasmbus_event_stream, &event_stream],
|
||||
Some(
|
||||
"A stream that sources from wadm_events and wasmbus_events for wadm event consumer's use"
|
||||
.to_string(),
|
||||
),
|
||||
config.max_event_consumer_stream_bytes,
|
||||
config.stream_persistence.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
debug!("Creating event consumer manager");
|
||||
|
||||
let permit_pool = Arc::new(Semaphore::new(
|
||||
config.max_jobs.unwrap_or(Semaphore::MAX_PERMITS),
|
||||
));
|
||||
let event_worker_creator = EventWorkerCreator {
|
||||
state_store: state_storage.clone(),
|
||||
manifest_store: manifest_storage.clone(),
|
||||
pool: connection_pool.clone(),
|
||||
command_topic_prefix: DEFAULT_COMMANDS_TOPIC.trim_matches(trimmer).to_owned(),
|
||||
publisher: context.clone(),
|
||||
notify_stream,
|
||||
status_stream: status_stream.clone(),
|
||||
};
|
||||
let events_manager: ConsumerManager<EventConsumer> = ConsumerManager::new(
|
||||
permit_pool.clone(),
|
||||
event_consumer_stream,
|
||||
event_worker_creator.clone(),
|
||||
config.multitenant,
|
||||
)
|
||||
.await;
|
||||
|
||||
debug!("Creating command consumer manager");
|
||||
|
||||
let command_worker_creator = CommandWorkerCreator {
|
||||
pool: connection_pool,
|
||||
};
|
||||
let commands_manager: ConsumerManager<CommandConsumer> = ConsumerManager::new(
|
||||
permit_pool.clone(),
|
||||
command_stream,
|
||||
command_worker_creator.clone(),
|
||||
config.multitenant,
|
||||
)
|
||||
.await;
|
||||
|
||||
// TODO(thomastaylor312): We might want to figure out how not to run this globally. Doing a
|
||||
// synthetic event sent to the stream could be nice, but all the wadm processes would still fire
|
||||
// off that tick, resulting in multiple people handling. We could maybe get it to work with the
|
||||
// right duplicate window, but we have no idea when each process could fire a tick. Worst case
|
||||
// scenario right now is that multiple fire simultaneously and a few of them just delete nothing
|
||||
let reaper = Reaper::new(
|
||||
state_storage.clone(),
|
||||
Duration::from_secs(config.cleanup_interval / 2),
|
||||
[],
|
||||
);
|
||||
|
||||
let wadm_event_prefix = DEFAULT_WADM_EVENTS_TOPIC.trim_matches(trimmer);
|
||||
|
||||
debug!("Creating lattice observer");
|
||||
|
||||
let observer = observer::Observer {
|
||||
parser: LatticeIdParser::new("wasmbus", config.multitenant),
|
||||
command_manager: commands_manager,
|
||||
event_manager: events_manager,
|
||||
reaper,
|
||||
client: client.clone(),
|
||||
command_worker_creator,
|
||||
event_worker_creator,
|
||||
};
|
||||
|
||||
debug!("Subscribing to API topic");
|
||||
|
||||
let server = Server::new(
|
||||
manifest_storage,
|
||||
client,
|
||||
Some(&config.api_prefix),
|
||||
config.multitenant,
|
||||
status_stream,
|
||||
ManifestNotifier::new(wadm_event_prefix, context),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut tasks = JoinSet::new();
|
||||
|
||||
#[cfg(feature = "http_admin")]
|
||||
if let Some(addr) = config.http_admin {
|
||||
debug!("Setting up HTTP administration endpoint");
|
||||
let socket = TcpListener::bind(addr)
|
||||
.await
|
||||
.context("failed to bind on HTTP administation endpoint")?;
|
||||
let svc = hyper::service::service_fn(move |req| {
|
||||
const OK: &str = r#"{"status":"ok"}"#;
|
||||
async move {
|
||||
let (http::request::Parts { method, uri, .. }, _) = req.into_parts();
|
||||
match (method.as_str(), uri.path()) {
|
||||
("HEAD", "/livez") => Ok(http::Response::default()),
|
||||
("GET", "/livez") => Ok(http::Response::new(http_body_util::Full::new(
|
||||
Bytes::from(OK),
|
||||
))),
|
||||
(method, "/livez") => http::Response::builder()
|
||||
.status(http::StatusCode::METHOD_NOT_ALLOWED)
|
||||
.body(http_body_util::Full::new(Bytes::from(format!(
|
||||
"method `{method}` not supported for path `/livez`"
|
||||
)))),
|
||||
("HEAD", "/readyz") => Ok(http::Response::default()),
|
||||
("GET", "/readyz") => Ok(http::Response::new(http_body_util::Full::new(
|
||||
Bytes::from(OK),
|
||||
))),
|
||||
(method, "/readyz") => http::Response::builder()
|
||||
.status(http::StatusCode::METHOD_NOT_ALLOWED)
|
||||
.body(http_body_util::Full::new(Bytes::from(format!(
|
||||
"method `{method}` not supported for path `/readyz`"
|
||||
)))),
|
||||
(.., path) => http::Response::builder()
|
||||
.status(http::StatusCode::NOT_FOUND)
|
||||
.body(http_body_util::Full::new(Bytes::from(format!(
|
||||
"unknown endpoint `{path}`"
|
||||
)))),
|
||||
}
|
||||
}
|
||||
});
|
||||
let srv = hyper_util::server::conn::auto::Builder::new(TokioExecutor::new());
|
||||
tasks.spawn(async move {
|
||||
loop {
|
||||
let stream = match socket.accept().await {
|
||||
Ok((stream, _)) => stream,
|
||||
Err(err) => {
|
||||
tracing::error!(?err, "failed to accept HTTP administration connection");
|
||||
continue;
|
||||
}
|
||||
};
|
||||
if let Err(err) = srv.serve_connection(TokioIo::new(stream), svc).await {
|
||||
tracing::error!(?err, "failed to serve HTTP administration connection");
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// Subscribe and handle API requests
|
||||
tasks.spawn(server.serve());
|
||||
// Observe and handle events
|
||||
tasks.spawn(observer.observe(wasmbus_event_subjects));
|
||||
|
||||
Ok(tasks)
|
||||
}
|
||||
|
||||
/// Factory for per-lattice [`CommandWorker`]s; each worker gets a control
/// client checked out of the shared connection pool.
#[derive(Clone)]
struct CommandWorkerCreator {
    // Shared constructor for per-lattice control interface clients
    pool: ControlClientConstructor,
}
|
||||
|
||||
#[async_trait::async_trait]
impl WorkerCreator for CommandWorkerCreator {
    type Output = CommandWorker;

    /// Builds a [`CommandWorker`] for the given lattice (and optional
    /// multitenant account prefix) backed by a pooled control client.
    async fn create(
        &self,
        lattice_id: &str,
        multitenant_prefix: Option<&str>,
    ) -> anyhow::Result<Self::Output> {
        let client = self.pool.get_connection(lattice_id, multitenant_prefix);

        Ok(CommandWorker::new(client))
    }
}
|
||||
|
||||
/// Factory for per-lattice [`EventWorker`]s, carrying every shared handle a
/// worker needs (state/manifest stores, publisher context, and streams).
#[derive(Clone)]
struct EventWorkerCreator<StateStore> {
    // Store used for lattice state
    state_store: StateStore,
    // KV bucket holding stored manifests
    manifest_store: async_nats::jetstream::kv::Store,
    // Shared constructor for per-lattice control interface clients
    pool: ControlClientConstructor,
    // Command topic prefix; the lattice id is appended per worker
    command_topic_prefix: String,
    // JetStream context used for publishing commands and status
    publisher: Context,
    // Stream used for scaler notifications
    notify_stream: Stream,
    // Stream used for status updates
    status_stream: Stream,
}
|
||||
|
||||
#[async_trait::async_trait]
impl<StateStore> WorkerCreator for EventWorkerCreator<StateStore>
where
    StateStore: crate::storage::Store + Send + Sync + Clone + 'static,
{
    type Output = EventWorker<StateStore, wasmcloud_control_interface::Client, Context>;

    /// Builds an [`EventWorker`] for the given lattice: wires up command and
    /// status publishers scoped to the lattice id, constructs a
    /// [`ScalerManager`] over the shared stores/streams, and hands all of them
    /// to the worker.
    async fn create(
        &self,
        lattice_id: &str,
        multitenant_prefix: Option<&str>,
    ) -> anyhow::Result<Self::Output> {
        let client = self.pool.get_connection(lattice_id, multitenant_prefix);
        // Commands for this lattice go to "<command_topic_prefix>.<lattice_id>"
        let command_publisher = CommandPublisher::new(
            self.publisher.clone(),
            &format!("{}.{lattice_id}", self.command_topic_prefix),
        );
        // Status updates go to "wadm.status.<lattice_id>"
        let status_publisher = StatusPublisher::new(
            self.publisher.clone(),
            Some(self.status_stream.clone()),
            &format!("wadm.status.{lattice_id}"),
        );
        let manager = ScalerManager::new(
            self.publisher.clone(),
            self.notify_stream.clone(),
            lattice_id,
            multitenant_prefix,
            self.state_store.clone(),
            self.manifest_store.clone(),
            command_publisher.clone(),
            status_publisher.clone(),
            client.clone(),
        )
        .await?;
        Ok(EventWorker::new(
            self.state_store.clone(),
            client,
            command_publisher,
            status_publisher,
            manager,
        ))
    }
}
|
||||
|
|
@ -2,16 +2,16 @@
|
|||
use indexmap::IndexMap;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::model::Manifest;
|
||||
use wadm_types::{Manifest, LATEST_VERSION, VERSION_ANNOTATION_KEY};
|
||||
|
||||
use super::LATEST_VERSION;
|
||||
|
||||
/// This struct represents a single manfiest, with its version history. Internally these are stored
|
||||
/// This struct represents a single manifest, with its version history. Internally these are stored
|
||||
/// as an indexmap keyed by version name
|
||||
#[derive(Debug, Serialize, Deserialize, Default, Clone)]
|
||||
pub(crate) struct StoredManifest {
|
||||
// Ordering matters for how we store a manifest, so we need to use an index map to preserve
|
||||
// insertion order _and_ have quick access to specific versions
|
||||
// NOTE(thomastaylor312): We probably should have a configurable limit for how many we keep
|
||||
// around in history so they don't balloon forever
|
||||
manifests: IndexMap<String, Manifest>,
|
||||
// Set only if a version is deployed
|
||||
deployed_version: Option<String>,
|
||||
|
|
@ -28,8 +28,20 @@ impl StoredManifest {
|
|||
|
||||
/// Adds the given manifest, returning `false` if unable to add (e.g. the version already
|
||||
/// exists)
|
||||
pub fn add_version(&mut self, manifest: Manifest) -> bool {
|
||||
let version = manifest.version().to_owned();
|
||||
pub fn add_version(&mut self, mut manifest: Manifest) -> bool {
|
||||
let version = match manifest.metadata.annotations.get(VERSION_ANNOTATION_KEY) {
|
||||
Some(v) => v.to_string(),
|
||||
None => {
|
||||
// If a version is not given, automatically add a new version with a specific ULID (that way
|
||||
// it can be sorted in order)
|
||||
let v = ulid::Ulid::new().to_string();
|
||||
manifest
|
||||
.metadata
|
||||
.annotations
|
||||
.insert(VERSION_ANNOTATION_KEY.to_string(), v.clone());
|
||||
v
|
||||
}
|
||||
};
|
||||
if self.manifests.contains_key(&version) {
|
||||
return false;
|
||||
}
|
||||
|
|
@ -128,3 +140,53 @@ impl StoredManifest {
|
|||
self.manifests.len()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::*;

    use std::{io::BufReader, path::Path};

    use anyhow::Result;
    use wadm_types::VERSION_ANNOTATION_KEY;

    /// Loads a [`Manifest`] from a YAML file on disk.
    pub(crate) fn deserialize_yaml(filepath: impl AsRef<Path>) -> Result<Manifest> {
        let file = std::fs::File::open(filepath)?;
        let reader = BufReader::new(file);
        let yaml_string: Manifest = serde_yaml::from_reader(reader)?;
        Ok(yaml_string)
    }

    /// Exercises the version handling of `StoredManifest::add_version`:
    /// auto-assigned ULID versions, explicit annotation versions, and
    /// rejection of duplicate versions.
    #[test]
    fn test_versioning() {
        let mut manifest = deserialize_yaml("../../tests/fixtures/manifests/simple2.yaml")
            .expect("Should be able to parse");
        let mut stored = StoredManifest::default();

        // No version annotation present: a ULID should be generated
        assert!(
            stored.add_version(manifest.clone()),
            "Should be able to add manifest without a version set"
        );

        let updated = stored.get_current();
        ulid::Ulid::from_string(updated.version()).expect("Should have had a ULID set");

        // Now update the manifest and add a new custom version
        manifest
            .metadata
            .annotations
            .insert(VERSION_ANNOTATION_KEY.to_string(), "v0.0.1".to_string());
        assert!(
            stored.add_version(manifest.clone()),
            "Should be able to add manifest with custom version"
        );
        let updated = stored.get_current();
        assert_eq!(updated.version(), "v0.0.1", "Version should still be set");

        // Try adding again and make sure that still fails
        assert!(
            !stored.add_version(manifest),
            "Adding duplicate version should fail"
        );
    }
}
|
||||
|
|
@ -0,0 +1,410 @@
|
|||
use std::path::PathBuf;
|
||||
|
||||
use anyhow::{anyhow, Result};
|
||||
use async_nats::{
|
||||
jetstream::{
|
||||
self,
|
||||
kv::{Config as KvConfig, Store},
|
||||
stream::{Config as StreamConfig, Source, StorageType, Stream, SubjectTransform},
|
||||
Context,
|
||||
},
|
||||
Client, ConnectOptions,
|
||||
};
|
||||
|
||||
use crate::DEFAULT_EXPIRY_TIME;
|
||||
use tracing::{debug, warn};
|
||||
|
||||
#[derive(Debug, Clone, Copy, Default)]
|
||||
pub enum StreamPersistence {
|
||||
#[default]
|
||||
File,
|
||||
Memory,
|
||||
}
|
||||
|
||||
impl std::fmt::Display for StreamPersistence {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
StreamPersistence::File => write!(f, "file"),
|
||||
StreamPersistence::Memory => write!(f, "memory"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<StreamPersistence> for StorageType {
|
||||
fn from(persistance: StreamPersistence) -> Self {
|
||||
match persistance {
|
||||
StreamPersistence::File => StorageType::File,
|
||||
StreamPersistence::Memory => StorageType::Memory,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&str> for StreamPersistence {
|
||||
fn from(persistance: &str) -> Self {
|
||||
match persistance {
|
||||
"file" => StreamPersistence::File,
|
||||
"memory" => StreamPersistence::Memory,
|
||||
_ => StreamPersistence::File,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a NATS client from the given options
///
/// When no seed/jwt/creds are supplied, connects anonymously (with TLS if a CA
/// file is given); otherwise builds authenticated connect options via
/// `build_nats_options`. Returns the client together with a JetStream context,
/// scoped to `js_domain` when one is provided.
pub(crate) async fn get_client_and_context(
    url: String,
    js_domain: Option<String>,
    seed: Option<String>,
    jwt: Option<String>,
    creds_path: Option<PathBuf>,
    ca_path: Option<PathBuf>,
) -> Result<(Client, Context)> {
    // No credentials at all: plain (optionally TLS-verified) connection
    let client = if seed.is_none() && jwt.is_none() && creds_path.is_none() {
        let mut opts = async_nats::ConnectOptions::new();
        if let Some(ca) = ca_path {
            opts = opts.add_root_certificates(ca).require_tls(true);
        }
        opts.connect(url).await?
    } else {
        let opts = build_nats_options(seed, jwt, creds_path, ca_path).await?;
        async_nats::connect_with_options(url, opts).await?
    };

    // Scope JetStream to the configured domain when set
    let context = if let Some(domain) = js_domain {
        jetstream::with_domain(client.clone(), domain)
    } else {
        jetstream::new(client.clone())
    };

    Ok((client, context))
}
|
||||
|
||||
/// Builds authenticated NATS [`ConnectOptions`] from either a seed + JWT pair
/// or a credentials file (exactly one of the two combinations is accepted),
/// enabling TLS with the given root CA when `ca_path` is provided.
async fn build_nats_options(
    seed: Option<String>,
    jwt: Option<String>,
    creds_path: Option<PathBuf>,
    ca_path: Option<PathBuf>,
) -> Result<ConnectOptions> {
    let mut opts = async_nats::ConnectOptions::new();
    opts = match (seed, jwt, creds_path) {
        // Seed + JWT: sign server nonces with the seed's key pair
        (Some(seed), Some(jwt), None) => {
            let jwt = resolve_jwt(jwt).await?;
            let kp = std::sync::Arc::new(get_seed(seed).await?);

            opts.jwt(jwt, move |nonce| {
                let key_pair = kp.clone();
                async move { key_pair.sign(&nonce).map_err(async_nats::AuthError::new) }
            })
        }
        // Credentials file alone
        (None, None, Some(creds)) => opts.credentials_file(creds).await?,
        _ => {
            // We shouldn't ever get here due to the requirements on the flags, but return a helpful error just in case
            return Err(anyhow::anyhow!(
                "Got too many options. Make sure to provide a seed and jwt or a creds path"
            ));
        }
    };
    if let Some(ca) = ca_path {
        opts = opts.add_root_certificates(ca).require_tls(true);
    }
    Ok(opts)
}
|
||||
|
||||
/// Takes a string that could be a raw seed, or a path and does all the necessary loading and parsing steps
async fn get_seed(seed: String) -> Result<nkeys::KeyPair> {
    // MAGIC NUMBER: Length of a seed key. A value of exactly that length that
    // starts with 'S' is treated as the raw seed; anything else is read as a
    // file path containing the seed.
    let raw_seed = if seed.len() == 58 && seed.starts_with('S') {
        seed
    } else {
        tokio::fs::read_to_string(seed).await?
    };

    nkeys::KeyPair::from_seed(&raw_seed).map_err(anyhow::Error::from)
}
|
||||
|
||||
/// Resolves a JWT value by either returning the string itself if it's a valid JWT
|
||||
/// or by loading the contents of a file specified by the JWT value.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `jwt_or_file` - A string that represents either a JWT or a file path containing a JWT.
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// A `Result` containing a string if successful, or an error if the JWT value
|
||||
/// is invalid or the file cannot be read.
|
||||
async fn resolve_jwt(jwt_or_file: String) -> Result<String> {
|
||||
if tokio::fs::metadata(&jwt_or_file)
|
||||
.await
|
||||
.map(|metadata| metadata.is_file())
|
||||
.unwrap_or(false)
|
||||
{
|
||||
tokio::fs::read_to_string(jwt_or_file)
|
||||
.await
|
||||
.map_err(|e| anyhow!("Error loading JWT from file: {e}"))
|
||||
} else {
|
||||
// We could do more validation on the JWT here, but if the JWT is invalid then
|
||||
// connecting will fail anyways
|
||||
Ok(jwt_or_file)
|
||||
}
|
||||
}
|
||||
|
||||
/// A helper that ensures that the given stream name exists, using defaults to create if it does
/// not. Returns the handle to the stream
///
/// Creates a work-queue retention stream. If a stream with the same name but
/// different subjects already exists, it is deleted and recreated.
pub async fn ensure_stream(
    context: &Context,
    name: String,
    subjects: Vec<String>,
    description: Option<String>,
    max_bytes: i64,
    storage: StorageType,
) -> Result<Stream> {
    debug!("Ensuring stream {name} exists");
    let stream_config = StreamConfig {
        name: name.clone(),
        description,
        num_replicas: 1,
        retention: async_nats::jetstream::stream::RetentionPolicy::WorkQueue,
        subjects,
        max_age: DEFAULT_EXPIRY_TIME,
        allow_rollup: false,
        max_bytes,
        storage,
        ..Default::default()
    };

    if let Ok(stream) = context.get_stream(&name).await {
        // For now, we only check if the subjects are the same in order to make sure that
        // newer versions of wadm adjust subjects appropriately. In the case that developers
        // want to alter the storage or replicas of a stream, for example,
        // we don't want to override that configuration.
        if stream.cached_info().config.subjects == stream_config.subjects {
            return Ok(stream);
        } else {
            warn!("Found stream {name} with different configuration, deleting and recreating");
            context.delete_stream(name).await?;
        }
    }

    context
        .get_or_create_stream(stream_config)
        .await
        .map_err(|e| anyhow::anyhow!("{e:?}"))
}
|
||||
|
||||
/// Ensures that the given stream exists with limits-based retention, creating
/// it with defaults if missing. Mirrors [`ensure_stream`] except for the
/// retention policy. Returns the handle to the stream.
pub async fn ensure_limits_stream(
    context: &Context,
    name: String,
    subjects: Vec<String>,
    description: Option<String>,
    max_bytes: i64,
    storage: StorageType,
) -> Result<Stream> {
    debug!("Ensuring stream {name} exists");
    let stream_config = StreamConfig {
        name: name.clone(),
        description,
        num_replicas: 1,
        retention: async_nats::jetstream::stream::RetentionPolicy::Limits,
        subjects,
        max_age: DEFAULT_EXPIRY_TIME,
        allow_rollup: false,
        max_bytes,
        storage,
        ..Default::default()
    };

    if let Ok(stream) = context.get_stream(&name).await {
        // For now, we only check if the subjects are the same in order to make sure that
        // newer versions of wadm adjust subjects appropriately. In the case that developers
        // want to alter the storage or replicas of a stream, for example,
        // we don't want to override that configuration.
        if stream.cached_info().config.subjects == stream_config.subjects {
            return Ok(stream);
        } else {
            warn!("Found stream {name} with different configuration, deleting and recreating");
            context.delete_stream(name).await?;
        }
    }

    context
        .get_or_create_stream(stream_config)
        .await
        .map_err(|e| anyhow::anyhow!("{e:?}"))
}
|
||||
|
||||
/// Ensures the merged event-consumer stream exists: a work-queue stream that
/// sources from the given upstream streams, rewriting their subjects onto the
/// single `subject` namespace. Returns the handle to the stream.
///
/// If a stream with the same name but a different retention policy exists, it
/// is deleted and recreated.
pub async fn ensure_event_consumer_stream(
    context: &Context,
    name: String,
    subject: String,
    streams: Vec<&Stream>,
    description: Option<String>,
    max_bytes: i64,
    storage: StorageType,
) -> Result<Stream> {
    debug!("Ensuring stream {name} exists");
    // This maps the upstream (wasmbus.evt.*.> & wadm.evt.*.>) Streams into
    // a set of configuration for the downstream wadm event consumer Stream
    // that consolidates them into a single set of subjects (wadm_event_consumer.evt.*.>)
    // to be consumable by the wadm event consumer.
    let sources = streams
        .iter()
        .map(|stream| stream.cached_info().config.clone())
        .map(|stream_config| Source {
            name: stream_config.name,
            subject_transforms: stream_config
                .subjects
                .iter()
                .map(|stream_subject| SubjectTransform {
                    source: stream_subject.to_owned(),
                    destination: match stream_subject.starts_with('*') {
                        // If we have a multi-tenant stream subject, we need to replace
                        // the second wildcard since the first one represents the account id,
                        // otherwise replace the first one:
                        //
                        // multi-tenant: <account-id>.<subject>.evt.<lattice-id>.<event-type>
                        // single-tenant: <subject>.evt.<lattice-id>.<event-type>
                        true => subject.replacen('*', "{{wildcard(2)}}", 1),
                        false => subject.replacen('*', "{{wildcard(1)}}", 1),
                    },
                })
                .collect(),
            ..Default::default()
        })
        .collect();

    let stream_config = StreamConfig {
        name: name.clone(),
        description,
        num_replicas: 1,
        retention: async_nats::jetstream::stream::RetentionPolicy::WorkQueue,
        // No direct subjects: all messages arrive via the sources above
        subjects: vec![],
        max_age: DEFAULT_EXPIRY_TIME,
        sources: Some(sources),
        allow_rollup: false,
        max_bytes,
        storage,
        ..Default::default()
    };

    if let Ok(stream) = context.get_stream(&name).await {
        // Only the retention policy is compared here; other config drift is
        // left alone so operator-tuned settings aren't clobbered
        if stream.cached_info().config.retention == stream_config.retention {
            return Ok(stream);
        } else {
            warn!("Found stream {name} with different configuration, deleting and recreating");
            context.delete_stream(name).await?;
        }
    }

    context
        .get_or_create_stream(stream_config)
        .await
        .map_err(|e| anyhow::anyhow!("{e:?}"))
}
|
||||
|
||||
/// Ensures the status stream exists, creating it with defaults if missing.
/// Keeps at most 10 messages per subject with no age limit, and enables
/// direct gets so the latest status can be fetched without a consumer.
pub async fn ensure_status_stream(
    context: &Context,
    name: String,
    subjects: Vec<String>,
    max_bytes: i64,
    storage: StorageType,
) -> Result<Stream> {
    debug!("Ensuring stream {name} exists");
    context
        .get_or_create_stream(StreamConfig {
            name,
            description: Some(
                "A stream that stores all status updates for wadm applications".into(),
            ),
            num_replicas: 1,
            allow_direct: true,
            retention: async_nats::jetstream::stream::RetentionPolicy::Limits,
            max_messages_per_subject: 10,
            subjects,
            // Zero max_age: statuses never expire by time
            max_age: std::time::Duration::from_nanos(0),
            max_bytes,
            storage,
            ..Default::default()
        })
        .await
        .map_err(|e| anyhow::anyhow!("{e:?}"))
}
|
||||
|
||||
/// A helper that ensures that the notify stream exists
|
||||
pub async fn ensure_notify_stream(
|
||||
context: &Context,
|
||||
name: String,
|
||||
subjects: Vec<String>,
|
||||
max_bytes: i64,
|
||||
storage: StorageType,
|
||||
) -> Result<Stream> {
|
||||
debug!("Ensuring stream {name} exists");
|
||||
context
|
||||
.get_or_create_stream(StreamConfig {
|
||||
name,
|
||||
description: Some("A stream for capturing all notification events for wadm".into()),
|
||||
num_replicas: 1,
|
||||
retention: async_nats::jetstream::stream::RetentionPolicy::Interest,
|
||||
subjects,
|
||||
max_age: DEFAULT_EXPIRY_TIME,
|
||||
max_bytes,
|
||||
storage,
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
.map_err(|e| anyhow::anyhow!("{e:?}"))
|
||||
}
|
||||
|
||||
/// A helper that ensures that the given KV bucket exists, using defaults to create if it does
|
||||
/// not. Returns the handle to the stream
|
||||
pub async fn ensure_kv_bucket(
|
||||
context: &Context,
|
||||
name: String,
|
||||
history_to_keep: i64,
|
||||
max_bytes: i64,
|
||||
storage: StorageType,
|
||||
) -> Result<Store> {
|
||||
debug!("Ensuring kv bucket {name} exists");
|
||||
if let Ok(kv) = context.get_key_value(&name).await {
|
||||
Ok(kv)
|
||||
} else {
|
||||
context
|
||||
.create_key_value(KvConfig {
|
||||
bucket: name,
|
||||
history: history_to_keep,
|
||||
num_replicas: 1,
|
||||
storage,
|
||||
max_bytes,
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
.map_err(|e| anyhow::anyhow!("{e:?}"))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::resolve_jwt;
|
||||
use anyhow::Result;
|
||||
|
||||
#[tokio::test]
|
||||
async fn can_resolve_jwt_value_and_file() -> Result<()> {
|
||||
let my_jwt = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ2aWRlb0lkIjoiUWpVaUxYSnVjMjl0IiwiaWF0IjoxNjIwNjAzNDY5fQ.2PKx6y2ym6IWbeM6zFgHOkDnZEtGTR3YgYlQ2_Jki5g";
|
||||
let jwt_path = "../../tests/fixtures/nats.jwt";
|
||||
let jwt_inside_file = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdHJpbmciOiAiQWNjb3JkIHRvIGFsbCBrbm93biBsb3dzIG9mIGF2aWF0aW9uLCB0aGVyZSBpcyBubyB3YXkgdGhhdCBhIGJlZSBhYmxlIHRvIGZseSJ9.GyU6pTRhflcOg6KBCU6wZedP8BQzLXbdgYIoU6KzzD8";
|
||||
|
||||
assert_eq!(
|
||||
resolve_jwt(my_jwt.to_string())
|
||||
.await
|
||||
.expect("should resolve jwt string to itself"),
|
||||
my_jwt.to_string()
|
||||
);
|
||||
assert_eq!(
|
||||
resolve_jwt(jwt_path.to_string())
|
||||
.await
|
||||
.expect("should be able to read jwt file"),
|
||||
jwt_inside_file.to_string()
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,192 @@
|
|||
//! Helper utilities for interacting with NATS
|
||||
const EVENT_SUBJECT: &str = "evt";
|
||||
|
||||
/// A parser for NATS subjects that parses out a lattice ID for any given subject
|
||||
pub struct LatticeIdParser {
|
||||
// NOTE(thomastaylor312): We don't actually support specific prefixes right now, but we could in
|
||||
// the future as we already do for control topics. So this is just trying to future proof
|
||||
prefix: String,
|
||||
multitenant: bool,
|
||||
}
|
||||
|
||||
impl LatticeIdParser {
|
||||
/// Returns a new parser configured to use the given prefix. If `multitenant` is set to true,
|
||||
/// this parser will also attempt to parse a subject as if it were an account imported topic
|
||||
/// (e.g. `A****.wasmbus.evt.{lattice-id}.>`) if it doesn't match the normally expected pattern
|
||||
pub fn new(prefix: &str, multitenant: bool) -> LatticeIdParser {
|
||||
LatticeIdParser {
|
||||
prefix: prefix.to_owned(),
|
||||
multitenant,
|
||||
}
|
||||
}
|
||||
|
||||
/// Parses the given subject based on settings and then returns the lattice ID of the subject and
|
||||
/// the account ID if it is multitenant.
|
||||
/// Returns None if it couldn't parse the topic
|
||||
pub fn parse(&self, subject: &str) -> Option<LatticeInformation> {
|
||||
let separated: Vec<&str> = subject.split('.').collect();
|
||||
// For reference, topics look like the following:
|
||||
//
|
||||
// Normal: `{prefix}.evt.{lattice-id}.{event-type}`
|
||||
// Multitenant: `{account-id}.{prefix}.evt.{lattice-id}.{event-type}`
|
||||
//
|
||||
// Note that the account ID should be prefaced with an `A`
|
||||
match separated[..] {
|
||||
[prefix, evt, lattice_id, _event_type]
|
||||
if prefix == self.prefix && evt == EVENT_SUBJECT =>
|
||||
{
|
||||
Some(LatticeInformation {
|
||||
lattice_id: lattice_id.to_owned(),
|
||||
multitenant_prefix: None,
|
||||
prefix: self.prefix.clone(),
|
||||
})
|
||||
}
|
||||
[account_id, prefix, evt, lattice_id, _event_type]
|
||||
if self.multitenant
|
||||
&& prefix == self.prefix
|
||||
&& evt == EVENT_SUBJECT
|
||||
&& account_id.starts_with('A') =>
|
||||
{
|
||||
Some(LatticeInformation {
|
||||
lattice_id: lattice_id.to_owned(),
|
||||
multitenant_prefix: Some(account_id.to_owned()),
|
||||
prefix: self.prefix.clone(),
|
||||
})
|
||||
}
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Simple helper struct for returning lattice information from a parsed event topic
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct LatticeInformation {
|
||||
lattice_id: String,
|
||||
multitenant_prefix: Option<String>,
|
||||
prefix: String,
|
||||
}
|
||||
|
||||
impl LatticeInformation {
|
||||
pub fn lattice_id(&self) -> &str {
|
||||
&self.lattice_id
|
||||
}
|
||||
|
||||
pub fn multitenant_prefix(&self) -> Option<&str> {
|
||||
self.multitenant_prefix.as_deref()
|
||||
}
|
||||
|
||||
/// Constructs the event subject to listen on for a particular lattice
|
||||
pub fn event_subject(&self) -> String {
|
||||
if let Some(account_id) = &self.multitenant_prefix {
|
||||
// e.g. Axxx.wasmbus.evt.{lattice-id}.*
|
||||
format!(
|
||||
"{}.{}.{}.{}.*",
|
||||
account_id, self.prefix, EVENT_SUBJECT, self.lattice_id
|
||||
)
|
||||
} else {
|
||||
// e.g. wasmbus.evt.{lattice-id}.*
|
||||
format!("{}.{}.{}.*", self.prefix, EVENT_SUBJECT, self.lattice_id)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_valid_subjects() {
|
||||
// Default first
|
||||
let parser = LatticeIdParser::new("wasmbus", false);
|
||||
|
||||
let single_lattice = parser
|
||||
.parse("wasmbus.evt.blahblah.>")
|
||||
.expect("Should return lattice id");
|
||||
assert_eq!(
|
||||
single_lattice.lattice_id(),
|
||||
"blahblah",
|
||||
"Should return the right ID"
|
||||
);
|
||||
assert_eq!(
|
||||
single_lattice.multitenant_prefix(),
|
||||
None,
|
||||
"Should return no multitenant prefix"
|
||||
);
|
||||
assert_eq!(
|
||||
single_lattice.event_subject(),
|
||||
"wasmbus.evt.blahblah.*",
|
||||
"Should return the right event subject"
|
||||
);
|
||||
|
||||
// Shouldn't parse a multitenant
|
||||
assert!(
|
||||
parser.parse("ACCOUNTID.wasmbus.evt.default.>").is_none(),
|
||||
"Shouldn't parse a multitenant topic"
|
||||
);
|
||||
|
||||
// Multitenant second
|
||||
let parser = LatticeIdParser::new("wasmbus", true);
|
||||
|
||||
assert_eq!(
|
||||
parser
|
||||
.parse("wasmbus.evt.blahblah.host_heartbeat")
|
||||
.expect("Should return lattice id")
|
||||
.lattice_id(),
|
||||
"blahblah",
|
||||
"Should return the right ID"
|
||||
);
|
||||
|
||||
let res = parser
|
||||
.parse("ACCOUNTID.wasmbus.evt.blahblah.>")
|
||||
.expect("Should parse multitenant topic");
|
||||
|
||||
assert_eq!(res.lattice_id(), "blahblah", "Should return the right ID");
|
||||
assert_eq!(
|
||||
res.multitenant_prefix()
|
||||
.expect("Should return account id in multitenant mode"),
|
||||
"ACCOUNTID",
|
||||
"Should return the right ID"
|
||||
);
|
||||
assert_eq!(
|
||||
res.event_subject(),
|
||||
"ACCOUNTID.wasmbus.evt.blahblah.*",
|
||||
"Should return the right event subject"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_invalid_subjects() {
|
||||
let parser = LatticeIdParser::new("wasmbus", true);
|
||||
|
||||
// Test 3 and 4 part subjects to make sure they don't parse
|
||||
assert!(
|
||||
parser.parse("BLAH.wasmbus.notevt.default.>").is_none(),
|
||||
"Shouldn't parse 4 part invalid topic"
|
||||
);
|
||||
|
||||
assert!(
|
||||
parser.parse("wasmbus.notme.default.>").is_none(),
|
||||
"Shouldn't parse 3 part invalid topic"
|
||||
);
|
||||
|
||||
assert!(
|
||||
parser.parse("lebus.evt.default.>").is_none(),
|
||||
"Shouldn't parse an non-matching prefix"
|
||||
);
|
||||
|
||||
assert!(
|
||||
parser.parse("wasmbus.evt.>").is_none(),
|
||||
"Shouldn't parse a too short topic"
|
||||
);
|
||||
|
||||
assert!(
|
||||
parser.parse("BADACCOUNT.wasmbus.evt.default.>").is_none(),
|
||||
"Shouldn't parse invalid account topic"
|
||||
);
|
||||
|
||||
assert!(
|
||||
parser.parse("wasmbus.notme.default.bar.baz").is_none(),
|
||||
"Shouldn't parse long topic"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
|
@ -4,16 +4,15 @@ use async_nats::Subscriber;
|
|||
use futures::{stream::SelectAll, StreamExt, TryFutureExt};
|
||||
use tracing::{debug, error, instrument, trace, warn};
|
||||
|
||||
use wadm::{
|
||||
use crate::{
|
||||
consumers::{
|
||||
manager::{ConsumerManager, WorkerCreator},
|
||||
CommandConsumer, EventConsumer,
|
||||
},
|
||||
events::{EventType, HostHeartbeat, HostStarted},
|
||||
mirror::Mirror,
|
||||
events::{EventType, HostHeartbeat, HostStarted, ManifestPublished},
|
||||
nats_utils::LatticeIdParser,
|
||||
storage::{nats_kv::NatsKvStore, reaper::Reaper, Store},
|
||||
DEFAULT_COMMANDS_TOPIC, DEFAULT_WADM_EVENTS_TOPIC,
|
||||
DEFAULT_COMMANDS_TOPIC, DEFAULT_WADM_EVENT_CONSUMER_TOPIC,
|
||||
};
|
||||
|
||||
use super::{CommandWorkerCreator, EventWorkerCreator};
|
||||
|
|
@ -22,7 +21,6 @@ pub(crate) struct Observer<StateStore> {
|
|||
pub(crate) parser: LatticeIdParser,
|
||||
pub(crate) command_manager: ConsumerManager<CommandConsumer>,
|
||||
pub(crate) event_manager: ConsumerManager<EventConsumer>,
|
||||
pub(crate) mirror: Mirror,
|
||||
pub(crate) client: async_nats::Client,
|
||||
pub(crate) reaper: Reaper<NatsKvStore>,
|
||||
pub(crate) event_worker_creator: EventWorkerCreator<StateStore>,
|
||||
|
|
@ -46,37 +44,25 @@ where
|
|||
if !is_event_we_care_about(&msg.payload) {
|
||||
continue;
|
||||
}
|
||||
let (lattice_id, multitenant_prefix) = match self.parser.parse(&msg.subject) {
|
||||
(Some(lattice), Some(account)) => (lattice, Some(account)),
|
||||
(Some(lattice), None) => (lattice, None),
|
||||
(None, _) => {
|
||||
trace!(subject = %msg.subject, "Found non-matching lattice subject");
|
||||
continue;
|
||||
}
|
||||
|
||||
let Some(lattice_info) = self.parser.parse(&msg.subject) else {
|
||||
trace!(subject = %msg.subject, "Found non-matching lattice subject");
|
||||
continue;
|
||||
};
|
||||
let lattice_id = lattice_info.lattice_id();
|
||||
let multitenant_prefix = lattice_info.multitenant_prefix();
|
||||
let event_subject = lattice_info.event_subject();
|
||||
|
||||
// Create the reaper for this lattice. This operation returns early if it is
|
||||
// already running
|
||||
self.reaper.observe(lattice_id);
|
||||
|
||||
// Make sure the mirror consumer is up and running. This operation returns early
|
||||
// if it is already running
|
||||
if let Err(e) = self
|
||||
.mirror
|
||||
.monitor_lattice(&msg.subject, lattice_id, multitenant_prefix)
|
||||
.await
|
||||
{
|
||||
// If we can't set up the mirror, we can't proceed, so exit early
|
||||
error!(error = %e, %lattice_id, "Couldn't add mirror consumer. Will retry on next heartbeat");
|
||||
continue;
|
||||
}
|
||||
|
||||
let command_topic = DEFAULT_COMMANDS_TOPIC.replace('*', lattice_id);
|
||||
let events_topic = DEFAULT_WADM_EVENTS_TOPIC.replace('*', lattice_id);
|
||||
let events_topic = DEFAULT_WADM_EVENT_CONSUMER_TOPIC.replace('*', lattice_id);
|
||||
let needs_command = !self.command_manager.has_consumer(&command_topic).await;
|
||||
let needs_event = !self.event_manager.has_consumer(&events_topic).await;
|
||||
if needs_command {
|
||||
debug!(%lattice_id, subject = %msg.subject, mapped_subject = %command_topic, "Found unmonitored lattice, adding command consumer");
|
||||
debug!(%lattice_id, subject = %event_subject, mapped_subject = %command_topic, "Found unmonitored lattice, adding command consumer");
|
||||
let worker = match self
|
||||
.command_worker_creator
|
||||
.create(lattice_id, multitenant_prefix)
|
||||
|
|
@ -96,7 +82,7 @@ where
|
|||
})
|
||||
}
|
||||
if needs_event {
|
||||
debug!(%lattice_id, subject = %msg.subject, mapped_subject = %events_topic, "Found unmonitored lattice, adding event consumer");
|
||||
debug!(%lattice_id, subject = %event_subject, mapped_subject = %events_topic, "Found unmonitored lattice, adding event consumer");
|
||||
let worker = match self
|
||||
.event_worker_creator
|
||||
.create(lattice_id, multitenant_prefix)
|
||||
|
|
@ -125,15 +111,17 @@ where
|
|||
}
|
||||
}
|
||||
|
||||
// This is a stupid hacky function to check that this is a host started or host heartbeat event
|
||||
// without actually parsing
|
||||
// This is a stupid hacky function to check that this is a host started, host heartbeat, or
|
||||
// manifest_published event without actually parsing
|
||||
fn is_event_we_care_about(data: &[u8]) -> bool {
|
||||
let string_data = match std::str::from_utf8(data) {
|
||||
Ok(s) => s,
|
||||
Err(_) => return false,
|
||||
};
|
||||
|
||||
string_data.contains(HostStarted::TYPE) || string_data.contains(HostHeartbeat::TYPE)
|
||||
string_data.contains(HostStarted::TYPE)
|
||||
|| string_data.contains(HostHeartbeat::TYPE)
|
||||
|| string_data.contains(ManifestPublished::TYPE)
|
||||
}
|
||||
|
||||
async fn get_subscriber(
|
||||
|
|
@ -0,0 +1,395 @@
|
|||
use std::collections::BTreeMap;
|
||||
use std::collections::HashMap;
|
||||
use std::hash::{Hash, Hasher};
|
||||
|
||||
use anyhow::Result;
|
||||
use async_trait::async_trait;
|
||||
use tokio::sync::RwLock;
|
||||
use tracing::debug;
|
||||
use tracing::error;
|
||||
use tracing::instrument;
|
||||
use tracing::trace;
|
||||
use wadm_types::{
|
||||
api::{StatusInfo, StatusType},
|
||||
TraitProperty,
|
||||
};
|
||||
|
||||
use crate::commands::{DeleteConfig, PutConfig};
|
||||
use crate::events::{ConfigDeleted, ConfigSet};
|
||||
use crate::workers::ConfigSource;
|
||||
use crate::{commands::Command, events::Event, scaler::Scaler};
|
||||
|
||||
const CONFIG_SCALER_KIND: &str = "ConfigScaler";
|
||||
|
||||
pub struct ConfigScaler<ConfigSource> {
|
||||
config_bucket: ConfigSource,
|
||||
id: String,
|
||||
config_name: String,
|
||||
// NOTE(#263): Introducing storing the entire configuration in-memory has the potential to get
|
||||
// fairly heavy if the configuration is large. We should consider a more efficient way to store
|
||||
// this by fetching configuration from the manifest when it's needed, for example.
|
||||
config: Option<HashMap<String, String>>,
|
||||
status: RwLock<StatusInfo>,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<C: ConfigSource + Send + Sync + Clone> Scaler for ConfigScaler<C> {
|
||||
fn id(&self) -> &str {
|
||||
&self.id
|
||||
}
|
||||
|
||||
fn kind(&self) -> &str {
|
||||
CONFIG_SCALER_KIND
|
||||
}
|
||||
|
||||
fn name(&self) -> String {
|
||||
self.config_name.to_string()
|
||||
}
|
||||
|
||||
async fn status(&self) -> StatusInfo {
|
||||
let _ = self.reconcile().await;
|
||||
self.status.read().await.to_owned()
|
||||
}
|
||||
|
||||
async fn update_config(&mut self, _config: TraitProperty) -> Result<Vec<Command>> {
|
||||
debug!("ConfigScaler does not support updating config, ignoring");
|
||||
Ok(vec![])
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip_all, fields(scaler_id = %self.id))]
|
||||
async fn handle_event(&self, event: &Event) -> Result<Vec<Command>> {
|
||||
match event {
|
||||
Event::ConfigSet(ConfigSet { config_name })
|
||||
| Event::ConfigDeleted(ConfigDeleted { config_name }) => {
|
||||
if config_name == &self.config_name {
|
||||
return self.reconcile().await;
|
||||
}
|
||||
}
|
||||
// This is a workaround to ensure that the config has a chance to periodically
|
||||
// update itself if it is out of sync. For efficiency, we only fetch configuration
|
||||
// again if the status is not deployed.
|
||||
Event::HostHeartbeat(_) => {
|
||||
if !matches!(self.status.read().await.status_type, StatusType::Deployed) {
|
||||
return self.reconcile().await;
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
trace!("ConfigScaler does not support this event, ignoring");
|
||||
}
|
||||
}
|
||||
Ok(Vec::new())
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip_all, scaler_id = %self.id)]
|
||||
async fn reconcile(&self) -> Result<Vec<Command>> {
|
||||
debug!(self.config_name, "Fetching configuration");
|
||||
match (
|
||||
self.config_bucket.get_config(&self.config_name).await,
|
||||
self.config.as_ref(),
|
||||
) {
|
||||
// If configuration is not supplied to the scaler, we just ensure that it exists
|
||||
(Ok(Some(_config)), None) => {
|
||||
*self.status.write().await = StatusInfo::deployed("");
|
||||
Ok(Vec::new())
|
||||
}
|
||||
// If configuration is not supplied and doesn't exist, we enter a failed state
|
||||
(Ok(None), None) => {
|
||||
*self.status.write().await = StatusInfo::failed(&format!(
|
||||
"Specified configuration {} does not exist",
|
||||
self.config_name
|
||||
));
|
||||
Ok(Vec::new())
|
||||
}
|
||||
// If configuration matches what's supplied, this scaler is deployed
|
||||
(Ok(Some(config)), Some(scaler_config)) if &config == scaler_config => {
|
||||
*self.status.write().await = StatusInfo::deployed("");
|
||||
Ok(Vec::new())
|
||||
}
|
||||
// If configuration is out of sync, we put the configuration
|
||||
(Ok(_config), Some(scaler_config)) => {
|
||||
debug!(self.config_name, "Putting configuration");
|
||||
*self.status.write().await = StatusInfo::reconciling("Configuration out of sync");
|
||||
Ok(vec![Command::PutConfig(PutConfig {
|
||||
config_name: self.config_name.clone(),
|
||||
config: scaler_config.clone(),
|
||||
})])
|
||||
}
|
||||
(Err(e), _) => {
|
||||
error!(error = %e, "Configscaler failed to fetch configuration");
|
||||
*self.status.write().await = StatusInfo::failed(&e.to_string());
|
||||
Ok(Vec::new())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip_all)]
|
||||
async fn cleanup(&self) -> Result<Vec<Command>> {
|
||||
if self.config.is_some() {
|
||||
Ok(vec![Command::DeleteConfig(DeleteConfig {
|
||||
config_name: self.config_name.clone(),
|
||||
})])
|
||||
} else {
|
||||
// This configuration is externally managed, don't delete it
|
||||
Ok(Vec::new())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<C: ConfigSource> ConfigScaler<C> {
|
||||
/// Construct a new ConfigScaler with specified values
|
||||
pub fn new(
|
||||
config_bucket: C,
|
||||
config_name: &str,
|
||||
config: Option<&HashMap<String, String>>,
|
||||
) -> Self {
|
||||
let mut id = config_name.to_string();
|
||||
// Hash the config to generate a unique id, used to compare scalers for uniqueness when updating
|
||||
if let Some(config) = config.as_ref() {
|
||||
let mut config_hasher = std::collections::hash_map::DefaultHasher::new();
|
||||
BTreeMap::from_iter(config.iter()).hash(&mut config_hasher);
|
||||
id.extend(format!("-{}", config_hasher.finish()).chars());
|
||||
}
|
||||
|
||||
Self {
|
||||
config_bucket,
|
||||
id,
|
||||
config_name: config_name.to_string(),
|
||||
config: config.cloned(),
|
||||
status: RwLock::new(StatusInfo::reconciling("")),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use std::collections::{BTreeMap, HashMap};
|
||||
|
||||
use wadm_types::{api::StatusType, ConfigProperty};
|
||||
|
||||
use crate::{
|
||||
commands::{Command, PutConfig},
|
||||
events::{ComponentScaled, ConfigDeleted, Event, HostHeartbeat},
|
||||
scaler::{configscaler::ConfigScaler, Scaler},
|
||||
test_util::TestLatticeSource,
|
||||
};
|
||||
|
||||
#[tokio::test]
|
||||
/// Ensure that the config scaler reacts properly to events, fetching configuration
|
||||
/// when it is out of sync and ignoring irrelevant events.
|
||||
async fn test_configscaler() {
|
||||
let lattice = TestLatticeSource {
|
||||
claims: HashMap::new(),
|
||||
inventory: Default::default(),
|
||||
links: Vec::new(),
|
||||
config: HashMap::new(),
|
||||
};
|
||||
|
||||
let config = ConfigProperty {
|
||||
name: "test_config".to_string(),
|
||||
properties: Some(HashMap::from_iter(vec![(
|
||||
"key".to_string(),
|
||||
"value".to_string(),
|
||||
)])),
|
||||
};
|
||||
|
||||
let config_scaler =
|
||||
ConfigScaler::new(lattice.clone(), &config.name, config.properties.as_ref());
|
||||
|
||||
assert_eq!(
|
||||
config_scaler.status().await.status_type,
|
||||
StatusType::Reconciling
|
||||
);
|
||||
assert_eq!(
|
||||
config_scaler
|
||||
.reconcile()
|
||||
.await
|
||||
.expect("reconcile should succeed"),
|
||||
vec![Command::PutConfig(PutConfig {
|
||||
config_name: config.name.clone(),
|
||||
config: config.properties.clone().expect("properties not found"),
|
||||
})]
|
||||
);
|
||||
assert_eq!(
|
||||
config_scaler.status().await.status_type,
|
||||
StatusType::Reconciling
|
||||
);
|
||||
|
||||
// Configuration deleted, relevant
|
||||
assert_eq!(
|
||||
config_scaler
|
||||
.handle_event(&Event::ConfigDeleted(ConfigDeleted {
|
||||
config_name: config.name.clone()
|
||||
}))
|
||||
.await
|
||||
.expect("handle_event should succeed"),
|
||||
vec![Command::PutConfig(PutConfig {
|
||||
config_name: config.name.clone(),
|
||||
config: config.properties.clone().expect("properties not found"),
|
||||
})]
|
||||
);
|
||||
assert_eq!(
|
||||
config_scaler.status().await.status_type,
|
||||
StatusType::Reconciling
|
||||
);
|
||||
// Configuration deleted, irrelevant
|
||||
assert_eq!(
|
||||
config_scaler
|
||||
.handle_event(&Event::ConfigDeleted(ConfigDeleted {
|
||||
config_name: "some_other_config".to_string()
|
||||
}))
|
||||
.await
|
||||
.expect("handle_event should succeed"),
|
||||
vec![]
|
||||
);
|
||||
assert_eq!(
|
||||
config_scaler.status().await.status_type,
|
||||
StatusType::Reconciling
|
||||
);
|
||||
// Periodic reconcile with host heartbeat
|
||||
assert_eq!(
|
||||
config_scaler
|
||||
.handle_event(&Event::HostHeartbeat(HostHeartbeat {
|
||||
components: Vec::new(),
|
||||
providers: Vec::new(),
|
||||
host_id: String::default(),
|
||||
issuer: String::default(),
|
||||
friendly_name: String::default(),
|
||||
labels: HashMap::new(),
|
||||
version: semver::Version::new(0, 0, 0),
|
||||
uptime_human: String::default(),
|
||||
uptime_seconds: 0,
|
||||
}))
|
||||
.await
|
||||
.expect("handle_event should succeed"),
|
||||
vec![Command::PutConfig(PutConfig {
|
||||
config_name: config.name.clone(),
|
||||
config: config.properties.clone().expect("properties not found"),
|
||||
})]
|
||||
);
|
||||
assert_eq!(
|
||||
config_scaler.status().await.status_type,
|
||||
StatusType::Reconciling
|
||||
);
|
||||
// Ignore other event
|
||||
assert_eq!(
|
||||
config_scaler
|
||||
.handle_event(&Event::ComponentScaled(ComponentScaled {
|
||||
annotations: BTreeMap::new(),
|
||||
claims: None,
|
||||
image_ref: "foo".to_string(),
|
||||
max_instances: 0,
|
||||
component_id: "fooo".to_string(),
|
||||
host_id: "hostid".to_string()
|
||||
}))
|
||||
.await
|
||||
.expect("handle_event should succeed"),
|
||||
vec![]
|
||||
);
|
||||
assert_eq!(
|
||||
config_scaler.status().await.status_type,
|
||||
StatusType::Reconciling
|
||||
);
|
||||
|
||||
// Create lattice where config is present
|
||||
let lattice2 = TestLatticeSource {
|
||||
claims: HashMap::new(),
|
||||
inventory: Default::default(),
|
||||
links: Vec::new(),
|
||||
config: HashMap::from_iter(vec![(
|
||||
config.name.clone(),
|
||||
config.properties.clone().expect("properties not found"),
|
||||
)]),
|
||||
};
|
||||
|
||||
let config_scaler2 = ConfigScaler::new(lattice2, &config.name, config.properties.as_ref());
|
||||
|
||||
assert_eq!(
|
||||
config_scaler2
|
||||
.reconcile()
|
||||
.await
|
||||
.expect("reconcile should succeed"),
|
||||
vec![]
|
||||
);
|
||||
assert_eq!(
|
||||
config_scaler2.status().await.status_type,
|
||||
StatusType::Deployed
|
||||
);
|
||||
// Periodic reconcile with host heartbeat
|
||||
assert_eq!(
|
||||
config_scaler2
|
||||
.handle_event(&Event::HostHeartbeat(HostHeartbeat {
|
||||
components: Vec::new(),
|
||||
providers: Vec::new(),
|
||||
host_id: String::default(),
|
||||
issuer: String::default(),
|
||||
friendly_name: String::default(),
|
||||
labels: HashMap::new(),
|
||||
version: semver::Version::new(0, 0, 0),
|
||||
uptime_human: String::default(),
|
||||
uptime_seconds: 0,
|
||||
}))
|
||||
.await
|
||||
.expect("handle_event should succeed"),
|
||||
vec![]
|
||||
);
|
||||
assert_eq!(
|
||||
config_scaler2.status().await.status_type,
|
||||
StatusType::Deployed
|
||||
);
|
||||
|
||||
// Create lattice where config is present but with the wrong values
|
||||
let lattice3 = TestLatticeSource {
|
||||
claims: HashMap::new(),
|
||||
inventory: Default::default(),
|
||||
links: Vec::new(),
|
||||
config: HashMap::from_iter(vec![(
|
||||
config.name.clone(),
|
||||
HashMap::from_iter(vec![("key".to_string(), "wrong_value".to_string())]),
|
||||
)]),
|
||||
};
|
||||
let config_scaler3 =
|
||||
ConfigScaler::new(lattice3.clone(), &config.name, config.properties.as_ref());
|
||||
|
||||
assert_eq!(
|
||||
config_scaler3
|
||||
.reconcile()
|
||||
.await
|
||||
.expect("reconcile should succeed"),
|
||||
vec![Command::PutConfig(PutConfig {
|
||||
config_name: config.name.clone(),
|
||||
config: config.properties.clone().expect("properties not found"),
|
||||
})]
|
||||
);
|
||||
assert_eq!(
|
||||
config_scaler3.status().await.status_type,
|
||||
StatusType::Reconciling
|
||||
);
|
||||
|
||||
// Test supplied name but not supplied config
|
||||
let config_scaler4 = ConfigScaler::new(lattice3, &config.name, None);
|
||||
assert_eq!(
|
||||
config_scaler4
|
||||
.reconcile()
|
||||
.await
|
||||
.expect("reconcile should succeed"),
|
||||
vec![]
|
||||
);
|
||||
assert_eq!(
|
||||
config_scaler4.status().await.status_type,
|
||||
StatusType::Deployed
|
||||
);
|
||||
|
||||
let config_scaler5 = ConfigScaler::new(lattice, &config.name, None);
|
||||
assert_eq!(
|
||||
config_scaler5
|
||||
.reconcile()
|
||||
.await
|
||||
.expect("reconcile should succeed"),
|
||||
vec![]
|
||||
);
|
||||
assert_eq!(
|
||||
config_scaler5.status().await.status_type,
|
||||
StatusType::Failed
|
||||
);
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,780 @@
|
|||
//! Contains code for converting the list of [`Component`]s in an application into a list of [`Scaler`]s
|
||||
//! that are responsible for monitoring and enforcing the desired state of a lattice
|
||||
|
||||
use std::{collections::HashMap, time::Duration};
|
||||
|
||||
use anyhow::Result;
|
||||
use tracing::{error, warn};
|
||||
use wadm_types::{
|
||||
api::StatusInfo, CapabilityProperties, Component, ComponentProperties, ConfigProperty,
|
||||
LinkProperty, Policy, Properties, SecretProperty, SharedApplicationComponentProperties,
|
||||
SpreadScalerProperty, Trait, TraitProperty, DAEMONSCALER_TRAIT, LINK_TRAIT, SPREADSCALER_TRAIT,
|
||||
};
|
||||
use wasmcloud_secrets_types::SECRET_PREFIX;
|
||||
|
||||
use crate::{
|
||||
publisher::Publisher,
|
||||
scaler::{
|
||||
spreadscaler::{link::LINK_SCALER_KIND, ComponentSpreadScaler, SPREAD_SCALER_KIND},
|
||||
statusscaler::StatusScaler,
|
||||
Scaler,
|
||||
},
|
||||
storage::{snapshot::SnapshotStore, ReadStore},
|
||||
workers::{ConfigSource, LinkSource, SecretSource},
|
||||
DEFAULT_LINK_NAME,
|
||||
};
|
||||
|
||||
use super::{
|
||||
configscaler::ConfigScaler,
|
||||
daemonscaler::{provider::ProviderDaemonScaler, ComponentDaemonScaler},
|
||||
secretscaler::SecretScaler,
|
||||
spreadscaler::{
|
||||
link::{LinkScaler, LinkScalerConfig},
|
||||
provider::{ProviderSpreadConfig, ProviderSpreadScaler},
|
||||
},
|
||||
BackoffWrapper,
|
||||
};
|
||||
|
||||
pub(crate) type BoxedScaler = Box<dyn Scaler + Send + Sync + 'static>;
|
||||
pub(crate) type ScalerList = Vec<BoxedScaler>;
|
||||
|
||||
const EMPTY_TRAIT_VEC: Vec<Trait> = Vec::new();
|
||||
|
||||
/// Converts a list of manifest [`Component`]s into a [`ScalerList`], resolving shared application
|
||||
/// references, links, configuration and secrets as necessary.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `components` - The list of components to convert
|
||||
/// * `policies` - The policies to use when creating the scalers so they can access secrets
|
||||
/// * `lattice_id` - The lattice id the scalers operate on
|
||||
/// * `notifier` - The publisher to use when creating the scalers so they can report status
|
||||
/// * `name` - The name of the manifest that the scalers are being created for
|
||||
/// * `notifier_subject` - The subject to use when creating the scalers so they can report status
|
||||
/// * `snapshot_data` - The store to use when creating the scalers so they can access lattice state
|
||||
pub(crate) fn manifest_components_to_scalers<S, P, L>(
|
||||
components: &[Component],
|
||||
policies: &HashMap<&String, &Policy>,
|
||||
lattice_id: &str,
|
||||
manifest_name: &str,
|
||||
notifier_subject: &str,
|
||||
notifier: &P,
|
||||
snapshot_data: &SnapshotStore<S, L>,
|
||||
) -> ScalerList
|
||||
where
|
||||
S: ReadStore + Send + Sync + Clone + 'static,
|
||||
P: Publisher + Clone + Send + Sync + 'static,
|
||||
L: LinkSource + ConfigSource + SecretSource + Clone + Send + Sync + 'static,
|
||||
{
|
||||
let mut scalers: ScalerList = Vec::new();
|
||||
components
|
||||
.iter()
|
||||
.for_each(|component| match &component.properties {
|
||||
Properties::Component { properties } => {
|
||||
// Determine if this component is contained in this manifest or a shared application
|
||||
let (application_name, component_name) = match resolve_manifest_component(
|
||||
manifest_name,
|
||||
&component.name,
|
||||
properties.image.as_ref(),
|
||||
properties.application.as_ref(),
|
||||
) {
|
||||
Ok(names) => names,
|
||||
Err(err) => {
|
||||
error!(err);
|
||||
scalers.push(Box::new(StatusScaler::new(
|
||||
uuid::Uuid::new_v4().to_string(),
|
||||
SPREAD_SCALER_KIND,
|
||||
&component.name,
|
||||
StatusInfo::failed(err),
|
||||
)) as BoxedScaler);
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
component_scalers(
|
||||
&mut scalers,
|
||||
components,
|
||||
properties,
|
||||
component.traits.as_ref(),
|
||||
manifest_name,
|
||||
application_name,
|
||||
component_name,
|
||||
lattice_id,
|
||||
policies,
|
||||
notifier_subject,
|
||||
notifier,
|
||||
snapshot_data,
|
||||
)
|
||||
}
|
||||
Properties::Capability { properties } => {
|
||||
// Determine if this component is contained in this manifest or a shared application
|
||||
let (application_name, component_name) = match resolve_manifest_component(
|
||||
manifest_name,
|
||||
&component.name,
|
||||
properties.image.as_ref(),
|
||||
properties.application.as_ref(),
|
||||
) {
|
||||
Ok(names) => names,
|
||||
Err(err) => {
|
||||
error!(err);
|
||||
scalers.push(Box::new(StatusScaler::new(
|
||||
uuid::Uuid::new_v4().to_string(),
|
||||
SPREAD_SCALER_KIND,
|
||||
&component.name,
|
||||
StatusInfo::failed(err),
|
||||
)) as BoxedScaler);
|
||||
return;
|
||||
}
|
||||
};
|
||||
provider_scalers(
|
||||
&mut scalers,
|
||||
components,
|
||||
properties,
|
||||
component.traits.as_ref(),
|
||||
manifest_name,
|
||||
application_name,
|
||||
component_name,
|
||||
lattice_id,
|
||||
policies,
|
||||
notifier_subject,
|
||||
notifier,
|
||||
snapshot_data,
|
||||
)
|
||||
}
|
||||
});
|
||||
scalers
|
||||
}
|
||||
|
||||
/// Helper function, primarily to remove nesting, that extends a [`ScalerList`] with all scalers
|
||||
/// from a (Wasm) component [`Component`]
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `scalers` - The list of scalers to extend
|
||||
/// * `components` - The list of components to convert
|
||||
/// * `properties` - The properties of the component to convert
|
||||
/// * `traits` - The traits of the component to convert
|
||||
/// * `manifest_name` - The name of the manifest that the scalers are being created for
|
||||
/// * `application_name` - The name of the application that the scalers are being created for
|
||||
/// * `component_name` - The name of the component to convert
|
||||
/// * **The following arguments are required to create scalers, passed directly through to the scaler
|
||||
/// * `lattice_id` - The lattice id the scalers operate on
|
||||
/// * `policies` - The policies to use when creating the scalers so they can access secrets
|
||||
/// * `notifier_subject` - The subject to use when creating the scalers so they can report status
|
||||
/// * `notifier` - The publisher to use when creating the scalers so they can report status
|
||||
/// * `snapshot_data` - The store to use when creating the scalers so they can access lattice state
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn component_scalers<S, P, L>(
|
||||
scalers: &mut ScalerList,
|
||||
components: &[Component],
|
||||
properties: &ComponentProperties,
|
||||
traits: Option<&Vec<Trait>>,
|
||||
manifest_name: &str,
|
||||
application_name: &str,
|
||||
component_name: &str,
|
||||
lattice_id: &str,
|
||||
policies: &HashMap<&String, &Policy>,
|
||||
notifier_subject: &str,
|
||||
notifier: &P,
|
||||
snapshot_data: &SnapshotStore<S, L>,
|
||||
) where
|
||||
S: ReadStore + Send + Sync + Clone + 'static,
|
||||
P: Publisher + Clone + Send + Sync + 'static,
|
||||
L: LinkSource + ConfigSource + SecretSource + Clone + Send + Sync + 'static,
|
||||
{
|
||||
scalers.extend(traits.unwrap_or(&EMPTY_TRAIT_VEC).iter().filter_map(|trt| {
|
||||
// If an image is specified, then it's a component in the same manifest. Otherwise, it's a shared component
|
||||
let component_id = if properties.image.is_some() {
|
||||
compute_component_id(manifest_name, properties.id.as_ref(), component_name)
|
||||
} else {
|
||||
compute_component_id(application_name, properties.id.as_ref(), component_name)
|
||||
};
|
||||
let (config_scalers, mut config_names) =
|
||||
config_to_scalers(snapshot_data, manifest_name, &properties.config);
|
||||
let (secret_scalers, secret_names) = secrets_to_scalers(
|
||||
snapshot_data,
|
||||
manifest_name,
|
||||
&properties.secrets,
|
||||
policies,
|
||||
);
|
||||
|
||||
config_names.append(&mut secret_names.clone());
|
||||
// TODO(#451): Consider a way to report on status of a shared component
|
||||
match (trt.trait_type.as_str(), &trt.properties, &properties.image) {
|
||||
// Shared application components already have their own spread/daemon scalers, you
|
||||
// cannot modify them from another manifest
|
||||
(SPREADSCALER_TRAIT, TraitProperty::SpreadScaler(_), None) => {
|
||||
warn!(
|
||||
"Unsupported SpreadScaler trait specified for a shared component {component_name}"
|
||||
);
|
||||
None
|
||||
}
|
||||
(DAEMONSCALER_TRAIT, TraitProperty::SpreadScaler(_), None) => {
|
||||
warn!(
|
||||
"Unsupported DaemonScaler trait specified for a shared component {component_name}"
|
||||
);
|
||||
None
|
||||
}
|
||||
(SPREADSCALER_TRAIT, TraitProperty::SpreadScaler(p), Some(image_ref)) => {
|
||||
// If the image is not specified, then it's a reference to a shared provider
|
||||
// in a different manifest
|
||||
Some(Box::new(BackoffWrapper::new(
|
||||
ComponentSpreadScaler::new(
|
||||
snapshot_data.clone(),
|
||||
image_ref.clone(),
|
||||
component_id,
|
||||
lattice_id.to_owned(),
|
||||
application_name.to_owned(),
|
||||
p.to_owned(),
|
||||
component_name,
|
||||
config_names,
|
||||
),
|
||||
notifier.clone(),
|
||||
config_scalers,
|
||||
secret_scalers,
|
||||
notifier_subject,
|
||||
application_name,
|
||||
Some(Duration::from_secs(5)),
|
||||
)) as BoxedScaler)
|
||||
}
|
||||
(DAEMONSCALER_TRAIT, TraitProperty::SpreadScaler(p), Some(image_ref)) => {
|
||||
Some(Box::new(BackoffWrapper::new(
|
||||
ComponentDaemonScaler::new(
|
||||
snapshot_data.clone(),
|
||||
image_ref.to_owned(),
|
||||
component_id,
|
||||
lattice_id.to_owned(),
|
||||
application_name.to_owned(),
|
||||
p.to_owned(),
|
||||
component_name,
|
||||
config_names,
|
||||
),
|
||||
notifier.clone(),
|
||||
config_scalers,
|
||||
secret_scalers,
|
||||
notifier_subject,
|
||||
application_name,
|
||||
Some(Duration::from_secs(5)),
|
||||
)) as BoxedScaler)
|
||||
}
|
||||
(LINK_TRAIT, TraitProperty::Link(p), _) => {
|
||||
// Find the target component of the link and create a scaler for it
|
||||
components
|
||||
.iter()
|
||||
.find_map(|component| match &component.properties {
|
||||
Properties::Capability {
|
||||
properties:
|
||||
CapabilityProperties {
|
||||
id,
|
||||
application,
|
||||
image,
|
||||
..
|
||||
},
|
||||
}
|
||||
| Properties::Component {
|
||||
properties:
|
||||
ComponentProperties {
|
||||
id,
|
||||
application,
|
||||
image,
|
||||
..
|
||||
},
|
||||
} if component.name == p.target.name => Some(link_scaler(
|
||||
p,
|
||||
lattice_id,
|
||||
manifest_name,
|
||||
application_name,
|
||||
&component.name,
|
||||
component_id.to_string(),
|
||||
id.as_ref(),
|
||||
image.as_ref(),
|
||||
application.as_ref(),
|
||||
policies,
|
||||
notifier_subject,
|
||||
notifier,
|
||||
snapshot_data,
|
||||
)),
|
||||
_ => None,
|
||||
})
|
||||
}
|
||||
_ => None,
|
||||
}
|
||||
}));
|
||||
}
|
||||
|
||||
/// Helper function, primarily to remove nesting, that extends a [`ScalerList`] with all scalers
|
||||
/// from a capability provider [`Component`]
|
||||
/// /// # Arguments
|
||||
/// * `scalers` - The list of scalers to extend
|
||||
/// * `components` - The list of components to convert
|
||||
/// * `properties` - The properties of the capability provider to convert
|
||||
/// * `traits` - The traits of the component to convert
|
||||
/// * `manifest_name` - The name of the manifest that the scalers are being created for
|
||||
/// * `application_name` - The name of the application that the scalers are being created for
|
||||
/// * `component_name` - The name of the component to convert
|
||||
/// * **The following arguments are required to create scalers, passed directly through to the scaler
|
||||
/// * `lattice_id` - The lattice id the scalers operate on
|
||||
/// * `policies` - The policies to use when creating the scalers so they can access secrets
|
||||
/// * `notifier_subject` - The subject to use when creating the scalers so they can report status
|
||||
/// * `notifier` - The publisher to use when creating the scalers so they can report status
|
||||
/// * `snapshot_data` - The store to use when creating the scalers so they can access lattice state
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn provider_scalers<S, P, L>(
|
||||
scalers: &mut ScalerList,
|
||||
components: &[Component],
|
||||
properties: &CapabilityProperties,
|
||||
traits: Option<&Vec<Trait>>,
|
||||
manifest_name: &str,
|
||||
application_name: &str,
|
||||
component_name: &str,
|
||||
lattice_id: &str,
|
||||
policies: &HashMap<&String, &Policy>,
|
||||
notifier_subject: &str,
|
||||
notifier: &P,
|
||||
snapshot_data: &SnapshotStore<S, L>,
|
||||
) where
|
||||
S: ReadStore + Send + Sync + Clone + 'static,
|
||||
P: Publisher + Clone + Send + Sync + 'static,
|
||||
L: LinkSource + ConfigSource + SecretSource + Clone + Send + Sync + 'static,
|
||||
{
|
||||
// If an image is specified, then it's a provider in the same manifest. Otherwise, it's a shared component
|
||||
let provider_id = if properties.image.is_some() {
|
||||
compute_component_id(manifest_name, properties.id.as_ref(), component_name)
|
||||
} else {
|
||||
compute_component_id(application_name, properties.id.as_ref(), component_name)
|
||||
};
|
||||
|
||||
let mut scaler_specified = false;
|
||||
scalers.extend(traits.unwrap_or(&EMPTY_TRAIT_VEC).iter().filter_map(|trt| {
|
||||
match (trt.trait_type.as_str(), &trt.properties, &properties.image) {
|
||||
// Shared application components already have their own spread/daemon scalers, you
|
||||
// cannot modify them from another manifest
|
||||
(SPREADSCALER_TRAIT, TraitProperty::SpreadScaler(_), None) => {
|
||||
warn!(
|
||||
"Unsupported SpreadScaler trait specified for a shared provider {component_name}"
|
||||
);
|
||||
None
|
||||
}
|
||||
(DAEMONSCALER_TRAIT, TraitProperty::SpreadScaler(_), None) => {
|
||||
warn!(
|
||||
"Unsupported DaemonScaler trait specified for a shared provider {component_name}"
|
||||
);
|
||||
None
|
||||
}
|
||||
(SPREADSCALER_TRAIT, TraitProperty::SpreadScaler(p), Some(image)) => {
|
||||
scaler_specified = true;
|
||||
let (config_scalers, mut config_names) =
|
||||
config_to_scalers(snapshot_data, application_name, &properties.config);
|
||||
let (secret_scalers, secret_names) = secrets_to_scalers(
|
||||
snapshot_data,
|
||||
application_name,
|
||||
&properties.secrets,
|
||||
policies,
|
||||
);
|
||||
config_names.append(&mut secret_names.clone());
|
||||
|
||||
Some(Box::new(BackoffWrapper::new(
|
||||
ProviderSpreadScaler::new(
|
||||
snapshot_data.clone(),
|
||||
ProviderSpreadConfig {
|
||||
lattice_id: lattice_id.to_owned(),
|
||||
provider_id: provider_id.to_owned(),
|
||||
provider_reference: image.to_owned(),
|
||||
spread_config: p.to_owned(),
|
||||
model_name: application_name.to_owned(),
|
||||
provider_config: config_names,
|
||||
},
|
||||
component_name,
|
||||
),
|
||||
notifier.clone(),
|
||||
config_scalers,
|
||||
secret_scalers,
|
||||
notifier_subject,
|
||||
application_name,
|
||||
// Providers are a bit longer because it can take a bit to download
|
||||
Some(Duration::from_secs(60)),
|
||||
)) as BoxedScaler)
|
||||
}
|
||||
(DAEMONSCALER_TRAIT, TraitProperty::SpreadScaler(p), Some(image)) => {
|
||||
scaler_specified = true;
|
||||
let (config_scalers, mut config_names) =
|
||||
config_to_scalers(snapshot_data, application_name, &properties.config);
|
||||
let (secret_scalers, secret_names) = secrets_to_scalers(
|
||||
snapshot_data,
|
||||
application_name,
|
||||
&properties.secrets,
|
||||
policies,
|
||||
);
|
||||
config_names.append(&mut secret_names.clone());
|
||||
Some(Box::new(BackoffWrapper::new(
|
||||
ProviderDaemonScaler::new(
|
||||
snapshot_data.clone(),
|
||||
ProviderSpreadConfig {
|
||||
lattice_id: lattice_id.to_owned(),
|
||||
provider_id: provider_id.to_owned(),
|
||||
provider_reference: image.to_owned(),
|
||||
spread_config: p.to_owned(),
|
||||
model_name: application_name.to_owned(),
|
||||
provider_config: config_names,
|
||||
},
|
||||
component_name,
|
||||
),
|
||||
notifier.clone(),
|
||||
config_scalers,
|
||||
secret_scalers,
|
||||
notifier_subject,
|
||||
application_name,
|
||||
// Providers are a bit longer because it can take a bit to download
|
||||
Some(Duration::from_secs(60)),
|
||||
)) as BoxedScaler)
|
||||
}
|
||||
// Find the target component of the link and create a scaler for it.
|
||||
(LINK_TRAIT, TraitProperty::Link(p), _) => {
|
||||
components
|
||||
.iter()
|
||||
.find_map(|component| match &component.properties {
|
||||
// Providers cannot link to other providers, only components
|
||||
Properties::Capability { .. } if component.name == p.target.name => {
|
||||
error!(
|
||||
"Provider {} cannot link to provider {}, only components",
|
||||
&component.name, p.target.name
|
||||
);
|
||||
None
|
||||
}
|
||||
Properties::Component {
|
||||
properties:
|
||||
ComponentProperties {
|
||||
image,
|
||||
application,
|
||||
id,
|
||||
..
|
||||
},
|
||||
} if component.name == p.target.name => Some(link_scaler(
|
||||
p,
|
||||
lattice_id,
|
||||
manifest_name,
|
||||
application_name,
|
||||
&component.name,
|
||||
provider_id.to_owned(),
|
||||
id.as_ref(),
|
||||
image.as_ref(),
|
||||
application.as_ref(),
|
||||
policies,
|
||||
notifier_subject,
|
||||
notifier,
|
||||
snapshot_data,
|
||||
)),
|
||||
_ => None,
|
||||
})
|
||||
}
|
||||
_ => None,
|
||||
}
|
||||
}));
|
||||
// Allow providers to omit the spreadscaler entirely for simplicity
|
||||
if !scaler_specified {
|
||||
if let Some(image) = &properties.image {
|
||||
let (config_scalers, mut config_names) =
|
||||
config_to_scalers(snapshot_data, application_name, &properties.config);
|
||||
|
||||
let (secret_scalers, mut secret_names) = secrets_to_scalers(
|
||||
snapshot_data,
|
||||
application_name,
|
||||
&properties.secrets,
|
||||
policies,
|
||||
);
|
||||
config_names.append(&mut secret_names);
|
||||
scalers.push(Box::new(BackoffWrapper::new(
|
||||
ProviderSpreadScaler::new(
|
||||
snapshot_data.clone(),
|
||||
ProviderSpreadConfig {
|
||||
lattice_id: lattice_id.to_owned(),
|
||||
provider_id,
|
||||
provider_reference: image.to_owned(),
|
||||
spread_config: SpreadScalerProperty {
|
||||
instances: 1,
|
||||
spread: vec![],
|
||||
},
|
||||
model_name: application_name.to_owned(),
|
||||
provider_config: config_names,
|
||||
},
|
||||
component_name,
|
||||
),
|
||||
notifier.clone(),
|
||||
config_scalers,
|
||||
secret_scalers,
|
||||
notifier_subject,
|
||||
application_name,
|
||||
// Providers are a bit longer because it can take a bit to download
|
||||
Some(Duration::from_secs(60)),
|
||||
)) as BoxedScaler)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Resolves configuration, secrets, and the target of a link to create a boxed [`LinkScaler`]
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `link_property` - The properties of the link to convert
|
||||
/// * `lattice_id` - The lattice id the scalers operate on
|
||||
/// * `manifest_name` - The name of the manifest that the scalers are being created for
|
||||
/// * `application_name` - The name of the application that the scalers are being created for
|
||||
/// * `component_name` - The name of the component to convert
|
||||
/// * `source_id` - The ID of the source component
|
||||
/// * `target_id` - The optional ID of the target component
|
||||
/// * `image` - The optional image reference of the target component
|
||||
/// * `shared` - The optional shared application reference of the target component
|
||||
/// * `policies` - The policies to use when creating the scalers so they can access secrets
|
||||
/// * `notifier_subject` - The subject to use when creating the scalers so they can report status
|
||||
/// * `notifier` - The publisher to use when creating the scalers so they can report status
|
||||
/// * `snapshot_data` - The store to use when creating the scalers so they can access lattice state
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn link_scaler<S, P, L>(
|
||||
link_property: &LinkProperty,
|
||||
lattice_id: &str,
|
||||
manifest_name: &str,
|
||||
application_name: &str,
|
||||
component_name: &str,
|
||||
source_id: String,
|
||||
target_id: Option<&String>,
|
||||
image: Option<&String>,
|
||||
shared: Option<&SharedApplicationComponentProperties>,
|
||||
policies: &HashMap<&String, &Policy>,
|
||||
notifier_subject: &str,
|
||||
notifier: &P,
|
||||
snapshot_data: &SnapshotStore<S, L>,
|
||||
) -> BoxedScaler
|
||||
where
|
||||
S: ReadStore + Send + Sync + Clone + 'static,
|
||||
P: Publisher + Clone + Send + Sync + 'static,
|
||||
L: LinkSource + ConfigSource + SecretSource + Clone + Send + Sync + 'static,
|
||||
{
|
||||
let (mut config_scalers, mut source_config) = config_to_scalers(
|
||||
snapshot_data,
|
||||
manifest_name,
|
||||
&link_property
|
||||
.source
|
||||
.as_ref()
|
||||
.unwrap_or(&Default::default())
|
||||
.config,
|
||||
);
|
||||
let (target_config_scalers, mut target_config) =
|
||||
config_to_scalers(snapshot_data, manifest_name, &link_property.target.config);
|
||||
let (target_secret_scalers, target_secrets) = secrets_to_scalers(
|
||||
snapshot_data,
|
||||
manifest_name,
|
||||
&link_property.target.secrets,
|
||||
policies,
|
||||
);
|
||||
let (mut source_secret_scalers, source_secrets) = secrets_to_scalers(
|
||||
snapshot_data,
|
||||
manifest_name,
|
||||
&link_property
|
||||
.source
|
||||
.as_ref()
|
||||
.unwrap_or(&Default::default())
|
||||
.secrets,
|
||||
policies,
|
||||
);
|
||||
config_scalers.extend(target_config_scalers);
|
||||
source_secret_scalers.extend(target_secret_scalers);
|
||||
target_config.extend(target_secrets);
|
||||
source_config.extend(source_secrets);
|
||||
|
||||
let (target_manifest_name, target_component_name) =
|
||||
match resolve_manifest_component(manifest_name, component_name, image, shared) {
|
||||
Ok(name) => name,
|
||||
Err(err) => {
|
||||
error!(err);
|
||||
return Box::new(StatusScaler::new(
|
||||
uuid::Uuid::new_v4().to_string(),
|
||||
LINK_SCALER_KIND,
|
||||
format!(
|
||||
"{} -({}:{})-> {}",
|
||||
component_name,
|
||||
link_property.namespace,
|
||||
link_property.package,
|
||||
link_property.target.name
|
||||
),
|
||||
StatusInfo::failed(err),
|
||||
)) as BoxedScaler;
|
||||
}
|
||||
};
|
||||
let target = compute_component_id(target_manifest_name, target_id, target_component_name);
|
||||
Box::new(BackoffWrapper::new(
|
||||
LinkScaler::new(
|
||||
snapshot_data.clone(),
|
||||
LinkScalerConfig {
|
||||
source_id,
|
||||
target,
|
||||
wit_namespace: link_property.namespace.to_owned(),
|
||||
wit_package: link_property.package.to_owned(),
|
||||
wit_interfaces: link_property.interfaces.to_owned(),
|
||||
name: link_property
|
||||
.name
|
||||
.to_owned()
|
||||
.unwrap_or_else(|| DEFAULT_LINK_NAME.to_string()),
|
||||
lattice_id: lattice_id.to_owned(),
|
||||
model_name: application_name.to_owned(),
|
||||
source_config,
|
||||
target_config,
|
||||
},
|
||||
snapshot_data.clone(),
|
||||
),
|
||||
notifier.clone(),
|
||||
config_scalers,
|
||||
source_secret_scalers,
|
||||
notifier_subject,
|
||||
application_name,
|
||||
Some(Duration::from_secs(5)),
|
||||
)) as BoxedScaler
|
||||
}
|
||||
|
||||
/// Returns a tuple which is a list of scalers and a list of the names of the configs that the
|
||||
/// scalers use.
|
||||
///
|
||||
/// Any input [ConfigProperty] that has a `properties` field will be converted into a [ConfigScaler], and
|
||||
/// the name of the configuration will be modified to be unique to the model and component. If the properties
|
||||
/// field is not present, the name will be used as-is and assumed that it's managed externally to wadm.
|
||||
fn config_to_scalers<C: ConfigSource + Send + Sync + Clone>(
|
||||
config_source: &C,
|
||||
manifest_name: &str,
|
||||
configs: &[ConfigProperty],
|
||||
) -> (Vec<ConfigScaler<C>>, Vec<String>) {
|
||||
configs
|
||||
.iter()
|
||||
.map(|config| {
|
||||
let name = if config.properties.is_some() {
|
||||
compute_component_id(manifest_name, None, &config.name)
|
||||
} else {
|
||||
config.name.clone()
|
||||
};
|
||||
(
|
||||
ConfigScaler::new(config_source.clone(), &name, config.properties.as_ref()),
|
||||
name,
|
||||
)
|
||||
})
|
||||
.unzip()
|
||||
}
|
||||
|
||||
fn secrets_to_scalers<S: SecretSource + Send + Sync + Clone>(
|
||||
secret_source: &S,
|
||||
manifest_name: &str,
|
||||
secrets: &[SecretProperty],
|
||||
policies: &HashMap<&String, &Policy>,
|
||||
) -> (Vec<SecretScaler<S>>, Vec<String>) {
|
||||
secrets
|
||||
.iter()
|
||||
.map(|s| {
|
||||
let name = compute_secret_id(manifest_name, None, &s.name);
|
||||
let policy = *policies.get(&s.properties.policy).unwrap();
|
||||
(
|
||||
SecretScaler::new(
|
||||
name.clone(),
|
||||
policy.clone(),
|
||||
s.clone(),
|
||||
secret_source.clone(),
|
||||
),
|
||||
name,
|
||||
)
|
||||
})
|
||||
.unzip()
|
||||
}
|
||||
|
||||
/// Based on the name of the model and the optionally provided ID, returns a unique ID for the
|
||||
/// component that is a sanitized version of the component reference and model name, separated
|
||||
/// by a dash.
|
||||
pub(crate) fn compute_component_id(
|
||||
manifest_name: &str,
|
||||
component_id: Option<&String>,
|
||||
component_name: &str,
|
||||
) -> String {
|
||||
if let Some(id) = component_id {
|
||||
id.to_owned()
|
||||
} else {
|
||||
format!(
|
||||
"{}-{}",
|
||||
manifest_name
|
||||
.to_lowercase()
|
||||
.replace(|c: char| !c.is_ascii_alphanumeric(), "_"),
|
||||
component_name
|
||||
.to_lowercase()
|
||||
.replace(|c: char| !c.is_ascii_alphanumeric(), "_")
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn compute_secret_id(
|
||||
manifest_name: &str,
|
||||
component_id: Option<&String>,
|
||||
component_name: &str,
|
||||
) -> String {
|
||||
let name = compute_component_id(manifest_name, component_id, component_name);
|
||||
format!("{SECRET_PREFIX}_{name}")
|
||||
}
|
||||
|
||||
/// Helper function to resolve a link to a manifest component, returning the name of the manifest
|
||||
/// and the name of the component where the target resides.
|
||||
///
|
||||
/// If the component resides in the same manifest, then the name of the manifest & the name of the
|
||||
/// component as specified will be returned. In the case that the component resides in a shared
|
||||
/// application, the name of the shared application & the name of the component in that application
|
||||
/// will be returned.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `application_name` - The name of the manifest that the scalers are being created for
|
||||
/// * `component_name` - The name of the component in the source manifest to target
|
||||
/// * `component_image_ref` - The image reference for the component
|
||||
/// * `shared_app_info` - The optional shared application reference for the component
|
||||
fn resolve_manifest_component<'a>(
|
||||
application_name: &'a str,
|
||||
component_name: &'a str,
|
||||
component_image_ref: Option<&'a String>,
|
||||
shared_app_info: Option<&'a SharedApplicationComponentProperties>,
|
||||
) -> Result<(&'a str, &'a str), &'a str> {
|
||||
match (component_image_ref, shared_app_info) {
|
||||
(Some(_), None) => Ok((application_name, component_name)),
|
||||
(None, Some(app)) => Ok((app.name.as_str(), app.component.as_str())),
|
||||
// These two cases should both be unreachable, since this is caught at manifest
|
||||
// validation before it's put. Just in case, we'll log an error and ensure the status is failed
|
||||
(None, None) => Err("Application did not specify an image or shared application reference"),
|
||||
(Some(_image), Some(_app)) => {
|
||||
Err("Application specified both an image and a shared application reference")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::compute_component_id;
|
||||
|
||||
#[test]
|
||||
fn compute_proper_component_id() {
|
||||
// User supplied ID always takes precedence
|
||||
assert_eq!(
|
||||
compute_component_id("mymodel", Some(&"myid".to_string()), "echo"),
|
||||
"myid"
|
||||
);
|
||||
assert_eq!(
|
||||
compute_component_id(
|
||||
"some model name with spaces cause yaml",
|
||||
Some(&"myid".to_string()),
|
||||
" echo "
|
||||
),
|
||||
"myid"
|
||||
);
|
||||
// Sanitize component reference
|
||||
assert_eq!(
|
||||
compute_component_id("mymodel", None, "echo-component"),
|
||||
"mymodel-echo_component"
|
||||
);
|
||||
// Ensure we can support spaces in the model name, because YAML strings
|
||||
assert_eq!(
|
||||
compute_component_id("some model name with spaces cause yaml", None, "echo"),
|
||||
"some_model_name_with_spaces_cause_yaml-echo"
|
||||
);
|
||||
// Ensure we can support spaces in the model name, because YAML strings
|
||||
// Ensure we can support lowercasing the reference as well, just in case
|
||||
assert_eq!(
|
||||
compute_component_id("My ThInG", None, "thing.wasm"),
|
||||
"my_thing-thing_wasm"
|
||||
);
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
|
|
@ -0,0 +1,841 @@
|
|||
use std::collections::BTreeMap;
|
||||
|
||||
use anyhow::Result;
|
||||
use async_trait::async_trait;
|
||||
use tokio::sync::RwLock;
|
||||
use tracing::{instrument, trace};
|
||||
use wadm_types::api::StatusType;
|
||||
use wadm_types::{api::StatusInfo, Spread, SpreadScalerProperty, TraitProperty};
|
||||
|
||||
use crate::commands::StopProvider;
|
||||
use crate::events::{
|
||||
ConfigSet, HostHeartbeat, ProviderHealthCheckFailed, ProviderHealthCheckInfo,
|
||||
ProviderHealthCheckPassed, ProviderInfo, ProviderStarted, ProviderStopped,
|
||||
};
|
||||
use crate::scaler::compute_id_sha256;
|
||||
use crate::scaler::spreadscaler::{
|
||||
compute_ineligible_hosts, eligible_hosts, provider::ProviderSpreadConfig,
|
||||
spreadscaler_annotations,
|
||||
};
|
||||
use crate::storage::{Provider, ProviderStatus};
|
||||
use crate::SCALER_KEY;
|
||||
use crate::{
|
||||
commands::{Command, StartProvider},
|
||||
events::{Event, HostStarted, HostStopped},
|
||||
scaler::Scaler,
|
||||
storage::{Host, ReadStore},
|
||||
};
|
||||
|
||||
use super::DAEMON_SCALER_KIND;
|
||||
|
||||
/// The ProviderDaemonScaler ensures that a provider is running on every host, according to a
|
||||
/// [SpreadScalerProperty](crate::model::SpreadScalerProperty)
|
||||
///
|
||||
/// If no [Spreads](crate::model::Spread) are specified, this Scaler simply maintains the number of instances
|
||||
/// on every available host.
|
||||
pub struct ProviderDaemonScaler<S> {
|
||||
config: ProviderSpreadConfig,
|
||||
store: S,
|
||||
id: String,
|
||||
status: RwLock<StatusInfo>,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<S: ReadStore + Send + Sync + Clone> Scaler for ProviderDaemonScaler<S> {
|
||||
fn id(&self) -> &str {
|
||||
&self.id
|
||||
}
|
||||
|
||||
fn kind(&self) -> &str {
|
||||
DAEMON_SCALER_KIND
|
||||
}
|
||||
|
||||
fn name(&self) -> String {
|
||||
self.config.provider_id.to_string()
|
||||
}
|
||||
|
||||
async fn status(&self) -> StatusInfo {
|
||||
let _ = self.reconcile().await;
|
||||
self.status.read().await.to_owned()
|
||||
}
|
||||
|
||||
async fn update_config(&mut self, config: TraitProperty) -> Result<Vec<Command>> {
|
||||
let spread_config = match config {
|
||||
TraitProperty::SpreadScaler(prop) => prop,
|
||||
_ => anyhow::bail!("Given config was not a daemon scaler config object"),
|
||||
};
|
||||
// If no spreads are specified, an empty spread is sufficient to match _every_ host
|
||||
// in a lattice
|
||||
let spread_config = if spread_config.spread.is_empty() {
|
||||
SpreadScalerProperty {
|
||||
instances: spread_config.instances,
|
||||
spread: vec![Spread::default()],
|
||||
}
|
||||
} else {
|
||||
spread_config
|
||||
};
|
||||
self.config.spread_config = spread_config;
|
||||
self.reconcile().await
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip_all, fields(scaler_id = %self.id))]
|
||||
async fn handle_event(&self, event: &Event) -> Result<Vec<Command>> {
|
||||
// NOTE(brooksmtownsend): We could be more efficient here and instead of running
|
||||
// the entire reconcile, smart compute exactly what needs to change, but it just
|
||||
// requires more code branches and would be fine as a future improvement
|
||||
match event {
|
||||
Event::ProviderStarted(ProviderStarted { provider_id, .. })
|
||||
| Event::ProviderStopped(ProviderStopped { provider_id, .. })
|
||||
if provider_id == &self.config.provider_id =>
|
||||
{
|
||||
self.reconcile().await
|
||||
}
|
||||
// If the host labels match any spread requirement, perform reconcile
|
||||
Event::HostStopped(HostStopped { labels, .. })
|
||||
| Event::HostStarted(HostStarted { labels, .. })
|
||||
| Event::HostHeartbeat(HostHeartbeat { labels, .. })
|
||||
if self.config.spread_config.spread.iter().any(|spread| {
|
||||
spread.requirements.iter().all(|(key, value)| {
|
||||
labels.get(key).map(|val| val == value).unwrap_or(false)
|
||||
})
|
||||
}) =>
|
||||
{
|
||||
self.reconcile().await
|
||||
}
|
||||
// perform status updates for health check events
|
||||
Event::ProviderHealthCheckFailed(ProviderHealthCheckFailed {
|
||||
data: ProviderHealthCheckInfo { provider_id, .. },
|
||||
})
|
||||
| Event::ProviderHealthCheckPassed(ProviderHealthCheckPassed {
|
||||
data: ProviderHealthCheckInfo { provider_id, .. },
|
||||
}) if provider_id == &self.config.provider_id => {
|
||||
let provider = self
|
||||
.store
|
||||
.get::<Provider>(&self.config.lattice_id, &self.config.provider_id)
|
||||
.await?;
|
||||
|
||||
let unhealthy_providers = provider.map_or(0, |p| {
|
||||
p.hosts
|
||||
.values()
|
||||
.filter(|s| *s == &ProviderStatus::Failed)
|
||||
.count()
|
||||
});
|
||||
let status = self.status.read().await.to_owned();
|
||||
// update health status of scaler
|
||||
if let Some(status) = match (status, unhealthy_providers > 0) {
|
||||
// scaler is deployed but contains unhealthy providers
|
||||
(
|
||||
StatusInfo {
|
||||
status_type: StatusType::Deployed,
|
||||
..
|
||||
},
|
||||
true,
|
||||
) => Some(StatusInfo::failed(&format!(
|
||||
"Unhealthy provider on {} host(s)",
|
||||
unhealthy_providers
|
||||
))),
|
||||
// scaler can become unhealthy only if it was previously deployed
|
||||
// once scaler becomes healthy again revert back to deployed state
|
||||
// this is a workaround to detect unhealthy status until
|
||||
// StatusType::Unhealthy can be used
|
||||
(
|
||||
StatusInfo {
|
||||
status_type: StatusType::Failed,
|
||||
message,
|
||||
},
|
||||
false,
|
||||
) if message.starts_with("Unhealthy provider on") => {
|
||||
Some(StatusInfo::deployed(""))
|
||||
}
|
||||
// don't update status if scaler is not deployed
|
||||
_ => None,
|
||||
} {
|
||||
*self.status.write().await = status;
|
||||
}
|
||||
|
||||
// only status needs update no new commands required
|
||||
Ok(Vec::new())
|
||||
}
|
||||
Event::ConfigSet(ConfigSet { config_name })
|
||||
if self.config.provider_config.contains(config_name) =>
|
||||
{
|
||||
self.reconcile().await
|
||||
}
|
||||
// No other event impacts the job of this scaler so we can ignore it
|
||||
_ => Ok(Vec::new()),
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip_all, fields(name = %self.config.model_name, scaler_id = %self.id))]
|
||||
async fn reconcile(&self) -> Result<Vec<Command>> {
|
||||
let hosts = self.store.list::<Host>(&self.config.lattice_id).await?;
|
||||
let provider_id = &self.config.provider_id;
|
||||
let provider_ref = &self.config.provider_reference;
|
||||
|
||||
let ineligible_hosts = compute_ineligible_hosts(
|
||||
&hosts,
|
||||
self.config
|
||||
.spread_config
|
||||
.spread
|
||||
.iter()
|
||||
.collect::<Vec<&Spread>>(),
|
||||
);
|
||||
// Remove any providers that are managed by this scaler and running on ineligible hosts
|
||||
let remove_ineligible: Vec<Command> = ineligible_hosts
|
||||
.iter()
|
||||
.filter_map(|(_host_id, host)| {
|
||||
if host
|
||||
.providers
|
||||
.get(&ProviderInfo {
|
||||
provider_id: provider_id.to_string(),
|
||||
provider_ref: provider_ref.to_string(),
|
||||
annotations: BTreeMap::default(),
|
||||
})
|
||||
.is_some_and(|provider| {
|
||||
provider
|
||||
.annotations
|
||||
.get(SCALER_KEY)
|
||||
.is_some_and(|id| id == &self.id)
|
||||
})
|
||||
{
|
||||
Some(Command::StopProvider(StopProvider {
|
||||
provider_id: provider_id.to_owned(),
|
||||
host_id: host.id.to_string(),
|
||||
model_name: self.config.model_name.to_owned(),
|
||||
annotations: BTreeMap::default(),
|
||||
}))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
// If we found any providers running on ineligible hosts, remove them before
|
||||
// attempting to start new ones.
|
||||
if !remove_ineligible.is_empty() {
|
||||
let status = StatusInfo::reconciling(
|
||||
"Found providers running on ineligible hosts, removing them.",
|
||||
);
|
||||
trace!(?status, "Updating scaler status");
|
||||
*self.status.write().await = status;
|
||||
return Ok(remove_ineligible);
|
||||
}
|
||||
|
||||
let mut spread_status = vec![];
|
||||
|
||||
trace!(spread = ?self.config.spread_config.spread, ?provider_id, "Computing commands");
|
||||
let commands = self
|
||||
.config
|
||||
.spread_config
|
||||
.spread
|
||||
.iter()
|
||||
.flat_map(|spread| {
|
||||
let eligible_hosts = eligible_hosts(&hosts, spread);
|
||||
if !eligible_hosts.is_empty() {
|
||||
eligible_hosts
|
||||
.iter()
|
||||
// Filter out hosts that are already running this provider
|
||||
.filter_map(|(_host_id, host)| {
|
||||
let provider_on_host = host.providers.get(&ProviderInfo {
|
||||
provider_id: provider_id.to_string(),
|
||||
provider_ref: provider_ref.to_string(),
|
||||
annotations: BTreeMap::default(),
|
||||
});
|
||||
match (provider_on_host, self.config.spread_config.instances) {
|
||||
// Spread instances set to 0 means we're cleaning up and should stop
|
||||
// running providers
|
||||
(Some(_), 0) => Some(Command::StopProvider(StopProvider {
|
||||
provider_id: provider_id.to_owned(),
|
||||
host_id: host.id.to_string(),
|
||||
model_name: self.config.model_name.to_owned(),
|
||||
annotations: spreadscaler_annotations(&spread.name, &self.id),
|
||||
})),
|
||||
// Whenever instances > 0, we should start a provider if it's not already running
|
||||
(None, _n) => Some(Command::StartProvider(StartProvider {
|
||||
reference: provider_ref.to_owned(),
|
||||
provider_id: provider_id.to_owned(),
|
||||
host_id: host.id.to_string(),
|
||||
model_name: self.config.model_name.to_owned(),
|
||||
annotations: spreadscaler_annotations(&spread.name, &self.id),
|
||||
config: self.config.provider_config.clone(),
|
||||
})),
|
||||
_ => None,
|
||||
}
|
||||
})
|
||||
.collect::<Vec<Command>>()
|
||||
} else {
|
||||
// No hosts were eligible, so we can't attempt to add or remove providers
|
||||
trace!(?spread.name, "Found no eligible hosts for daemon scaler");
|
||||
spread_status.push(StatusInfo::failed(&format!(
|
||||
"Could not satisfy daemonscaler {} for {}, 0 eligible hosts found.",
|
||||
spread.name, self.config.provider_reference
|
||||
)));
|
||||
vec![]
|
||||
}
|
||||
})
|
||||
.collect::<Vec<Command>>();
|
||||
|
||||
trace!(?commands, "Calculated commands for provider daemonscaler");
|
||||
|
||||
let status = match (spread_status.is_empty(), commands.is_empty()) {
|
||||
// No failures, no commands, scaler satisfied
|
||||
(true, true) => StatusInfo::deployed(""),
|
||||
// No failures, commands generated, scaler is reconciling
|
||||
(true, false) => {
|
||||
StatusInfo::reconciling(&format!("Scaling provider on {} host(s)", commands.len()))
|
||||
}
|
||||
// Failures occurred, scaler is in a failed state
|
||||
(false, _) => StatusInfo::failed(
|
||||
&spread_status
|
||||
.into_iter()
|
||||
.map(|s| s.message)
|
||||
.collect::<Vec<String>>()
|
||||
.join(" "),
|
||||
),
|
||||
};
|
||||
trace!(?status, "Updating scaler status");
|
||||
*self.status.write().await = status;
|
||||
|
||||
Ok(commands)
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip_all, fields(name = %self.config.model_name))]
|
||||
async fn cleanup(&self) -> Result<Vec<Command>> {
|
||||
let mut config_clone = self.config.clone();
|
||||
config_clone.spread_config.instances = 0;
|
||||
|
||||
let cleanerupper = ProviderDaemonScaler {
|
||||
config: config_clone,
|
||||
store: self.store.clone(),
|
||||
id: self.id.clone(),
|
||||
status: RwLock::new(StatusInfo::reconciling("")),
|
||||
};
|
||||
|
||||
cleanerupper.reconcile().await
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: ReadStore + Send + Sync> ProviderDaemonScaler<S> {
|
||||
/// Construct a new ProviderDaemonScaler with specified configuration values
|
||||
pub fn new(store: S, config: ProviderSpreadConfig, component_name: &str) -> Self {
|
||||
// Compute the id of this scaler based on all of the configuration values
|
||||
// that make it unique. This is used during upgrades to determine if a
|
||||
// scaler is the same as a previous one.
|
||||
let mut id_parts = vec![
|
||||
DAEMON_SCALER_KIND,
|
||||
&config.model_name,
|
||||
component_name,
|
||||
&config.provider_id,
|
||||
&config.provider_reference,
|
||||
];
|
||||
id_parts.extend(
|
||||
config
|
||||
.provider_config
|
||||
.iter()
|
||||
.map(std::string::String::as_str),
|
||||
);
|
||||
let id = compute_id_sha256(&id_parts);
|
||||
|
||||
// If no spreads are specified, an empty spread is sufficient to match _every_ host
|
||||
// in a lattice
|
||||
let spread_config = if config.spread_config.spread.is_empty() {
|
||||
SpreadScalerProperty {
|
||||
instances: config.spread_config.instances,
|
||||
spread: vec![Spread::default()],
|
||||
}
|
||||
} else {
|
||||
config.spread_config
|
||||
};
|
||||
Self {
|
||||
store,
|
||||
config: ProviderSpreadConfig {
|
||||
spread_config,
|
||||
..config
|
||||
},
|
||||
id,
|
||||
status: RwLock::new(StatusInfo::reconciling("")),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use std::{
|
||||
collections::{BTreeMap, HashMap, HashSet},
|
||||
sync::Arc,
|
||||
};
|
||||
|
||||
use anyhow::Result;
|
||||
use chrono::Utc;
|
||||
use wadm_types::{Spread, SpreadScalerProperty};
|
||||
|
||||
use crate::{
|
||||
commands::{Command, StartProvider},
|
||||
scaler::{spreadscaler::spreadscaler_annotations, Scaler},
|
||||
storage::{Host, Provider, Store},
|
||||
test_util::TestStore,
|
||||
};
|
||||
|
||||
use super::*;
|
||||
|
||||
const MODEL_NAME: &str = "test_provider_spreadscaler";
|
||||
|
||||
#[test]
|
||||
fn test_id_generator() {
|
||||
let config = ProviderSpreadConfig {
|
||||
lattice_id: "lattice".to_string(),
|
||||
provider_reference: "provider_ref".to_string(),
|
||||
provider_id: "provider_id".to_string(),
|
||||
model_name: MODEL_NAME.to_string(),
|
||||
spread_config: SpreadScalerProperty {
|
||||
instances: 1,
|
||||
spread: vec![],
|
||||
},
|
||||
provider_config: vec![],
|
||||
};
|
||||
|
||||
let scaler1 =
|
||||
ProviderDaemonScaler::new(Arc::new(TestStore::default()), config, "myprovider");
|
||||
|
||||
let config = ProviderSpreadConfig {
|
||||
lattice_id: "lattice".to_string(),
|
||||
provider_reference: "provider_ref".to_string(),
|
||||
provider_id: "provider_id".to_string(),
|
||||
model_name: MODEL_NAME.to_string(),
|
||||
spread_config: SpreadScalerProperty {
|
||||
instances: 1,
|
||||
spread: vec![],
|
||||
},
|
||||
provider_config: vec!["foobar".to_string()],
|
||||
};
|
||||
|
||||
let scaler2 =
|
||||
ProviderDaemonScaler::new(Arc::new(TestStore::default()), config, "myprovider");
|
||||
assert_ne!(
|
||||
scaler1.id(),
|
||||
scaler2.id(),
|
||||
"ProviderDaemonScaler IDs should be different with different configuration"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn can_spread_on_multiple_hosts() -> Result<()> {
|
||||
let lattice_id = "provider_spread_multi_host";
|
||||
let provider_ref = "fakecloud.azurecr.io/provider:3.2.1".to_string();
|
||||
let provider_id = "VASDASDIAMAREALPROVIDERPROVIDER";
|
||||
|
||||
let host_id_one = "NASDASDIMAREALHOSTONE";
|
||||
let host_id_two = "NASDASDIMAREALHOSTTWO";
|
||||
|
||||
let store = Arc::new(TestStore::default());
|
||||
|
||||
store
|
||||
.store(
|
||||
lattice_id,
|
||||
host_id_one.to_string(),
|
||||
Host {
|
||||
components: HashMap::new(),
|
||||
friendly_name: "hey".to_string(),
|
||||
labels: HashMap::from_iter([
|
||||
("inda".to_string(), "cloud".to_string()),
|
||||
("cloud".to_string(), "fake".to_string()),
|
||||
("region".to_string(), "us-noneofyourbusiness-1".to_string()),
|
||||
]),
|
||||
providers: HashSet::new(),
|
||||
uptime_seconds: 123,
|
||||
version: None,
|
||||
id: host_id_one.to_string(),
|
||||
last_seen: Utc::now(),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
store
|
||||
.store(
|
||||
lattice_id,
|
||||
host_id_two.to_string(),
|
||||
Host {
|
||||
components: HashMap::new(),
|
||||
friendly_name: "hey".to_string(),
|
||||
labels: HashMap::from_iter([
|
||||
("inda".to_string(), "cloud".to_string()),
|
||||
("cloud".to_string(), "real".to_string()),
|
||||
("region".to_string(), "us-yourhouse-1".to_string()),
|
||||
]),
|
||||
providers: HashSet::new(),
|
||||
uptime_seconds: 123,
|
||||
version: None,
|
||||
id: host_id_two.to_string(),
|
||||
last_seen: Utc::now(),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
store
|
||||
.store(
|
||||
lattice_id,
|
||||
provider_id.to_string(),
|
||||
Provider {
|
||||
id: provider_id.to_string(),
|
||||
name: "provider".to_string(),
|
||||
issuer: "issuer".to_string(),
|
||||
reference: provider_ref.to_string(),
|
||||
hosts: HashMap::new(),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Ensure we spread evenly with equal weights, clean division
|
||||
let multi_spread_even = SpreadScalerProperty {
|
||||
// instances are ignored so putting an absurd number
|
||||
instances: 12312,
|
||||
spread: vec![Spread {
|
||||
name: "SimpleOne".to_string(),
|
||||
requirements: BTreeMap::from_iter([("inda".to_string(), "cloud".to_string())]),
|
||||
weight: Some(100),
|
||||
}],
|
||||
};
|
||||
|
||||
let spreadscaler = ProviderDaemonScaler::new(
|
||||
store.clone(),
|
||||
ProviderSpreadConfig {
|
||||
lattice_id: lattice_id.to_string(),
|
||||
provider_id: provider_id.to_string(),
|
||||
provider_reference: provider_ref.to_string(),
|
||||
spread_config: multi_spread_even,
|
||||
model_name: MODEL_NAME.to_string(),
|
||||
provider_config: vec!["foobar".to_string()],
|
||||
},
|
||||
"fake_component",
|
||||
);
|
||||
|
||||
let mut commands = spreadscaler.reconcile().await?;
|
||||
assert_eq!(commands.len(), 2);
|
||||
// Sort to enable predictable test
|
||||
commands.sort_unstable_by(|a, b| match (a, b) {
|
||||
(Command::StartProvider(a), Command::StartProvider(b)) => a.host_id.cmp(&b.host_id),
|
||||
_ => panic!("Should have been start providers"),
|
||||
});
|
||||
|
||||
let cmd_one = commands.first().cloned();
|
||||
match cmd_one {
|
||||
None => panic!("command should have existed"),
|
||||
Some(Command::StartProvider(start)) => {
|
||||
assert_eq!(
|
||||
start,
|
||||
StartProvider {
|
||||
reference: provider_ref.to_string(),
|
||||
provider_id: provider_id.to_string(),
|
||||
host_id: host_id_one.to_string(),
|
||||
model_name: MODEL_NAME.to_string(),
|
||||
annotations: spreadscaler_annotations("SimpleOne", spreadscaler.id()),
|
||||
config: vec!["foobar".to_string()],
|
||||
}
|
||||
);
|
||||
// This manual assertion is because we don't hash on annotations and I want to be extra sure we have the
|
||||
// correct ones
|
||||
assert_eq!(
|
||||
start.annotations,
|
||||
spreadscaler_annotations("SimpleOne", spreadscaler.id())
|
||||
)
|
||||
}
|
||||
Some(_other) => panic!("command should have been a start provider"),
|
||||
}
|
||||
|
||||
let cmd_two = commands.get(1).cloned();
|
||||
match cmd_two {
|
||||
None => panic!("command should have existed"),
|
||||
Some(Command::StartProvider(start)) => {
|
||||
assert_eq!(
|
||||
start,
|
||||
StartProvider {
|
||||
reference: provider_ref.to_string(),
|
||||
provider_id: provider_id.to_string(),
|
||||
host_id: host_id_two.to_string(),
|
||||
model_name: MODEL_NAME.to_string(),
|
||||
annotations: spreadscaler_annotations("SimpleTwo", spreadscaler.id()),
|
||||
config: vec!["foobar".to_string()],
|
||||
}
|
||||
);
|
||||
// This manual assertion is because we don't hash on annotations and I want to be extra sure we have the
|
||||
// correct ones
|
||||
assert_eq!(
|
||||
start.annotations,
|
||||
spreadscaler_annotations("SimpleOne", spreadscaler.id())
|
||||
)
|
||||
}
|
||||
Some(_other) => panic!("command should have been a start provider"),
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_healthy_providers_return_healthy_status() -> Result<()> {
|
||||
let lattice_id = "test_healthy_providers";
|
||||
let provider_ref = "fakecloud.azurecr.io/provider:3.2.1".to_string();
|
||||
let provider_id = "VASDASDIAMAREALPROVIDERPROVIDER";
|
||||
|
||||
let host_id_one = "NASDASDIMAREALHOSTONE";
|
||||
let host_id_two = "NASDASDIMAREALHOSTTWO";
|
||||
|
||||
let store = Arc::new(TestStore::default());
|
||||
|
||||
store
|
||||
.store(
|
||||
lattice_id,
|
||||
host_id_one.to_string(),
|
||||
Host {
|
||||
components: HashMap::new(),
|
||||
friendly_name: "hey".to_string(),
|
||||
labels: HashMap::from_iter([
|
||||
("inda".to_string(), "cloud".to_string()),
|
||||
("cloud".to_string(), "fake".to_string()),
|
||||
("region".to_string(), "us-noneofyourbusiness-1".to_string()),
|
||||
]),
|
||||
providers: HashSet::from_iter([ProviderInfo {
|
||||
provider_id: provider_id.to_string(),
|
||||
provider_ref: provider_ref.to_string(),
|
||||
annotations: BTreeMap::default(),
|
||||
}]),
|
||||
uptime_seconds: 123,
|
||||
version: None,
|
||||
id: host_id_one.to_string(),
|
||||
last_seen: Utc::now(),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
store
|
||||
.store(
|
||||
lattice_id,
|
||||
host_id_two.to_string(),
|
||||
Host {
|
||||
components: HashMap::new(),
|
||||
friendly_name: "hey".to_string(),
|
||||
labels: HashMap::from_iter([
|
||||
("inda".to_string(), "cloud".to_string()),
|
||||
("cloud".to_string(), "real".to_string()),
|
||||
("region".to_string(), "us-yourhouse-1".to_string()),
|
||||
]),
|
||||
providers: HashSet::from_iter([ProviderInfo {
|
||||
provider_id: provider_id.to_string(),
|
||||
provider_ref: provider_ref.to_string(),
|
||||
annotations: BTreeMap::default(),
|
||||
}]),
|
||||
uptime_seconds: 123,
|
||||
version: None,
|
||||
id: host_id_two.to_string(),
|
||||
last_seen: Utc::now(),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
store
|
||||
.store(
|
||||
lattice_id,
|
||||
provider_id.to_string(),
|
||||
Provider {
|
||||
id: provider_id.to_string(),
|
||||
name: "provider".to_string(),
|
||||
issuer: "issuer".to_string(),
|
||||
reference: provider_ref.to_string(),
|
||||
hosts: HashMap::from([
|
||||
(host_id_one.to_string(), ProviderStatus::Failed),
|
||||
(host_id_two.to_string(), ProviderStatus::Running),
|
||||
]),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Ensure we spread evenly with equal weights, clean division
|
||||
let multi_spread_even = SpreadScalerProperty {
|
||||
// instances are ignored so putting an absurd number
|
||||
instances: 2,
|
||||
spread: vec![Spread {
|
||||
name: "SimpleOne".to_string(),
|
||||
requirements: BTreeMap::from_iter([("inda".to_string(), "cloud".to_string())]),
|
||||
weight: Some(100),
|
||||
}],
|
||||
};
|
||||
|
||||
let spreadscaler = ProviderDaemonScaler::new(
|
||||
store.clone(),
|
||||
ProviderSpreadConfig {
|
||||
lattice_id: lattice_id.to_string(),
|
||||
provider_id: provider_id.to_string(),
|
||||
provider_reference: provider_ref.to_string(),
|
||||
spread_config: multi_spread_even,
|
||||
model_name: MODEL_NAME.to_string(),
|
||||
provider_config: vec!["foobar".to_string()],
|
||||
},
|
||||
"fake_component",
|
||||
);
|
||||
|
||||
spreadscaler.reconcile().await?;
|
||||
spreadscaler
|
||||
.handle_event(&Event::ProviderHealthCheckFailed(
|
||||
ProviderHealthCheckFailed {
|
||||
data: ProviderHealthCheckInfo {
|
||||
provider_id: provider_id.to_string(),
|
||||
host_id: host_id_one.to_string(),
|
||||
},
|
||||
},
|
||||
))
|
||||
.await?;
|
||||
|
||||
store
|
||||
.store(
|
||||
lattice_id,
|
||||
provider_id.to_string(),
|
||||
Provider {
|
||||
id: provider_id.to_string(),
|
||||
name: "provider".to_string(),
|
||||
issuer: "issuer".to_string(),
|
||||
reference: provider_ref.to_string(),
|
||||
hosts: HashMap::from([
|
||||
(host_id_one.to_string(), ProviderStatus::Pending),
|
||||
(host_id_two.to_string(), ProviderStatus::Running),
|
||||
]),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
spreadscaler
|
||||
.handle_event(&Event::ProviderHealthCheckPassed(
|
||||
ProviderHealthCheckPassed {
|
||||
data: ProviderHealthCheckInfo {
|
||||
provider_id: provider_id.to_string(),
|
||||
host_id: host_id_two.to_string(),
|
||||
},
|
||||
},
|
||||
))
|
||||
.await?;
|
||||
|
||||
assert_eq!(
|
||||
spreadscaler.status.read().await.to_owned(),
|
||||
StatusInfo::deployed("")
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_unhealthy_providers_return_unhealthy_status() -> Result<()> {
|
||||
let lattice_id = "test_unhealthy_providers";
|
||||
let provider_ref = "fakecloud.azurecr.io/provider:3.2.1".to_string();
|
||||
let provider_id = "VASDASDIAMAREALPROVIDERPROVIDER";
|
||||
|
||||
let host_id_one = "NASDASDIMAREALHOSTONE";
|
||||
let host_id_two = "NASDASDIMAREALHOSTTWO";
|
||||
|
||||
let store = Arc::new(TestStore::default());
|
||||
|
||||
store
|
||||
.store(
|
||||
lattice_id,
|
||||
host_id_one.to_string(),
|
||||
Host {
|
||||
components: HashMap::new(),
|
||||
friendly_name: "hey".to_string(),
|
||||
labels: HashMap::from_iter([
|
||||
("inda".to_string(), "cloud".to_string()),
|
||||
("cloud".to_string(), "fake".to_string()),
|
||||
("region".to_string(), "us-noneofyourbusiness-1".to_string()),
|
||||
]),
|
||||
providers: HashSet::from_iter([ProviderInfo {
|
||||
provider_id: provider_id.to_string(),
|
||||
provider_ref: provider_ref.to_string(),
|
||||
annotations: BTreeMap::default(),
|
||||
}]),
|
||||
uptime_seconds: 123,
|
||||
version: None,
|
||||
id: host_id_one.to_string(),
|
||||
last_seen: Utc::now(),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
store
|
||||
.store(
|
||||
lattice_id,
|
||||
host_id_two.to_string(),
|
||||
Host {
|
||||
components: HashMap::new(),
|
||||
friendly_name: "hey".to_string(),
|
||||
labels: HashMap::from_iter([
|
||||
("inda".to_string(), "cloud".to_string()),
|
||||
("cloud".to_string(), "real".to_string()),
|
||||
("region".to_string(), "us-yourhouse-1".to_string()),
|
||||
]),
|
||||
providers: HashSet::from_iter([ProviderInfo {
|
||||
provider_id: provider_id.to_string(),
|
||||
provider_ref: provider_ref.to_string(),
|
||||
annotations: BTreeMap::default(),
|
||||
}]),
|
||||
uptime_seconds: 123,
|
||||
version: None,
|
||||
id: host_id_two.to_string(),
|
||||
last_seen: Utc::now(),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
store
|
||||
.store(
|
||||
lattice_id,
|
||||
provider_id.to_string(),
|
||||
Provider {
|
||||
id: provider_id.to_string(),
|
||||
name: "provider".to_string(),
|
||||
issuer: "issuer".to_string(),
|
||||
reference: provider_ref.to_string(),
|
||||
hosts: HashMap::from([
|
||||
(host_id_one.to_string(), ProviderStatus::Failed),
|
||||
(host_id_two.to_string(), ProviderStatus::Running),
|
||||
]),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Ensure we spread evenly with equal weights, clean division
|
||||
let multi_spread_even = SpreadScalerProperty {
|
||||
// instances are ignored so putting an absurd number
|
||||
instances: 2,
|
||||
spread: vec![Spread {
|
||||
name: "SimpleOne".to_string(),
|
||||
requirements: BTreeMap::from_iter([("inda".to_string(), "cloud".to_string())]),
|
||||
weight: Some(100),
|
||||
}],
|
||||
};
|
||||
|
||||
let spreadscaler = ProviderDaemonScaler::new(
|
||||
store.clone(),
|
||||
ProviderSpreadConfig {
|
||||
lattice_id: lattice_id.to_string(),
|
||||
provider_id: provider_id.to_string(),
|
||||
provider_reference: provider_ref.to_string(),
|
||||
spread_config: multi_spread_even,
|
||||
model_name: MODEL_NAME.to_string(),
|
||||
provider_config: vec!["foobar".to_string()],
|
||||
},
|
||||
"fake_component",
|
||||
);
|
||||
|
||||
spreadscaler.reconcile().await?;
|
||||
spreadscaler
|
||||
.handle_event(&Event::ProviderHealthCheckFailed(
|
||||
ProviderHealthCheckFailed {
|
||||
data: ProviderHealthCheckInfo {
|
||||
provider_id: provider_id.to_string(),
|
||||
host_id: host_id_one.to_string(),
|
||||
},
|
||||
},
|
||||
))
|
||||
.await?;
|
||||
|
||||
assert_eq!(
|
||||
spreadscaler.status.read().await.to_owned(),
|
||||
StatusInfo::failed("Unhealthy provider on 1 host(s)")
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
|
@ -1,6 +1,6 @@
|
|||
//! A struct that manages creating and removing scalers for all manifests
|
||||
|
||||
use std::{collections::HashMap, ops::Deref, sync::Arc, time::Duration};
|
||||
use std::{collections::HashMap, ops::Deref, sync::Arc};
|
||||
|
||||
use anyhow::Result;
|
||||
use async_nats::jetstream::{
|
||||
|
|
@ -17,27 +17,20 @@ use tokio::{
|
|||
task::JoinHandle,
|
||||
};
|
||||
use tracing::{debug, error, instrument, trace, warn};
|
||||
use wadm_types::{
|
||||
api::{Status, StatusInfo},
|
||||
Manifest,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
events::Event,
|
||||
model::{
|
||||
Component, Manifest, Properties, SpreadScalerProperty, Trait, TraitProperty, LINKDEF_TRAIT,
|
||||
SPREADSCALER_TRAIT,
|
||||
},
|
||||
publisher::Publisher,
|
||||
scaler::{spreadscaler::ActorSpreadScaler, Command, Scaler},
|
||||
storage::ReadStore,
|
||||
workers::{CommandPublisher, LinkSource},
|
||||
DEFAULT_LINK_NAME,
|
||||
scaler::{Command, Scaler},
|
||||
storage::{snapshot::SnapshotStore, ReadStore},
|
||||
workers::{CommandPublisher, ConfigSource, LinkSource, SecretSource, StatusPublisher},
|
||||
};
|
||||
|
||||
use super::{
|
||||
spreadscaler::{
|
||||
link::LinkScaler,
|
||||
provider::{ProviderSpreadConfig, ProviderSpreadScaler},
|
||||
},
|
||||
BackoffAwareScaler,
|
||||
};
|
||||
use super::convert::manifest_components_to_scalers;
|
||||
|
||||
pub type BoxedScaler = Box<dyn Scaler + Send + Sync + 'static>;
|
||||
pub type ScalerList = Vec<BoxedScaler>;
|
||||
|
|
@ -113,9 +106,9 @@ pub struct ScalerManager<StateStore, P: Clone, L: Clone> {
|
|||
client: P,
|
||||
subject: String,
|
||||
lattice_id: String,
|
||||
state_store: StateStore,
|
||||
command_publisher: CommandPublisher<P>,
|
||||
link_getter: L,
|
||||
status_publisher: StatusPublisher<P>,
|
||||
snapshot_data: SnapshotStore<StateStore, L>,
|
||||
}
|
||||
|
||||
impl<StateStore, P: Clone, L: Clone> Drop for ScalerManager<StateStore, P, L> {
|
||||
|
|
@ -130,7 +123,7 @@ impl<StateStore, P, L> ScalerManager<StateStore, P, L>
|
|||
where
|
||||
StateStore: ReadStore + Send + Sync + Clone + 'static,
|
||||
P: Publisher + Clone + Send + Sync + 'static,
|
||||
L: LinkSource + Clone + Send + Sync + 'static,
|
||||
L: LinkSource + ConfigSource + SecretSource + Clone + Send + Sync + 'static,
|
||||
{
|
||||
/// Creates a new ScalerManager configured to notify messages to `wadm.notify.{lattice_id}`
|
||||
/// using the given jetstream client. Also creates an ephemeral consumer for notifications on
|
||||
|
|
@ -144,6 +137,7 @@ where
|
|||
state_store: StateStore,
|
||||
manifest_store: KvStore,
|
||||
command_publisher: CommandPublisher<P>,
|
||||
status_publisher: StatusPublisher<P>,
|
||||
link_getter: L,
|
||||
) -> Result<ScalerManager<StateStore, P, L>> {
|
||||
// Create the consumer first so that we can make sure we don't miss anything during the
|
||||
|
|
@ -176,41 +170,49 @@ where
|
|||
.list(multitenant_prefix, lattice_id)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|summary| manifest_store.get(multitenant_prefix, lattice_id, summary.name));
|
||||
.map(|summary| {
|
||||
manifest_store.get(multitenant_prefix, lattice_id, summary.name().to_owned())
|
||||
});
|
||||
let all_manifests = futures::future::join_all(futs)
|
||||
.await
|
||||
.into_iter()
|
||||
.filter_map(|manifest| manifest.transpose())
|
||||
.map(|res| res.map(|(manifest, _)| manifest))
|
||||
.collect::<Result<Vec<_>>>()?;
|
||||
let snapshot_data = SnapshotStore::new(
|
||||
state_store.clone(),
|
||||
link_getter.clone(),
|
||||
lattice_id.to_owned(),
|
||||
);
|
||||
let scalers: HashMap<String, ScalerList> = all_manifests
|
||||
.into_iter()
|
||||
.filter_map(|manifest| {
|
||||
let data = manifest.get_deployed()?;
|
||||
let name = manifest.name().to_owned();
|
||||
let scalers = components_to_scalers(
|
||||
let scalers = manifest_components_to_scalers(
|
||||
&data.spec.components,
|
||||
&state_store,
|
||||
&data.policy_lookup(),
|
||||
lattice_id,
|
||||
&client,
|
||||
&name,
|
||||
&subject,
|
||||
&link_getter,
|
||||
&client,
|
||||
&snapshot_data,
|
||||
);
|
||||
Some((name, scalers))
|
||||
})
|
||||
.collect();
|
||||
|
||||
let scalers = Arc::new(RwLock::new(scalers));
|
||||
|
||||
let mut manager = ScalerManager {
|
||||
handle: None,
|
||||
scalers,
|
||||
client,
|
||||
subject,
|
||||
lattice_id: lattice_id.to_owned(),
|
||||
state_store,
|
||||
command_publisher,
|
||||
link_getter,
|
||||
status_publisher,
|
||||
snapshot_data,
|
||||
};
|
||||
let cloned = manager.clone();
|
||||
let handle = tokio::spawn(async move { cloned.notify(messages).await });
|
||||
|
|
@ -226,20 +228,32 @@ where
|
|||
lattice_id: &str,
|
||||
state_store: StateStore,
|
||||
command_publisher: CommandPublisher<P>,
|
||||
status_publisher: StatusPublisher<P>,
|
||||
link_getter: L,
|
||||
) -> ScalerManager<StateStore, P, L> {
|
||||
let snapshot_data = SnapshotStore::new(
|
||||
state_store.clone(),
|
||||
link_getter.clone(),
|
||||
lattice_id.to_owned(),
|
||||
);
|
||||
ScalerManager {
|
||||
handle: None,
|
||||
scalers: Arc::new(RwLock::new(HashMap::new())),
|
||||
client,
|
||||
subject: format!("{WADM_NOTIFY_PREFIX}.{lattice_id}"),
|
||||
lattice_id: lattice_id.to_owned(),
|
||||
state_store,
|
||||
command_publisher,
|
||||
link_getter,
|
||||
status_publisher,
|
||||
snapshot_data,
|
||||
}
|
||||
}
|
||||
|
||||
/// Refreshes the snapshot data consumed by all scalers. This is a temporary workaround until we
|
||||
/// start caching data
|
||||
pub(crate) async fn refresh_data(&self) -> Result<()> {
|
||||
self.snapshot_data.refresh().await
|
||||
}
|
||||
|
||||
/// Adds scalers for the given manifest. Emitting an event to notify other wadm processes that
|
||||
/// they should create them as well. Only returns an error if it can't notify. Returns the
|
||||
/// scaler list for immediate use in reconciliation
|
||||
|
|
@ -247,8 +261,11 @@ where
|
|||
/// This only constructs the scalers and doesn't reconcile. The returned [`Scalers`] type can be
|
||||
/// used to set this model to backoff mode
|
||||
#[instrument(level = "trace", skip_all, fields(name = %manifest.metadata.name, lattice_id = %self.lattice_id))]
|
||||
pub async fn add_scalers<'a>(&'a self, manifest: &'a Manifest) -> Result<Scalers> {
|
||||
let scalers = self.scalers_for_manifest(manifest);
|
||||
pub async fn add_scalers<'a>(
|
||||
&'a self,
|
||||
manifest: &'a Manifest,
|
||||
scalers: ScalerList,
|
||||
) -> Result<Scalers> {
|
||||
self.add_raw_scalers(&manifest.metadata.name, scalers).await;
|
||||
let notification = serde_json::to_vec(&Notifications::CreateScalers(manifest.to_owned()))?;
|
||||
self.client
|
||||
|
|
@ -263,14 +280,14 @@ where
|
|||
}
|
||||
|
||||
pub fn scalers_for_manifest<'a>(&'a self, manifest: &'a Manifest) -> ScalerList {
|
||||
components_to_scalers(
|
||||
manifest_components_to_scalers(
|
||||
&manifest.spec.components,
|
||||
&self.state_store,
|
||||
&manifest.policy_lookup(),
|
||||
&self.lattice_id,
|
||||
&self.client,
|
||||
&manifest.metadata.name,
|
||||
&self.subject,
|
||||
&self.link_getter,
|
||||
&self.client,
|
||||
&self.snapshot_data,
|
||||
)
|
||||
}
|
||||
|
||||
|
|
@ -315,14 +332,17 @@ where
|
|||
/// notification or handling commands fails, then this function will reinsert the scalers back into the internal map
|
||||
/// and return an error (so this function can be called again)
|
||||
// NOTE(thomastaylor312): This was designed the way it is to avoid race conditions. We only ever
|
||||
// stop actors and providers that have the right annotation. So if for some reason this leaves
|
||||
// something hanging, we should probably add something to the reaper
|
||||
// stop components and providers that have the right annotation. So if for some reason this
|
||||
// leaves something hanging, we should probably add something to the reaper
|
||||
#[instrument(level = "debug", skip(self), fields(lattice_id = %self.lattice_id))]
|
||||
pub async fn remove_scalers(&self, name: &str) -> Option<Result<()>> {
|
||||
let scalers = match self.remove_scalers_internal(name).await {
|
||||
Some(Ok(s)) => s,
|
||||
Some(Err(e)) => return Some(Err(e)),
|
||||
None => return None,
|
||||
Some(Ok(s)) => Some(s),
|
||||
Some(Err(e)) => {
|
||||
warn!(err = ?e, "Error when running cleanup steps for scalers. Operation will be retried");
|
||||
return Some(Err(e));
|
||||
}
|
||||
None => None,
|
||||
};
|
||||
|
||||
// SAFETY: This is entirely data in our control and should be safe to unwrap
|
||||
|
|
@ -335,7 +355,9 @@ where
|
|||
.await
|
||||
{
|
||||
error!(error = %e, "Unable to publish notification");
|
||||
self.scalers.write().await.insert(name.to_owned(), scalers);
|
||||
if let Some(scalers) = scalers {
|
||||
self.scalers.write().await.insert(name.to_owned(), scalers);
|
||||
}
|
||||
Some(Err(e))
|
||||
} else {
|
||||
Some(Ok(()))
|
||||
|
|
@ -357,17 +379,29 @@ where
|
|||
/// Does everything except sending the notification
|
||||
#[instrument(level = "debug", skip(self), fields(lattice_id = %self.lattice_id))]
|
||||
async fn remove_scalers_internal(&self, name: &str) -> Option<Result<ScalerList>> {
|
||||
// Remove the scalers first to avoid them handling events while we're cleaning up
|
||||
let scalers = self.remove_raw_scalers(name).await?;
|
||||
let commands =
|
||||
match futures::future::join_all(scalers.iter().map(|scaler| scaler.cleanup()))
|
||||
.await
|
||||
.into_iter()
|
||||
.collect::<Result<Vec<Vec<Command>>, anyhow::Error>>()
|
||||
.map(|all| all.into_iter().flatten().collect::<Vec<Command>>())
|
||||
{
|
||||
Ok(c) => c,
|
||||
Err(e) => return Some(Err(e)),
|
||||
};
|
||||
|
||||
// Always refresh data before cleaning up
|
||||
if let Err(e) = self.refresh_data().await {
|
||||
return Some(Err(e));
|
||||
}
|
||||
let commands = match futures::future::join_all(
|
||||
scalers.iter().map(|scaler| scaler.cleanup()),
|
||||
)
|
||||
.await
|
||||
.into_iter()
|
||||
.collect::<Result<Vec<Vec<Command>>, anyhow::Error>>()
|
||||
.map(|all| all.into_iter().flatten().collect::<Vec<Command>>())
|
||||
{
|
||||
Ok(c) => c,
|
||||
Err(e) => {
|
||||
warn!(err = ?e, "Error when running cleanup steps for scalers. Operation will be retried");
|
||||
// Put the scalers back into the map so we can run cleanup again on retry
|
||||
self.scalers.write().await.insert(name.to_owned(), scalers);
|
||||
return Some(Err(e));
|
||||
}
|
||||
};
|
||||
trace!(?commands, "Publishing cleanup commands");
|
||||
if let Err(e) = self.command_publisher.publish_commands(commands).await {
|
||||
error!(error = %e, "Unable to publish cleanup commands");
|
||||
|
|
@ -396,14 +430,14 @@ where
|
|||
match notification {
|
||||
Notifications::CreateScalers(manifest) => {
|
||||
// We don't want to trigger the notification, so just create the scalers and then insert
|
||||
let scalers = components_to_scalers(
|
||||
let scalers = manifest_components_to_scalers(
|
||||
&manifest.spec.components,
|
||||
&self.state_store,
|
||||
&manifest.policy_lookup(),
|
||||
&self.lattice_id,
|
||||
&self.client,
|
||||
&manifest.metadata.name,
|
||||
&self.subject,
|
||||
&self.link_getter,
|
||||
&self.client,
|
||||
&self.snapshot_data,
|
||||
);
|
||||
let num_scalers = scalers.len();
|
||||
self.add_raw_scalers(&manifest.metadata.name, scalers).await;
|
||||
|
|
@ -412,8 +446,25 @@ where
|
|||
Notifications::DeleteScalers(name) => {
|
||||
trace!(%name, "Removing scalers for manifest");
|
||||
match self.remove_scalers_internal(&name).await {
|
||||
Some(Ok(_)) => {
|
||||
trace!(%name, "Successfully removed scalers for manifest")
|
||||
Some(Ok(_)) | None => {
|
||||
trace!(%name, "Removed manifests or manifests were already removed");
|
||||
// NOTE(thomastaylor312): We publish the undeployed
|
||||
// status here after we remove scalers. All wadm
|
||||
// instances will receive this event (even the one that
|
||||
// initially deleted it) and so it made more sense to
|
||||
// publish the status here so we don't get any stray
|
||||
// reconciling status messages from a wadm instance that
|
||||
// hasn't deleted the scaler yet
|
||||
if let Err(e) = self
|
||||
.status_publisher
|
||||
.publish_status(&name, Status::new(
|
||||
StatusInfo::undeployed("Manifest has been undeployed"),
|
||||
Vec::with_capacity(0),
|
||||
))
|
||||
.await
|
||||
{
|
||||
warn!(error = ?e, "Failed to set status to undeployed");
|
||||
}
|
||||
}
|
||||
Some(Err(e)) => {
|
||||
error!(error = %e, %name, "Error when running cleanup steps for scalers. Nacking notification");
|
||||
|
|
@ -423,9 +474,6 @@ where
|
|||
continue;
|
||||
}
|
||||
}
|
||||
None => {
|
||||
debug!(%name, "Scalers don't exist or were already deleted");
|
||||
}
|
||||
}
|
||||
// NOTE(thomastaylor312): We could find that this strategy actually
|
||||
// doesn't tear down everything or leaves something hanging. If that is
|
||||
|
|
@ -443,7 +491,7 @@ where
|
|||
// wrapped ones (which is good from a Rust API point of view). If
|
||||
// this starts to become a problem, we can revisit how we handle
|
||||
// this (probably by requiring that this struct always wraps any
|
||||
// scaler in the backoff scaler and using custom methods from that
|
||||
// scaler in the backoff wrapper and using custom methods from that
|
||||
// type)
|
||||
Notifications::RegisterExpectedEvents{ name, scaler_id, triggering_event } => {
|
||||
trace!(%name, "Computing and registering expected events for manifest");
|
||||
|
|
@ -505,151 +553,3 @@ where
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
const EMPTY_TRAIT_VEC: Vec<Trait> = Vec::new();
|
||||
|
||||
/// Converts a list of components into a list of scalers
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `components` - The list of components to convert
|
||||
/// * `store` - The store to use when creating the scalers so they can access lattice state
|
||||
/// * `lattice_id` - The lattice id the scalers operate on
|
||||
/// * `name` - The name of the manifest that the scalers are being created for
|
||||
pub(crate) fn components_to_scalers<S, P, L>(
|
||||
components: &[Component],
|
||||
store: &S,
|
||||
lattice_id: &str,
|
||||
notifier: &P,
|
||||
name: &str,
|
||||
notifier_subject: &str,
|
||||
link_getter: &L,
|
||||
) -> ScalerList
|
||||
where
|
||||
S: ReadStore + Send + Sync + Clone + 'static,
|
||||
P: Publisher + Clone + Send + Sync + 'static,
|
||||
L: LinkSource + Clone + Send + Sync + 'static,
|
||||
{
|
||||
let mut scalers: ScalerList = Vec::new();
|
||||
for component in components.iter() {
|
||||
let traits = component.traits.as_ref();
|
||||
match &component.properties {
|
||||
Properties::Actor { properties: props } => {
|
||||
scalers.extend(traits.unwrap_or(&EMPTY_TRAIT_VEC).iter().filter_map(|trt| {
|
||||
match (trt.trait_type.as_str(), &trt.properties) {
|
||||
(SPREADSCALER_TRAIT, TraitProperty::SpreadScaler(p)) => {
|
||||
Some(Box::new(BackoffAwareScaler::new(
|
||||
ActorSpreadScaler::new(
|
||||
store.clone(),
|
||||
props.image.to_owned(),
|
||||
lattice_id.to_owned(),
|
||||
name.to_owned(),
|
||||
p.to_owned(),
|
||||
&component.name,
|
||||
),
|
||||
notifier.to_owned(),
|
||||
notifier_subject,
|
||||
name,
|
||||
None,
|
||||
)) as BoxedScaler)
|
||||
}
|
||||
(LINKDEF_TRAIT, TraitProperty::Linkdef(p)) => {
|
||||
components
|
||||
.iter()
|
||||
.find_map(|component| match &component.properties {
|
||||
Properties::Capability { properties: cappy }
|
||||
if component.name == p.target =>
|
||||
{
|
||||
Some(Box::new(BackoffAwareScaler::new(
|
||||
LinkScaler::new(
|
||||
store.clone(),
|
||||
props.image.to_owned(),
|
||||
cappy.image.to_owned(),
|
||||
cappy.contract.to_owned(),
|
||||
cappy.link_name.to_owned(),
|
||||
lattice_id.to_owned(),
|
||||
name.to_owned(),
|
||||
p.values.to_owned(),
|
||||
link_getter.clone(),
|
||||
),
|
||||
notifier.to_owned(),
|
||||
notifier_subject,
|
||||
name,
|
||||
None,
|
||||
))
|
||||
as BoxedScaler)
|
||||
}
|
||||
_ => None,
|
||||
})
|
||||
}
|
||||
_ => None,
|
||||
}
|
||||
}))
|
||||
}
|
||||
Properties::Capability { properties: props } => {
|
||||
if let Some(traits) = traits {
|
||||
scalers.extend(traits.iter().filter_map(|trt| {
|
||||
match (trt.trait_type.as_str(), &trt.properties) {
|
||||
(SPREADSCALER_TRAIT, TraitProperty::SpreadScaler(p)) => {
|
||||
Some(Box::new(BackoffAwareScaler::new(
|
||||
ProviderSpreadScaler::new(
|
||||
store.clone(),
|
||||
ProviderSpreadConfig {
|
||||
lattice_id: lattice_id.to_owned(),
|
||||
provider_reference: props.image.to_owned(),
|
||||
spread_config: p.to_owned(),
|
||||
provider_contract_id: props.contract.to_owned(),
|
||||
provider_link_name: props
|
||||
.link_name
|
||||
.as_deref()
|
||||
.unwrap_or(DEFAULT_LINK_NAME)
|
||||
.to_owned(),
|
||||
model_name: name.to_owned(),
|
||||
provider_config: props.config.to_owned(),
|
||||
},
|
||||
&component.name,
|
||||
),
|
||||
notifier.to_owned(),
|
||||
notifier_subject,
|
||||
name,
|
||||
// Providers are a bit longer because it can take a bit to download
|
||||
Some(Duration::from_secs(60)),
|
||||
)) as BoxedScaler)
|
||||
}
|
||||
_ => None,
|
||||
}
|
||||
}))
|
||||
} else {
|
||||
// Allow providers to omit the scaler entirely for simplicity
|
||||
scalers.push(Box::new(BackoffAwareScaler::new(
|
||||
ProviderSpreadScaler::new(
|
||||
store.clone(),
|
||||
ProviderSpreadConfig {
|
||||
lattice_id: lattice_id.to_owned(),
|
||||
provider_reference: props.image.to_owned(),
|
||||
spread_config: SpreadScalerProperty {
|
||||
replicas: 1,
|
||||
spread: vec![],
|
||||
},
|
||||
provider_contract_id: props.contract.to_owned(),
|
||||
provider_link_name: props
|
||||
.link_name
|
||||
.as_deref()
|
||||
.unwrap_or(DEFAULT_LINK_NAME)
|
||||
.to_owned(),
|
||||
model_name: name.to_owned(),
|
||||
provider_config: props.config.to_owned(),
|
||||
},
|
||||
&component.name,
|
||||
),
|
||||
notifier.to_owned(),
|
||||
notifier_subject,
|
||||
name,
|
||||
// Providers are a bit longer because it can take a bit to download
|
||||
Some(Duration::from_secs(60)),
|
||||
)) as BoxedScaler)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
scalers
|
||||
}
|
||||
|
|
@ -2,30 +2,36 @@ use std::{sync::Arc, time::Duration};
|
|||
|
||||
use anyhow::Result;
|
||||
use async_trait::async_trait;
|
||||
use sha2::{Digest, Sha256};
|
||||
use tokio::{
|
||||
sync::{Mutex, RwLock},
|
||||
task::JoinHandle,
|
||||
};
|
||||
use tracing::{instrument, trace, Instrument};
|
||||
use tracing::{error, instrument, trace, Instrument};
|
||||
use wadm_types::{api::StatusInfo, TraitProperty};
|
||||
|
||||
use crate::{
|
||||
commands::Command,
|
||||
events::{
|
||||
ActorsStartFailed, ActorsStarted, ActorsStopped, Event, Linkdef, LinkdefSet,
|
||||
ProviderStartFailed, ProviderStarted,
|
||||
},
|
||||
model::TraitProperty,
|
||||
events::{ComponentScaleFailed, ComponentScaled, Event, ProviderStartFailed, ProviderStarted},
|
||||
publisher::Publisher,
|
||||
server::StatusInfo,
|
||||
workers::{get_commands_and_result, ConfigSource, SecretSource},
|
||||
};
|
||||
|
||||
pub mod configscaler;
|
||||
mod convert;
|
||||
pub mod daemonscaler;
|
||||
pub mod manager;
|
||||
mod simplescaler;
|
||||
pub mod secretscaler;
|
||||
pub mod spreadscaler;
|
||||
pub mod statusscaler;
|
||||
|
||||
use manager::Notifications;
|
||||
|
||||
use self::configscaler::ConfigScaler;
|
||||
use self::secretscaler::SecretScaler;
|
||||
|
||||
const DEFAULT_WAIT_TIMEOUT: Duration = Duration::from_secs(30);
|
||||
const DEFAULT_SCALER_KIND: &str = "Scaler";
|
||||
|
||||
/// A trait describing a struct that can be configured to compute the difference between
|
||||
/// desired state and configured state, returning a set of commands to approach desired state.
|
||||
|
|
@ -40,11 +46,23 @@ const DEFAULT_WAIT_TIMEOUT: Duration = Duration::from_secs(30);
|
|||
#[async_trait]
|
||||
pub trait Scaler {
|
||||
/// A unique identifier for this scaler type. This is used for logging and for selecting
|
||||
/// specific scalers as needed. Generally this should be something like
|
||||
/// `$NAME_OF_SCALER_TYPE-$MODEL_NAME-$OCI_REF`. However, the only requirement is that it can
|
||||
/// uniquely identify a scaler
|
||||
/// specific scalers as needed. wadm scalers implement this by computing a sha256 hash of
|
||||
/// all of the parameters that are used to construct the scaler, therefore ensuring that
|
||||
/// the ID is unique for each scaler
|
||||
fn id(&self) -> &str;
|
||||
|
||||
/// An optional human-friendly name for this scaler. This is used for logging and for selecting
|
||||
/// specific scalers as needed. This is optional and by default returns the same value as `id`,
|
||||
/// and does not have to be unique
|
||||
fn name(&self) -> String {
|
||||
self.id().to_string()
|
||||
}
|
||||
|
||||
/// An optional kind of scaler. This is used for logging and for selecting specific scalers as needed
|
||||
fn kind(&self) -> &str {
|
||||
DEFAULT_SCALER_KIND
|
||||
}
|
||||
|
||||
/// Determine the status of this scaler according to reconciliation logic. This is the opportunity
|
||||
/// for scalers to indicate that they are unhealthy with a message as to what's missing.
|
||||
async fn status(&self) -> StatusInfo;
|
||||
|
|
@ -70,23 +88,36 @@ pub trait Scaler {
|
|||
async fn cleanup(&self) -> Result<Vec<Command>>;
|
||||
}
|
||||
|
||||
/// The BackoffAwareScaler is a wrapper around a scaler that is responsible for
|
||||
/// computing a proper backoff in terms of `expected_events` for the scaler based
|
||||
/// on its commands. When the BackoffAwareScaler handles events that it's expecting,
|
||||
/// it does not compute new commands and instead removes them from the list.
|
||||
/// The BackoffWrapper is a wrapper around a scaler that is responsible for
|
||||
/// ensuring that a particular scaler doesn't get overwhelmed with events and has the
|
||||
/// necessary prerequisites to reconcile.
|
||||
///
|
||||
/// This effectively allows the inner Scaler to only worry about the logic around
|
||||
/// 1. `required_config` & `required_secrets`: With the introduction of configuration
|
||||
/// for wadm applications, the most necessary prerequisite for components, providers
|
||||
/// and links to start is that their configuration is available. Scalers will not be
|
||||
/// able to issue commands until the configuration exists.
|
||||
/// 2. `expected_events`: For scalers that issue commands that should result in events,
|
||||
/// the BackoffWrapper is responsible for ensuring that the scaler doesn't continually
|
||||
/// issue commands that it's already expecting events for. Commonly this will allow a host
|
||||
/// to download larger images from an OCI repository without being bombarded with repeat requests.
|
||||
/// 3. `backoff_status`: If a scaler receives an event that it was expecting, but it was a failure
|
||||
/// event, the scaler should back off exponentially while reporting that failure status. This both
|
||||
/// allows for diagnosing issues with reconciliation and prevents thrashing.
|
||||
///
|
||||
/// All of the above effectively allows the inner Scaler to only worry about the logic around
|
||||
/// reconciling and handling events, rather than be concerned about whether or not
|
||||
/// it should handle a specific event, if it's causing jitter, overshoot, etc.
|
||||
///
|
||||
/// The `notifier` is used to publish notifications to add, remove, or recompute
|
||||
/// expected events with scalers on other wadm instances, as only one wadm instance
|
||||
/// at a time will handle a specific event.
|
||||
pub(crate) struct BackoffAwareScaler<T, P> {
|
||||
pub(crate) struct BackoffWrapper<T, P, C> {
|
||||
scaler: T,
|
||||
notifier: P,
|
||||
notify_subject: String,
|
||||
model_name: String,
|
||||
required_config: Vec<ConfigScaler<C>>,
|
||||
required_secrets: Vec<SecretScaler<C>>,
|
||||
/// A list of (success, Option<failure>) events that the scaler is expecting
|
||||
#[allow(clippy::type_complexity)]
|
||||
expected_events: Arc<RwLock<Vec<(Event, Option<Event>)>>>,
|
||||
|
|
@ -94,18 +125,27 @@ pub(crate) struct BackoffAwareScaler<T, P> {
|
|||
event_cleaner: Mutex<Option<JoinHandle<()>>>,
|
||||
/// The amount of time to wait before cleaning up the expected events list
|
||||
cleanup_timeout: std::time::Duration,
|
||||
/// The status of the scaler, set when the scaler is backing off due to a
|
||||
/// failure event.
|
||||
backoff_status: Arc<RwLock<Option<StatusInfo>>>,
|
||||
// TODO(#253): Figure out where/when/how to store the backoff and exponentially repeat it
|
||||
/// Responsible for cleaning up the backoff status after a specified duration
|
||||
status_cleaner: Mutex<Option<JoinHandle<()>>>,
|
||||
}
|
||||
|
||||
impl<T, P> BackoffAwareScaler<T, P>
|
||||
impl<T, P, C> BackoffWrapper<T, P, C>
|
||||
where
|
||||
T: Scaler + Send + Sync,
|
||||
P: Publisher + Send + Sync + 'static,
|
||||
C: ConfigSource + SecretSource + Send + Sync + Clone + 'static,
|
||||
{
|
||||
/// Wraps the given scaler in a new backoff aware scaler. `cleanup_timeout` can be set to a
|
||||
/// Wraps the given scaler in a new BackoffWrapper. `cleanup_timeout` can be set to a
|
||||
/// desired waiting time, otherwise it will default to 30s
|
||||
pub fn new(
|
||||
scaler: T,
|
||||
notifier: P,
|
||||
required_config: Vec<ConfigScaler<C>>,
|
||||
required_secrets: Vec<SecretScaler<C>>,
|
||||
notify_subject: &str,
|
||||
model_name: &str,
|
||||
cleanup_timeout: Option<Duration>,
|
||||
|
|
@ -113,11 +153,15 @@ where
|
|||
Self {
|
||||
scaler,
|
||||
notifier,
|
||||
required_config,
|
||||
required_secrets,
|
||||
notify_subject: notify_subject.to_owned(),
|
||||
model_name: model_name.to_string(),
|
||||
expected_events: Arc::new(RwLock::new(Vec::new())),
|
||||
event_cleaner: Mutex::new(None),
|
||||
cleanup_timeout: cleanup_timeout.unwrap_or(DEFAULT_WAIT_TIMEOUT),
|
||||
backoff_status: Arc::new(RwLock::new(None)),
|
||||
status_cleaner: Mutex::new(None),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -139,25 +183,32 @@ where
|
|||
expected_events.clear();
|
||||
}
|
||||
expected_events.extend(events);
|
||||
self.set_timed_cleanup().await;
|
||||
self.set_timed_event_cleanup().await;
|
||||
}
|
||||
|
||||
/// Removes an event pair from the expected events list if one matches the given event
|
||||
/// Returns true if the event was removed, false otherwise
|
||||
async fn remove_event(&self, event: &Event) -> Result<bool> {
|
||||
/// Returns a tuple of bools, the first indicating if the event was removed, and the second
|
||||
/// indicating if the event was the failure event
|
||||
async fn remove_event(&self, event: &Event) -> Result<(bool, bool)> {
|
||||
let mut expected_events = self.expected_events.write().await;
|
||||
let before_count = expected_events.len();
|
||||
|
||||
let mut failed_event = false;
|
||||
|
||||
expected_events.retain(|(success, fail)| {
|
||||
// Retain the event if it doesn't match either the success or optional failure event.
|
||||
// Most events have a possibility of seeing a failure and either one means we saw the
|
||||
// event we were expecting
|
||||
!evt_matches_expected(success, event)
|
||||
&& !fail
|
||||
.as_ref()
|
||||
.map(|f| evt_matches_expected(f, event))
|
||||
.unwrap_or(false)
|
||||
let matches_success = evt_matches_expected(success, event);
|
||||
let matches_failure = fail
|
||||
.as_ref()
|
||||
.map_or(false, |f| evt_matches_expected(f, event));
|
||||
|
||||
// Update failed_event if the event matches the failure event
|
||||
failed_event |= matches_failure;
|
||||
|
||||
// Retain the event if it doesn't match either the success or failure event
|
||||
!(matches_success || matches_failure)
|
||||
});
|
||||
Ok(expected_events.len() != before_count)
|
||||
|
||||
Ok((expected_events.len() < before_count, failed_event))
|
||||
}
|
||||
|
||||
/// Handles an incoming event for the given scaler.
|
||||
|
|
@ -179,8 +230,24 @@ where
|
|||
#[instrument(level = "trace", skip_all, fields(scaler_id = %self.id()))]
|
||||
async fn handle_event_internal(&self, event: &Event) -> anyhow::Result<Vec<Command>> {
|
||||
let model_name = &self.model_name;
|
||||
let commands: Vec<Command> = if self.remove_event(event).await? {
|
||||
trace!("Scaler received event that it was expecting");
|
||||
let (expected_event, failed_event) = self.remove_event(event).await?;
|
||||
let commands: Vec<Command> = if expected_event {
|
||||
// So here, if we receive a failed event that it was "expecting"
|
||||
// Then we know that the scaler status is essentially failed and should retry
|
||||
// So we should tell the other scalers to remove the event, AND other scalers
|
||||
// in the process of removing that event will know that it failed.
|
||||
trace!(failed_event, "Scaler received event that it was expecting");
|
||||
if failed_event {
|
||||
let failed_message = match event {
|
||||
Event::ProviderStartFailed(evt) => evt.error.clone(),
|
||||
Event::ComponentScaleFailed(evt) => evt.error.clone(),
|
||||
_ => format!("Received a failed event of type '{}'", event.raw_type()),
|
||||
};
|
||||
*self.backoff_status.write().await = Some(StatusInfo::failed(&failed_message));
|
||||
// TODO(#253): Here we could refer to a stored previous duration and increase it
|
||||
self.set_timed_status_cleanup(std::time::Duration::from_secs(5))
|
||||
.await;
|
||||
}
|
||||
let data = serde_json::to_vec(&Notifications::RemoveExpectedEvent {
|
||||
name: model_name.to_owned(),
|
||||
scaler_id: self.scaler.id().to_owned(),
|
||||
|
|
@ -197,18 +264,58 @@ where
|
|||
// If a scaler is expecting events still, don't have it handle events. This is effectively
|
||||
// the backoff mechanism within wadm
|
||||
Vec::with_capacity(0)
|
||||
} else if self.backoff_status.read().await.is_some() {
|
||||
trace!("Scaler received event but is in backoff, ignoring");
|
||||
Vec::with_capacity(0)
|
||||
} else {
|
||||
trace!("Scaler is not backing off, handling event");
|
||||
trace!("Scaler is not backing off, checking configuration");
|
||||
let (mut config_commands, res) = get_commands_and_result(
|
||||
self.required_config
|
||||
.iter()
|
||||
.map(|config| async { config.handle_event(event).await }),
|
||||
"Errors occurred while handling event with config scalers",
|
||||
)
|
||||
.await;
|
||||
|
||||
if let Err(e) = res {
|
||||
error!(
|
||||
"Error occurred while handling event with config scalers: {}",
|
||||
e
|
||||
);
|
||||
}
|
||||
|
||||
let (mut secret_commands, res) = get_commands_and_result(
|
||||
self.required_secrets
|
||||
.iter()
|
||||
.map(|secret| async { secret.handle_event(event).await }),
|
||||
"Errors occurred while handling event with secret scalers",
|
||||
)
|
||||
.await;
|
||||
|
||||
if let Err(e) = res {
|
||||
error!(
|
||||
"Error occurred while handling event with secret scalers: {}",
|
||||
e
|
||||
);
|
||||
}
|
||||
|
||||
// If the config scalers or secret scalers have commands to send, return them
|
||||
if !config_commands.is_empty() || !secret_commands.is_empty() {
|
||||
config_commands.append(&mut secret_commands);
|
||||
return Ok(config_commands);
|
||||
}
|
||||
|
||||
trace!("Scaler required configuration is present, handling event");
|
||||
let commands = self.scaler.handle_event(event).await?;
|
||||
|
||||
// Based on the commands, compute the events that we expect to see for this scaler. The scaler
|
||||
// will then ignore incoming events until all of the expected events have been received.
|
||||
let expected_events = commands
|
||||
.iter()
|
||||
.filter_map(|cmd| cmd.corresponding_event(model_name));
|
||||
let expected_events = commands.iter().filter_map(|cmd| cmd.corresponding_event());
|
||||
|
||||
self.add_events(expected_events, false).await;
|
||||
|
||||
// Only let other scalers know if we generated commands to take
|
||||
if !commands.is_empty() {
|
||||
if !self.expected_events.read().await.is_empty() {
|
||||
trace!("Scaler generated commands, notifying other scalers to register expected events");
|
||||
let data = serde_json::to_vec(&Notifications::RegisterExpectedEvents {
|
||||
name: model_name.to_owned(),
|
||||
|
|
@ -220,8 +327,6 @@ where
|
|||
.publish(data, Some(&self.notify_subject))
|
||||
.await?;
|
||||
}
|
||||
|
||||
self.add_events(expected_events, false).await;
|
||||
commands
|
||||
};
|
||||
|
||||
|
|
@ -233,28 +338,58 @@ where
|
|||
// If we're already in backoff, return an empty list
|
||||
let current_event_count = self.event_count().await;
|
||||
if current_event_count > 0 {
|
||||
trace!(%current_event_count, "Scaler is backing off, not reconciling");
|
||||
trace!(%current_event_count, "Scaler is awaiting an event, not reconciling");
|
||||
return Ok(Vec::with_capacity(0));
|
||||
}
|
||||
if self.backoff_status.read().await.is_some() {
|
||||
tracing::info!(%current_event_count, "Scaler is backing off, not reconciling");
|
||||
return Ok(Vec::with_capacity(0));
|
||||
}
|
||||
|
||||
let mut commands = Vec::new();
|
||||
for config in &self.required_config {
|
||||
config.reconcile().await?.into_iter().for_each(|cmd| {
|
||||
commands.push(cmd);
|
||||
});
|
||||
}
|
||||
|
||||
let mut secret_commands = Vec::new();
|
||||
for secret in &self.required_secrets {
|
||||
secret.reconcile().await?.into_iter().for_each(|cmd| {
|
||||
secret_commands.push(cmd);
|
||||
});
|
||||
}
|
||||
commands.append(secret_commands.as_mut());
|
||||
|
||||
if !commands.is_empty() {
|
||||
return Ok(commands);
|
||||
}
|
||||
|
||||
match self.scaler.reconcile().await {
|
||||
// "Back off" scaler with expected corresponding events if the scaler generated commands
|
||||
Ok(commands) if !commands.is_empty() => {
|
||||
trace!("Reconcile generated commands, notifying other scalers to register expected events");
|
||||
let data = serde_json::to_vec(&Notifications::RegisterExpectedEvents {
|
||||
name: self.model_name.to_owned(),
|
||||
scaler_id: self.scaler.id().to_owned(),
|
||||
triggering_event: None,
|
||||
})?;
|
||||
self.notifier
|
||||
.publish(data, Some(&self.notify_subject))
|
||||
.await?;
|
||||
// Generate expected events
|
||||
self.add_events(
|
||||
commands
|
||||
.iter()
|
||||
.filter_map(|command| command.corresponding_event(&self.model_name)),
|
||||
.filter_map(|command| command.corresponding_event()),
|
||||
true,
|
||||
)
|
||||
.await;
|
||||
|
||||
if !self.expected_events.read().await.is_empty() {
|
||||
trace!("Reconcile generated expected events, notifying other scalers to register expected events");
|
||||
let data = serde_json::to_vec(&Notifications::RegisterExpectedEvents {
|
||||
name: self.model_name.to_owned(),
|
||||
scaler_id: self.scaler.id().to_owned(),
|
||||
triggering_event: None,
|
||||
})?;
|
||||
self.notifier
|
||||
.publish(data, Some(&self.notify_subject))
|
||||
.await?;
|
||||
return Ok(commands);
|
||||
}
|
||||
|
||||
Ok(commands)
|
||||
}
|
||||
Ok(commands) => {
|
||||
|
|
@ -265,8 +400,35 @@ where
|
|||
}
|
||||
}
|
||||
|
||||
async fn cleanup_internal(&self) -> Result<Vec<Command>> {
|
||||
let mut commands = self.scaler.cleanup().await.unwrap_or_default();
|
||||
for config in self.required_config.iter() {
|
||||
match config.cleanup().await {
|
||||
Ok(cmds) => commands.extend(cmds),
|
||||
// Explicitly logging, but continuing, in the case of an error to make sure
|
||||
// we don't prevent other cleanup tasks from running
|
||||
Err(e) => {
|
||||
error!("Error occurred while cleaning up config scalers: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for secret in self.required_secrets.iter() {
|
||||
match secret.cleanup().await {
|
||||
Ok(cmds) => commands.extend(cmds),
|
||||
// Explicitly logging, but continuing, in the case of an error to make sure
|
||||
// we don't prevent other cleanup tasks from running
|
||||
Err(e) => {
|
||||
error!("Error occurred while cleaning up secret scalers: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(commands)
|
||||
}
|
||||
|
||||
/// Sets a timed cleanup task to clear the expected events list after a timeout
|
||||
async fn set_timed_cleanup(&self) {
|
||||
async fn set_timed_event_cleanup(&self) {
|
||||
let mut event_cleaner = self.event_cleaner.lock().await;
|
||||
// Clear any existing handle
|
||||
if let Some(handle) = event_cleaner.take() {
|
||||
|
|
@ -284,12 +446,30 @@ where
|
|||
.instrument(tracing::trace_span!("event_cleaner", scaler_id = %self.id())),
|
||||
));
|
||||
}
|
||||
|
||||
/// Sets a timed cleanup task to clear the expected events list after a timeout
|
||||
async fn set_timed_status_cleanup(&self, timeout: Duration) {
|
||||
let mut status_cleaner = self.status_cleaner.lock().await;
|
||||
// Clear any existing handle
|
||||
if let Some(handle) = status_cleaner.take() {
|
||||
handle.abort();
|
||||
}
|
||||
let backoff_status = self.backoff_status.clone();
|
||||
|
||||
*status_cleaner = Some(tokio::spawn(
|
||||
async move {
|
||||
tokio::time::sleep(timeout).await;
|
||||
trace!("Reached status cleanup timeout, clearing backoff status");
|
||||
backoff_status.write().await.take();
|
||||
}
|
||||
.instrument(tracing::trace_span!("status_cleaner", scaler_id = %self.id())),
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
/// The [Scaler](Scaler) trait implementation for the [BackoffAwareScaler](BackoffAwareScaler)
|
||||
/// is mostly a simple wrapper, with two exceptions, which allow scalers to sync expected
|
||||
/// events between different wadm instances.
|
||||
/// The [`Scaler`] trait implementation for the [`BackoffWrapper`] is mostly a simple wrapper,
|
||||
/// with three exceptions, which allow scalers to sync state between different wadm instances.
|
||||
///
|
||||
/// * `handle_event` calls an internal method that uses a notifier to publish notifications to
|
||||
/// all Scalers, even running on different wadm instances, to handle that event. The resulting
|
||||
|
|
@ -297,18 +477,35 @@ where
|
|||
/// * `reconcile` calls an internal method that uses a notifier to ensure all Scalers, even
|
||||
/// running on different wadm instances, compute their expected events in response to the
|
||||
/// reconciliation commands in order to "back off".
|
||||
impl<T, P> Scaler for BackoffAwareScaler<T, P>
|
||||
/// * `status` will first check to see if the scaler is in a backing off state, and if so, return
|
||||
/// the backoff status. Otherwise, it will return the status of the scaler.
|
||||
impl<T, P, C> Scaler for BackoffWrapper<T, P, C>
|
||||
where
|
||||
T: Scaler + Send + Sync,
|
||||
P: Publisher + Send + Sync + 'static,
|
||||
C: ConfigSource + SecretSource + Send + Sync + Clone + 'static,
|
||||
{
|
||||
fn id(&self) -> &str {
|
||||
// Pass through the ID of the wrapped scaler
|
||||
self.scaler.id()
|
||||
}
|
||||
|
||||
fn kind(&self) -> &str {
|
||||
// Pass through the kind of the wrapped scaler
|
||||
self.scaler.kind()
|
||||
}
|
||||
|
||||
fn name(&self) -> String {
|
||||
self.scaler.name()
|
||||
}
|
||||
|
||||
async fn status(&self) -> StatusInfo {
|
||||
self.scaler.status().await
|
||||
// If the scaler has a backoff status, return that, otherwise return the status of the scaler
|
||||
if let Some(status) = self.backoff_status.read().await.clone() {
|
||||
status
|
||||
} else {
|
||||
self.scaler.status().await
|
||||
}
|
||||
}
|
||||
|
||||
async fn update_config(&mut self, config: TraitProperty) -> Result<Vec<Command>> {
|
||||
|
|
@ -324,7 +521,7 @@ where
|
|||
}
|
||||
|
||||
async fn cleanup(&self) -> Result<Vec<Command>> {
|
||||
self.scaler.cleanup().await
|
||||
self.cleanup_internal().await
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -338,109 +535,77 @@ where
|
|||
fn evt_matches_expected(incoming: &Event, expected: &Event) -> bool {
|
||||
match (incoming, expected) {
|
||||
(
|
||||
// NOTE(brooksmtownsend): It may be worth it to simply use the count here as
|
||||
// extra information. If we receive the exact event but the count is different, that
|
||||
// may mean some instances failed to start on that host. The cause for this isn't
|
||||
// well known but if we find ourselves missing expected events we should revisit
|
||||
Event::ActorsStarted(ActorsStarted {
|
||||
Event::ProviderStarted(ProviderStarted {
|
||||
annotations: a1,
|
||||
image_ref: i1,
|
||||
count: c1,
|
||||
host_id: h1,
|
||||
provider_id: p1,
|
||||
..
|
||||
}),
|
||||
Event::ProviderStarted(ProviderStarted {
|
||||
annotations: a2,
|
||||
image_ref: i2,
|
||||
host_id: h2,
|
||||
provider_id: p2,
|
||||
..
|
||||
}),
|
||||
) => a1 == a2 && i1 == i2 && p1 == p2 && h1 == h2,
|
||||
(
|
||||
Event::ProviderStartFailed(ProviderStartFailed {
|
||||
provider_id: p1,
|
||||
provider_ref: i1,
|
||||
host_id: h1,
|
||||
..
|
||||
}),
|
||||
Event::ActorsStarted(ActorsStarted {
|
||||
Event::ProviderStartFailed(ProviderStartFailed {
|
||||
provider_id: p2,
|
||||
provider_ref: i2,
|
||||
host_id: h2,
|
||||
..
|
||||
}),
|
||||
) => p1 == p2 && h1 == h2 && i1 == i2,
|
||||
(
|
||||
Event::ComponentScaled(ComponentScaled {
|
||||
annotations: a1,
|
||||
image_ref: i1,
|
||||
component_id: c1,
|
||||
host_id: h1,
|
||||
..
|
||||
}),
|
||||
Event::ComponentScaled(ComponentScaled {
|
||||
annotations: a2,
|
||||
image_ref: i2,
|
||||
count: c2,
|
||||
component_id: c2,
|
||||
host_id: h2,
|
||||
..
|
||||
}),
|
||||
) => a1 == a2 && i1 == i2 && c1 == c2 && h1 == h2,
|
||||
(
|
||||
Event::ActorsStartFailed(ActorsStartFailed {
|
||||
Event::ComponentScaleFailed(ComponentScaleFailed {
|
||||
annotations: a1,
|
||||
image_ref: i1,
|
||||
component_id: c1,
|
||||
host_id: h1,
|
||||
..
|
||||
}),
|
||||
Event::ActorsStartFailed(ActorsStartFailed {
|
||||
Event::ComponentScaleFailed(ComponentScaleFailed {
|
||||
annotations: a2,
|
||||
image_ref: i2,
|
||||
component_id: c2,
|
||||
host_id: h2,
|
||||
..
|
||||
}),
|
||||
) => a1 == a2 && i1 == i2 && h1 == h2,
|
||||
(
|
||||
Event::ActorsStopped(ActorsStopped {
|
||||
annotations: a1,
|
||||
public_key: p1,
|
||||
count: c1,
|
||||
host_id: h1,
|
||||
..
|
||||
}),
|
||||
Event::ActorsStopped(ActorsStopped {
|
||||
annotations: a2,
|
||||
public_key: p2,
|
||||
count: c2,
|
||||
host_id: h2,
|
||||
..
|
||||
}),
|
||||
) => a1 == a2 && p1 == p2 && c1 == c2 && h1 == h2,
|
||||
(
|
||||
Event::ProviderStarted(ProviderStarted {
|
||||
annotations: a1,
|
||||
image_ref: i1,
|
||||
link_name: l1,
|
||||
host_id: h1,
|
||||
..
|
||||
}),
|
||||
Event::ProviderStarted(ProviderStarted {
|
||||
annotations: a2,
|
||||
image_ref: i2,
|
||||
link_name: l2,
|
||||
host_id: h2,
|
||||
..
|
||||
}),
|
||||
) => a1 == a2 && i1 == i2 && l1 == l2 && h1 == h2,
|
||||
// NOTE(brooksmtownsend): This is a little less information than we really need here.
|
||||
// Image reference + annotations would be nice
|
||||
(
|
||||
Event::ProviderStartFailed(ProviderStartFailed {
|
||||
link_name: l1,
|
||||
host_id: h1,
|
||||
..
|
||||
}),
|
||||
Event::ProviderStartFailed(ProviderStartFailed {
|
||||
link_name: l2,
|
||||
host_id: h2,
|
||||
..
|
||||
}),
|
||||
) => l1 == l2 && h1 == h2,
|
||||
(
|
||||
Event::LinkdefSet(LinkdefSet {
|
||||
linkdef:
|
||||
Linkdef {
|
||||
actor_id: a1,
|
||||
contract_id: c1,
|
||||
link_name: l1,
|
||||
provider_id: p1,
|
||||
values: v1,
|
||||
..
|
||||
},
|
||||
}),
|
||||
Event::LinkdefSet(LinkdefSet {
|
||||
linkdef:
|
||||
Linkdef {
|
||||
actor_id: a2,
|
||||
contract_id: c2,
|
||||
link_name: l2,
|
||||
provider_id: p2,
|
||||
values: v2,
|
||||
..
|
||||
},
|
||||
}),
|
||||
) => a1 == a2 && c1 == c2 && l1 == l2 && p1 == p2 && v1 == v2,
|
||||
) => a1 == a2 && i1 == i2 && c1 == c2 && h1 == h2,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Computes the sha256 digest of the given parameters to form a unique ID for a scaler
|
||||
pub(crate) fn compute_id_sha256(params: &[&str]) -> String {
|
||||
let mut hasher = Sha256::new();
|
||||
for param in params {
|
||||
hasher.update(param.as_bytes())
|
||||
}
|
||||
let hash = hasher.finalize();
|
||||
format!("{hash:x}")
|
||||
}
|
||||
|
|
@ -0,0 +1,316 @@
|
|||
use anyhow::{Context, Result};
|
||||
use async_trait::async_trait;
|
||||
use tokio::sync::RwLock;
|
||||
use tracing::{debug, error, instrument, trace};
|
||||
use wadm_types::{
|
||||
api::{StatusInfo, StatusType},
|
||||
Policy, SecretProperty, TraitProperty,
|
||||
};
|
||||
use wasmcloud_secrets_types::SecretConfig;
|
||||
|
||||
use crate::{
|
||||
commands::{Command, DeleteConfig, PutConfig},
|
||||
events::{ConfigDeleted, ConfigSet, Event},
|
||||
scaler::Scaler,
|
||||
workers::SecretSource,
|
||||
};
|
||||
|
||||
use super::compute_id_sha256;
|
||||
|
||||
const SECRET_SCALER_KIND: &str = "SecretScaler";
|
||||
|
||||
pub struct SecretScaler<SecretSource> {
|
||||
secret_source: SecretSource,
|
||||
/// The key to use in the configdata bucket for this secret
|
||||
secret_name: String,
|
||||
secret_config: SecretConfig,
|
||||
id: String,
|
||||
status: RwLock<StatusInfo>,
|
||||
}
|
||||
|
||||
impl<S: SecretSource> SecretScaler<S> {
|
||||
pub fn new(
|
||||
secret_name: String,
|
||||
policy: Policy,
|
||||
secret_property: SecretProperty,
|
||||
secret_source: S,
|
||||
) -> Self {
|
||||
// Compute the id of this scaler based on all of the values that make it unique.
|
||||
// This is used during upgrades to determine if a scaler is the same as a previous one.
|
||||
let mut id_parts = vec![
|
||||
secret_name.as_str(),
|
||||
policy.name.as_str(),
|
||||
policy.policy_type.as_str(),
|
||||
secret_property.name.as_str(),
|
||||
secret_property.properties.policy.as_str(),
|
||||
secret_property.properties.key.as_str(),
|
||||
];
|
||||
if let Some(version) = secret_property.properties.version.as_ref() {
|
||||
id_parts.push(version.as_str());
|
||||
}
|
||||
id_parts.extend(
|
||||
policy
|
||||
.properties
|
||||
.iter()
|
||||
.flat_map(|(k, v)| vec![k.as_str(), v.as_str()]),
|
||||
);
|
||||
let id = compute_id_sha256(&id_parts);
|
||||
|
||||
let secret_config = config_from_manifest_structures(policy, secret_property)
|
||||
.expect("failed to create secret config from policy and secret properties");
|
||||
|
||||
Self {
|
||||
id,
|
||||
secret_name,
|
||||
secret_config,
|
||||
secret_source,
|
||||
status: RwLock::new(StatusInfo::reconciling("")),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<S: SecretSource + Send + Sync + Clone> Scaler for SecretScaler<S> {
|
||||
fn id(&self) -> &str {
|
||||
&self.id
|
||||
}
|
||||
|
||||
fn kind(&self) -> &str {
|
||||
SECRET_SCALER_KIND
|
||||
}
|
||||
|
||||
fn name(&self) -> String {
|
||||
self.secret_config.name.to_string()
|
||||
}
|
||||
|
||||
async fn status(&self) -> StatusInfo {
|
||||
let _ = self.reconcile().await;
|
||||
self.status.read().await.to_owned()
|
||||
}
|
||||
|
||||
async fn update_config(&mut self, _config: TraitProperty) -> Result<Vec<Command>> {
|
||||
debug!("SecretScaler does not support updating config, ignoring");
|
||||
Ok(vec![])
|
||||
}
|
||||
|
||||
async fn handle_event(&self, event: &Event) -> Result<Vec<Command>> {
|
||||
match event {
|
||||
Event::ConfigSet(ConfigSet { config_name })
|
||||
| Event::ConfigDeleted(ConfigDeleted { config_name }) => {
|
||||
if config_name == &self.secret_name {
|
||||
return self.reconcile().await;
|
||||
}
|
||||
}
|
||||
// This is a workaround to ensure that the config has a chance to periodically
|
||||
// update itself if it is out of sync. For efficiency, we only fetch configuration
|
||||
// again if the status is not deployed.
|
||||
Event::HostHeartbeat(_) => {
|
||||
if !matches!(self.status.read().await.status_type, StatusType::Deployed) {
|
||||
return self.reconcile().await;
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
trace!("SecretScaler does not support this event, ignoring");
|
||||
}
|
||||
}
|
||||
Ok(Vec::new())
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip_all, scaler_id = %self.id)]
|
||||
async fn reconcile(&self) -> Result<Vec<Command>> {
|
||||
debug!(self.secret_name, "Fetching configuration");
|
||||
match self.secret_source.get_secret(&self.secret_name).await {
|
||||
// If configuration matches what's supplied, this scaler is deployed
|
||||
Ok(Some(config)) if config == self.secret_config => {
|
||||
*self.status.write().await = StatusInfo::deployed("");
|
||||
Ok(Vec::new())
|
||||
}
|
||||
// If configuration is out of sync, we put the configuration
|
||||
Ok(_config) => {
|
||||
debug!(self.secret_name, "Putting secret");
|
||||
|
||||
match self.secret_config.clone().try_into() {
|
||||
Ok(config) => {
|
||||
*self.status.write().await = StatusInfo::reconciling("Secret out of sync");
|
||||
|
||||
Ok(vec![Command::PutConfig(PutConfig {
|
||||
config_name: self.secret_name.clone(),
|
||||
config,
|
||||
})])
|
||||
}
|
||||
Err(e) => {
|
||||
*self.status.write().await = StatusInfo::failed(&format!(
|
||||
"Failed to convert secret config to map: {}.",
|
||||
e
|
||||
));
|
||||
Ok(vec![])
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
error!(error = %e, "SecretScaler failed to fetch configuration");
|
||||
*self.status.write().await = StatusInfo::failed(&e.to_string());
|
||||
Ok(Vec::new())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip_all)]
|
||||
async fn cleanup(&self) -> Result<Vec<Command>> {
|
||||
Ok(vec![Command::DeleteConfig(DeleteConfig {
|
||||
config_name: self.secret_name.clone(),
|
||||
})])
|
||||
}
|
||||
}
|
||||
|
||||
/// Merge policy and properties into a [`SecretConfig`] for later use.
|
||||
fn config_from_manifest_structures(
|
||||
policy: Policy,
|
||||
reference: SecretProperty,
|
||||
) -> anyhow::Result<SecretConfig> {
|
||||
let mut policy_properties = policy.properties.clone();
|
||||
let backend = policy_properties
|
||||
.remove("backend")
|
||||
.context("policy did not have a backend property")?;
|
||||
Ok(SecretConfig::new(
|
||||
reference.name.clone(),
|
||||
backend,
|
||||
reference.properties.key.clone(),
|
||||
reference.properties.field.clone(),
|
||||
reference.properties.version.clone(),
|
||||
policy_properties
|
||||
.into_iter()
|
||||
.map(|(k, v)| (k, v.into()))
|
||||
.collect(),
|
||||
))
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::config_from_manifest_structures;

    use crate::{
        commands::{Command, PutConfig},
        events::{ConfigDeleted, Event, HostHeartbeat},
        scaler::Scaler,
        test_util::TestLatticeSource,
    };
    use std::collections::{BTreeMap, HashMap};
    use wadm_types::{api::StatusType, Policy, SecretProperty, SecretSourceProperty};

    // End-to-end exercise of the SecretScaler state machine: initial status,
    // reconcile output, relevant/irrelevant config deletions, and the
    // heartbeat-driven periodic reconcile.
    #[tokio::test]
    async fn test_secret_scaler() {
        // Empty lattice: no existing secret config, so every reconcile should
        // want to (re)put the secret.
        let lattice = TestLatticeSource {
            claims: HashMap::new(),
            inventory: Default::default(),
            links: Vec::new(),
            config: HashMap::new(),
        };

        // Minimal policy with the required `backend` property.
        let policy = Policy {
            name: "nats-kv".to_string(),
            policy_type: "secrets-backend".to_string(),
            properties: BTreeMap::from([("backend".to_string(), "nats-kv".to_string())]),
        };

        let secret = SecretProperty {
            name: "test".to_string(),
            properties: SecretSourceProperty {
                policy: "nats-kv".to_string(),
                key: "test".to_string(),
                field: None,
                version: None,
            },
        };

        let secret_scaler = super::SecretScaler::new(
            secret.name.clone(),
            policy.clone(),
            secret.clone(),
            lattice.clone(),
        );

        // A freshly constructed scaler starts out reconciling.
        assert_eq!(
            secret_scaler.status().await.status_type,
            StatusType::Reconciling
        );

        // Expected merged config used by the asserts below.
        let cfg = config_from_manifest_structures(policy, secret.clone())
            .expect("failed to merge policy");

        // No config exists in the lattice, so reconcile should emit a PutConfig.
        assert_eq!(
            secret_scaler
                .reconcile()
                .await
                .expect("reconcile did not succeed"),
            vec![Command::PutConfig(PutConfig {
                config_name: secret.name.clone(),
                config: cfg.clone().try_into().expect("should convert to map"),
            })],
        );

        assert_eq!(
            secret_scaler.status().await.status_type,
            StatusType::Reconciling
        );

        // Configuration deleted, relevant
        assert_eq!(
            secret_scaler
                .handle_event(&Event::ConfigDeleted(ConfigDeleted {
                    config_name: secret.name.clone()
                }))
                .await
                .expect("handle_event should succeed"),
            vec![Command::PutConfig(PutConfig {
                config_name: secret.name.clone(),
                config: cfg.clone().try_into().expect("should convert to map"),
            })]
        );
        assert_eq!(
            secret_scaler.status().await.status_type,
            StatusType::Reconciling
        );

        // Configuration deleted, irrelevant: a different config name must be
        // ignored (no commands emitted).
        assert_eq!(
            secret_scaler
                .handle_event(&Event::ConfigDeleted(ConfigDeleted {
                    config_name: "some_other_config".to_string()
                }))
                .await
                .expect("handle_event should succeed"),
            vec![]
        );
        assert_eq!(
            secret_scaler.status().await.status_type,
            StatusType::Reconciling
        );

        // Periodic reconcile with host heartbeat: since the scaler is not yet
        // deployed, a heartbeat triggers another reconcile and another put.
        assert_eq!(
            secret_scaler
                .handle_event(&Event::HostHeartbeat(HostHeartbeat {
                    components: Vec::new(),
                    providers: Vec::new(),
                    host_id: String::default(),
                    issuer: String::default(),
                    friendly_name: String::default(),
                    labels: HashMap::new(),
                    version: semver::Version::new(0, 0, 0),
                    uptime_human: String::default(),
                    uptime_seconds: 0,
                }))
                .await
                .expect("handle_event should succeed"),
            vec![Command::PutConfig(PutConfig {
                config_name: secret.name.clone(),
                config: cfg.clone().try_into().expect("should convert to map"),
            })]
        );
        assert_eq!(
            secret_scaler.status().await.status_type,
            StatusType::Reconciling
        );
    }
}
|
||||
|
|
@ -0,0 +1,612 @@
|
|||
use anyhow::Result;
|
||||
use async_trait::async_trait;
|
||||
use tokio::sync::RwLock;
|
||||
use tracing::instrument;
|
||||
use wadm_types::{api::StatusInfo, TraitProperty};
|
||||
|
||||
use crate::{
|
||||
commands::{Command, DeleteLink, PutLink},
|
||||
events::{
|
||||
Event, LinkdefDeleted, LinkdefSet, ProviderHealthCheckInfo, ProviderHealthCheckPassed,
|
||||
ProviderHealthCheckStatus,
|
||||
},
|
||||
scaler::{compute_id_sha256, Scaler},
|
||||
storage::ReadStore,
|
||||
workers::LinkSource,
|
||||
};
|
||||
|
||||
pub const LINK_SCALER_KIND: &str = "LinkScaler";

/// Configuration values for a [`LinkScaler`]
pub struct LinkScalerConfig {
    /// Component identifier for the source of the link
    pub source_id: String,
    /// Target identifier or group for the link
    pub target: String,
    /// WIT Namespace for the link
    pub wit_namespace: String,
    /// WIT Package for the link
    pub wit_package: String,
    /// WIT Interfaces for the link
    pub wit_interfaces: Vec<String>,
    /// Name of the link
    pub name: String,
    /// Lattice ID the Link is configured for
    pub lattice_id: String,
    /// The name of the wadm model this scaler is under
    pub model_name: String,
    /// List of configurations for the source of this link
    pub source_config: Vec<String>,
    /// List of configurations for the target of this link
    pub target_config: Vec<String>,
}
|
||||
|
||||
/// The LinkScaler ensures that link configuration exists on a specified lattice.
pub struct LinkScaler<S, L> {
    /// Desired link configuration for this scaler
    pub config: LinkScalerConfig,
    // TODO(#253): Reenable once we figure out https://github.com/wasmCloud/wadm/issues/123
    #[allow(unused)]
    store: S,
    /// Client used to query the current links in the lattice
    ctl_client: L,
    /// Unique, deterministic ID for this scaler (sha256 of its config values)
    id: String,
    /// Most recently computed status, updated on every reconcile
    status: RwLock<StatusInfo>,
}
|
||||
|
||||
#[async_trait]
|
||||
impl<S, L> Scaler for LinkScaler<S, L>
|
||||
where
|
||||
S: ReadStore + Send + Sync,
|
||||
L: LinkSource + Send + Sync,
|
||||
{
|
||||
fn id(&self) -> &str {
|
||||
&self.id
|
||||
}
|
||||
|
||||
fn kind(&self) -> &str {
|
||||
LINK_SCALER_KIND
|
||||
}
|
||||
|
||||
fn name(&self) -> String {
|
||||
format!(
|
||||
"{} -({}:{})-> {}",
|
||||
self.config.source_id,
|
||||
self.config.wit_namespace,
|
||||
self.config.wit_package,
|
||||
self.config.target
|
||||
)
|
||||
}
|
||||
|
||||
async fn status(&self) -> StatusInfo {
|
||||
let _ = self.reconcile().await;
|
||||
self.status.read().await.to_owned()
|
||||
}
|
||||
|
||||
async fn update_config(&mut self, _config: TraitProperty) -> Result<Vec<Command>> {
|
||||
// NOTE(brooksmtownsend): Updating a link scaler essentially means you're creating
|
||||
// a totally new scaler, so just do that instead.
|
||||
self.reconcile().await
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip_all, fields(scaler_id = %self.id))]
|
||||
async fn handle_event(&self, event: &Event) -> Result<Vec<Command>> {
|
||||
match event {
|
||||
// Trigger linkdef creation if this component starts and belongs to this model
|
||||
Event::ComponentScaled(evt) if evt.component_id == self.config.source_id || evt.component_id == self.config.target => {
|
||||
self.reconcile().await
|
||||
}
|
||||
Event::ProviderHealthCheckPassed(ProviderHealthCheckPassed {
|
||||
data: ProviderHealthCheckInfo { provider_id, .. },
|
||||
..
|
||||
})
|
||||
| Event::ProviderHealthCheckStatus(ProviderHealthCheckStatus {
|
||||
data: ProviderHealthCheckInfo { provider_id, .. },
|
||||
..
|
||||
// NOTE(brooksmtownsend): Ideally we shouldn't actually care about the target being healthy, but
|
||||
// I'm leaving this part in for now to avoid strange conditions in the future where we might want
|
||||
// to re-put a link or at least reconcile if the target changes health status.
|
||||
}) if provider_id == &self.config.source_id || provider_id == &self.config.target => {
|
||||
// Wait until we know the provider is healthy before we link. This also avoids the race condition
|
||||
// where a provider is started by the host
|
||||
self.reconcile().await
|
||||
}
|
||||
Event::LinkdefDeleted(LinkdefDeleted {
|
||||
source_id,
|
||||
wit_namespace,
|
||||
wit_package,
|
||||
name,
|
||||
}) if source_id == &self.config.source_id
|
||||
&& name == &self.config.name
|
||||
&& wit_namespace == &self.config.wit_namespace
|
||||
&& wit_package == &self.config.wit_package =>
|
||||
{
|
||||
self.reconcile().await
|
||||
}
|
||||
Event::LinkdefSet(LinkdefSet { linkdef })
|
||||
if linkdef.source_id() == self.config.source_id
|
||||
&& linkdef.target() == self.config.target
|
||||
&& linkdef.name() == self.config.name =>
|
||||
{
|
||||
*self.status.write().await = StatusInfo::deployed("");
|
||||
Ok(Vec::new())
|
||||
}
|
||||
_ => Ok(Vec::new()),
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip_all, fields(source_id = %self.config.source_id, target = %self.config.source_id, link_name = %self.config.name, scaler_id = %self.id))]
|
||||
async fn reconcile(&self) -> Result<Vec<Command>> {
|
||||
let source_id = &self.config.source_id;
|
||||
let target = &self.config.target;
|
||||
let linkdefs = self.ctl_client.get_links().await?;
|
||||
let (exists, _config_different) = linkdefs
|
||||
.into_iter()
|
||||
.find(|linkdef| {
|
||||
linkdef.source_id() == source_id
|
||||
&& linkdef.target() == target
|
||||
&& linkdef.name() == self.config.name
|
||||
})
|
||||
.map(|linkdef| {
|
||||
(
|
||||
true,
|
||||
// TODO(#88): reverse compare too
|
||||
// Ensure all supplied configs (both source and target) are the same
|
||||
linkdef
|
||||
.source_config()
|
||||
.iter()
|
||||
.eq(self.config.source_config.iter())
|
||||
&& linkdef
|
||||
.target_config()
|
||||
.iter()
|
||||
.eq(self.config.target_config.iter()),
|
||||
)
|
||||
})
|
||||
.unwrap_or((false, false));
|
||||
|
||||
// TODO(#88)
|
||||
// If it already exists, but values are different, we need to have a delete event first
|
||||
// and recreate it with the correct values second
|
||||
// let mut commands = values_different
|
||||
// .then(|| {
|
||||
// trace!("Linkdef exists, but values are different, deleting and recreating");
|
||||
// vec![Command::DeleteLinkdef(DeleteLinkdef {
|
||||
// component_id: component_id.to_owned(),
|
||||
// provider_id: provider_id.to_owned(),
|
||||
// contract_id: self.config.provider_contract_id.to_owned(),
|
||||
// link_name: self.config.provider_link_name.to_owned(),
|
||||
// model_name: self.config.model_name.to_owned(),
|
||||
// })]
|
||||
// })
|
||||
// .unwrap_or_default();
|
||||
|
||||
// if exists && !values_different {
|
||||
// trace!("Linkdef already exists, skipping");
|
||||
// } else if !exists || values_different {
|
||||
// trace!("Linkdef does not exist or needs to be recreated");
|
||||
// commands.push(Command::PutLinkdef(PutLinkdef {
|
||||
// component_id: component_id.to_owned(),
|
||||
// provider_id: provider_id.to_owned(),
|
||||
// link_name: self.config.provider_link_name.to_owned(),
|
||||
// contract_id: self.config.provider_contract_id.to_owned(),
|
||||
// values: self.config.values.to_owned(),
|
||||
// model_name: self.config.model_name.to_owned(),
|
||||
// }))
|
||||
// };
|
||||
|
||||
let commands = if !exists {
|
||||
*self.status.write().await = StatusInfo::reconciling(&format!(
|
||||
"Putting link definition between {source_id} and {target}"
|
||||
));
|
||||
vec![Command::PutLink(PutLink {
|
||||
source_id: self.config.source_id.to_owned(),
|
||||
target: self.config.target.to_owned(),
|
||||
name: self.config.name.to_owned(),
|
||||
wit_namespace: self.config.wit_namespace.to_owned(),
|
||||
wit_package: self.config.wit_package.to_owned(),
|
||||
interfaces: self.config.wit_interfaces.to_owned(),
|
||||
source_config: self.config.source_config.clone(),
|
||||
target_config: self.config.target_config.clone(),
|
||||
model_name: self.config.model_name.to_owned(),
|
||||
})]
|
||||
} else {
|
||||
*self.status.write().await = StatusInfo::deployed("");
|
||||
Vec::with_capacity(0)
|
||||
};
|
||||
Ok(commands)
|
||||
}
|
||||
|
||||
async fn cleanup(&self) -> Result<Vec<Command>> {
|
||||
Ok(vec![Command::DeleteLink(DeleteLink {
|
||||
model_name: self.config.model_name.to_owned(),
|
||||
source_id: self.config.source_id.to_owned(),
|
||||
link_name: self.config.name.to_owned(),
|
||||
wit_namespace: self.config.wit_namespace.to_owned(),
|
||||
wit_package: self.config.wit_package.to_owned(),
|
||||
})])
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: ReadStore + Send + Sync, L: LinkSource> LinkScaler<S, L> {
|
||||
/// Construct a new LinkScaler with specified configuration values
|
||||
pub fn new(store: S, link_config: LinkScalerConfig, ctl_client: L) -> Self {
|
||||
// Compute the id of this scaler based on all of the configuration values
|
||||
// that make it unique. This is used during upgrades to determine if a
|
||||
// scaler is the same as a previous one.
|
||||
let mut id_parts = vec![
|
||||
LINK_SCALER_KIND,
|
||||
&link_config.model_name,
|
||||
&link_config.name,
|
||||
&link_config.source_id,
|
||||
&link_config.target,
|
||||
&link_config.wit_namespace,
|
||||
&link_config.wit_package,
|
||||
];
|
||||
id_parts.extend(
|
||||
link_config
|
||||
.wit_interfaces
|
||||
.iter()
|
||||
.map(std::string::String::as_str),
|
||||
);
|
||||
id_parts.extend(
|
||||
link_config
|
||||
.source_config
|
||||
.iter()
|
||||
.map(std::string::String::as_str),
|
||||
);
|
||||
id_parts.extend(
|
||||
link_config
|
||||
.target_config
|
||||
.iter()
|
||||
.map(std::string::String::as_str),
|
||||
);
|
||||
let id = compute_id_sha256(&id_parts);
|
||||
|
||||
Self {
|
||||
store,
|
||||
config: link_config,
|
||||
ctl_client,
|
||||
id,
|
||||
status: RwLock::new(StatusInfo::reconciling("")),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use std::{
        collections::{BTreeMap, HashMap, HashSet},
        sync::Arc,
        vec,
    };

    use wasmcloud_control_interface::Link;

    use chrono::Utc;

    use super::*;

    use crate::{
        events::{ComponentScaled, ProviderHealthCheckInfo, ProviderInfo},
        storage::{Component, Host, Provider, Store},
        test_util::{TestLatticeSource, TestStore},
        APP_SPEC_ANNOTATION,
    };

    // Seeds a fresh TestStore with one component (id "component") and one
    // provider (id "provider") so scalers under test have state to read.
    async fn create_store(lattice_id: &str, component_ref: &str, provider_ref: &str) -> TestStore {
        let store = TestStore::default();
        store
            .store(
                lattice_id,
                "component".to_string(),
                Component {
                    id: "component".to_string(),
                    reference: component_ref.to_owned(),
                    ..Default::default()
                },
            )
            .await
            .expect("Couldn't store component");
        store
            .store(
                lattice_id,
                "provider".to_string(),
                Provider {
                    id: "provider".to_string(),
                    reference: provider_ref.to_owned(),
                    ..Default::default()
                },
            )
            .await
            .expect("Couldn't store component");
        store
    }

    // Scalers with identical config must share an ID; differing config
    // (here: source/target config lists) must produce a different ID.
    #[tokio::test]
    async fn test_different_ids() {
        let lattice_id = "id_generator".to_string();
        let component_ref = "component_ref".to_string();
        let component_id = "component_id".to_string();
        let provider_ref = "provider_ref".to_string();
        let provider_id = "provider_id".to_string();

        let source_config = vec!["source_config".to_string()];
        let target_config = vec!["target_config".to_string()];

        let scaler = LinkScaler::new(
            create_store(&lattice_id, &component_ref, &provider_ref).await,
            LinkScalerConfig {
                source_id: provider_id.clone(),
                target: component_id.clone(),
                wit_namespace: "wit_namespace".to_string(),
                wit_package: "wit_package".to_string(),
                wit_interfaces: vec!["wit_interface".to_string()],
                name: "default".to_string(),
                lattice_id: lattice_id.clone(),
                model_name: "model".to_string(),
                source_config: source_config.clone(),
                target_config: target_config.clone(),
            },
            TestLatticeSource::default(),
        );

        // Identical configuration in a separate scaler instance.
        let other_same_scaler = LinkScaler::new(
            create_store(&lattice_id, &component_ref, &provider_ref).await,
            LinkScalerConfig {
                source_id: provider_id.clone(),
                target: component_id.clone(),
                wit_namespace: "wit_namespace".to_string(),
                wit_package: "wit_package".to_string(),
                wit_interfaces: vec!["wit_interface".to_string()],
                name: "default".to_string(),
                lattice_id: lattice_id.clone(),
                model_name: "model".to_string(),
                source_config: source_config.clone(),
                target_config: target_config.clone(),
            },
            TestLatticeSource::default(),
        );

        assert_eq!(scaler.id(), other_same_scaler.id(), "LinkScaler ID should be the same when scalers have the same type, model name, provider link name, component reference, provider reference, and values");

        // Same link identity, but different source/target config lists.
        let different_scaler = LinkScaler::new(
            create_store(&lattice_id, &component_ref, &provider_ref).await,
            LinkScalerConfig {
                source_id: provider_id.clone(),
                target: component_id.clone(),
                wit_namespace: "wit_namespace".to_string(),
                wit_package: "wit_package".to_string(),
                wit_interfaces: vec!["wit_interface".to_string()],
                name: "default".to_string(),
                lattice_id: lattice_id.clone(),
                model_name: "model".to_string(),
                source_config: vec!["foo".to_string()],
                target_config: vec!["bar".to_string()],
            },
            TestLatticeSource::default(),
        );

        assert_ne!(
            scaler.id(),
            different_scaler.id(),
            "LinkScaler ID should be different when scalers have different configured values"
        );
    }

    // With no linkdef in the lattice, reconcile must emit exactly one PutLink.
    #[tokio::test]
    async fn test_no_linkdef() {
        let lattice_id = "no-linkdef".to_string();
        let component_ref = "component_ref".to_string();
        let component_id = "component".to_string();
        let provider_ref = "provider_ref".to_string();
        let provider_id = "provider".to_string();

        let scaler = LinkScaler::new(
            create_store(&lattice_id, &component_ref, &provider_ref).await,
            LinkScalerConfig {
                source_id: component_id.clone(),
                target: provider_id.clone(),
                wit_namespace: "namespace".to_string(),
                wit_package: "package".to_string(),
                wit_interfaces: vec!["interface".to_string()],
                name: "default".to_string(),
                lattice_id: lattice_id.clone(),
                model_name: "model".to_string(),
                source_config: vec![],
                target_config: vec![],
            },
            TestLatticeSource::default(),
        );

        // Run a reconcile and make sure it returns a single put linkdef command
        let commands = scaler.reconcile().await.expect("Couldn't reconcile");
        assert_eq!(commands.len(), 1, "Expected 1 command, got {commands:?}");
        assert!(matches!(commands[0], Command::PutLink(_)));
    }

    // When a matching linkdef already exists, reconcile is a no-op.
    #[tokio::test]
    async fn test_existing_linkdef() {
        let lattice_id = "existing-linkdef".to_string();
        let component_ref = "component_ref".to_string();
        let component_id = "component".to_string();
        let provider_ref = "provider_ref".to_string();
        let provider_id = "provider".to_string();

        let linkdef = Link::builder()
            .source_id(&component_id)
            .target(&provider_id)
            .wit_namespace("namespace")
            .wit_package("package")
            .interfaces(vec!["interface".to_string()])
            .name("default")
            .build()
            .unwrap();

        let scaler = LinkScaler::new(
            create_store(&lattice_id, &component_ref, &provider_ref).await,
            LinkScalerConfig {
                source_id: linkdef.source_id().to_string(),
                target: linkdef.target().to_string(),
                wit_namespace: linkdef.wit_namespace().to_string(),
                wit_package: linkdef.wit_package().to_string(),
                wit_interfaces: linkdef.interfaces().clone(),
                name: linkdef.name().to_string(),
                source_config: vec![],
                target_config: vec![],
                lattice_id: lattice_id.clone(),
                model_name: "model".to_string(),
            },
            TestLatticeSource {
                links: vec![linkdef],
                ..Default::default()
            },
        );

        let commands = scaler.reconcile().await.expect("Couldn't reconcile");
        assert_eq!(
            commands.len(),
            0,
            "Scaler shouldn't have returned any commands"
        );
    }

    // End-to-end: events that should (and shouldn't) trigger a PutLink.
    #[tokio::test]
    async fn can_put_linkdef_from_triggering_events() {
        let lattice_id = "can_put_linkdef_from_triggering_events";
        let echo_ref = "fakecloud.azurecr.io/echo:0.3.4".to_string();
        let echo_id = "MASDASDIAMAREALCOMPONENTECHO";
        let httpserver_ref = "fakecloud.azurecr.io/httpserver:0.5.2".to_string();

        let host_id_one = "NASDASDIMAREALHOSTONE";

        let store = Arc::new(TestStore::default());

        // STATE SETUP BEGIN

        store
            .store(
                lattice_id,
                host_id_one.to_string(),
                Host {
                    components: HashMap::from_iter([(echo_id.to_string(), 1)]),
                    friendly_name: "hey".to_string(),
                    labels: HashMap::from_iter([
                        ("cloud".to_string(), "fake".to_string()),
                        ("region".to_string(), "us-brooks-1".to_string()),
                    ]),

                    providers: HashSet::from_iter([ProviderInfo {
                        provider_id: "VASDASD".to_string(),
                        provider_ref: httpserver_ref.to_string(),
                        annotations: BTreeMap::from_iter([(
                            APP_SPEC_ANNOTATION.to_string(),
                            "foobar".to_string(),
                        )]),
                    }]),
                    uptime_seconds: 123,
                    version: None,
                    id: host_id_one.to_string(),
                    last_seen: Utc::now(),
                },
            )
            .await
            .expect("should be able to store a host");

        store
            .store(
                lattice_id,
                "VASDASD".to_string(),
                Provider {
                    id: "VASDASD".to_string(),
                    reference: httpserver_ref.to_string(),
                    ..Default::default()
                },
            )
            .await
            .expect("should be able to store provider");

        // STATE SETUP END

        let link_scaler = LinkScaler::new(
            store.clone(),
            LinkScalerConfig {
                source_id: echo_id.to_string(),
                target: "VASDASD".to_string(),
                wit_namespace: "wasmcloud".to_string(),
                wit_package: "httpserver".to_string(),
                wit_interfaces: vec![],
                name: "default".to_string(),
                source_config: vec![],
                target_config: vec![],
                lattice_id: lattice_id.to_string(),
                model_name: "foobar".to_string(),
            },
            TestLatticeSource::default(),
        );

        let commands = link_scaler
            .reconcile()
            .await
            .expect("link scaler to handle reconcile");
        // Since no link exists, we should expect a put link command
        assert_eq!(commands.len(), 1);

        // Component starts, put into state and then handle event
        store
            .store(
                lattice_id,
                echo_id.to_string(),
                Component {
                    id: echo_id.to_string(),
                    reference: echo_ref.to_string(),
                    ..Default::default()
                },
            )
            .await
            .expect("should be able to store component");

        // A scale event for our source component should trigger a reconcile
        // and therefore another PutLink (link still doesn't exist).
        let commands = link_scaler
            .handle_event(&Event::ComponentScaled(ComponentScaled {
                annotations: BTreeMap::from_iter([(
                    APP_SPEC_ANNOTATION.to_string(),
                    "foobar".to_string(),
                )]),
                claims: None,
                image_ref: echo_ref,
                component_id: echo_id.to_string(),
                max_instances: 1,
                host_id: host_id_one.to_string(),
            }))
            .await
            .expect("should be able to handle components started event");

        assert_eq!(commands.len(), 1);

        let commands = link_scaler
            .handle_event(&Event::LinkdefSet(LinkdefSet {
                linkdef: Link::builder()
                    // NOTE: contract, link, and provider id matches but the component is different
                    .source_id("nm0001772")
                    .target("VASDASD")
                    .wit_namespace("wasmcloud")
                    .wit_package("httpserver")
                    .name("default")
                    .build()
                    .unwrap(),
            }))
            .await
            .expect("");
        // A linkdef for a different source must be ignored.
        assert!(commands.is_empty());

        // A health check pass for the target provider triggers a reconcile.
        let commands = link_scaler
            .handle_event(&Event::ProviderHealthCheckPassed(
                ProviderHealthCheckPassed {
                    data: ProviderHealthCheckInfo {
                        provider_id: "VASDASD".to_string(),
                        host_id: host_id_one.to_string(),
                    },
                },
            ))
            .await
            .expect("should be able to handle provider health check");
        assert_eq!(commands.len(), 1);
    }
}
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
|
|
@ -0,0 +1,66 @@
|
|||
use anyhow::Result;
|
||||
use async_trait::async_trait;
|
||||
use wadm_types::{api::StatusInfo, TraitProperty};
|
||||
|
||||
use crate::{commands::Command, events::Event, scaler::Scaler};
|
||||
|
||||
/// The StatusScaler is a scaler that only reports a predefined status and does not perform any actions.
/// It's primarily used as a placeholder for a scaler that wadm failed to initialize for reasons that
/// couldn't be caught during deployment, and will not be fixed until a new version of the app is deployed.
pub struct StatusScaler {
    // Unique ID reported by `Scaler::id`
    id: String,
    // Kind string reported by `Scaler::kind`
    kind: String,
    // Human-friendly name reported by `Scaler::name`
    name: String,
    // The fixed status this scaler always reports
    status: StatusInfo,
}
|
||||
|
||||
#[async_trait]
|
||||
impl Scaler for StatusScaler {
|
||||
fn id(&self) -> &str {
|
||||
&self.id
|
||||
}
|
||||
|
||||
fn kind(&self) -> &str {
|
||||
&self.kind
|
||||
}
|
||||
|
||||
fn name(&self) -> String {
|
||||
self.name.to_string()
|
||||
}
|
||||
|
||||
async fn status(&self) -> StatusInfo {
|
||||
self.status.clone()
|
||||
}
|
||||
|
||||
async fn update_config(&mut self, _config: TraitProperty) -> Result<Vec<Command>> {
|
||||
Ok(vec![])
|
||||
}
|
||||
|
||||
async fn handle_event(&self, _event: &Event) -> Result<Vec<Command>> {
|
||||
Ok(Vec::with_capacity(0))
|
||||
}
|
||||
|
||||
async fn reconcile(&self) -> Result<Vec<Command>> {
|
||||
Ok(Vec::with_capacity(0))
|
||||
}
|
||||
|
||||
async fn cleanup(&self) -> Result<Vec<Command>> {
|
||||
Ok(Vec::with_capacity(0))
|
||||
}
|
||||
}
|
||||
|
||||
impl StatusScaler {
|
||||
pub fn new(
|
||||
id: impl AsRef<str>,
|
||||
kind: impl AsRef<str>,
|
||||
name: impl AsRef<str>,
|
||||
status: StatusInfo,
|
||||
) -> Self {
|
||||
StatusScaler {
|
||||
id: id.as_ref().to_string(),
|
||||
kind: kind.as_ref().to_string(),
|
||||
name: name.as_ref().to_string(),
|
||||
status,
|
||||
}
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
|
|
@ -4,6 +4,7 @@ use async_nats::{
|
|||
};
|
||||
use futures::StreamExt;
|
||||
use tracing::{info, instrument, warn};
|
||||
use wadm_types::api::DEFAULT_WADM_TOPIC_PREFIX;
|
||||
|
||||
use crate::publisher::Publisher;
|
||||
|
||||
|
|
@ -11,16 +12,11 @@ mod handlers;
|
|||
mod notifier;
|
||||
mod parser;
|
||||
mod storage;
|
||||
mod types;
|
||||
|
||||
use handlers::Handler;
|
||||
pub use notifier::ManifestNotifier;
|
||||
pub use parser::CONTENT_TYPE_HEADER;
|
||||
pub(crate) use storage::ModelStorage;
|
||||
pub use types::*;
|
||||
|
||||
/// The default topic prefix for the wadm API;
|
||||
pub const DEFAULT_WADM_TOPIC_PREFIX: &str = "wadm.api";
|
||||
|
||||
const QUEUE_GROUP: &str = "wadm_server";
|
||||
|
||||
|
|
@ -122,7 +118,12 @@ impl<P: Publisher> Server<P> {
|
|||
category: "model",
|
||||
operation: "list",
|
||||
object_name: None,
|
||||
} => self.handler.list_models(msg, account_id, lattice_id).await,
|
||||
} => {
|
||||
warn!("Received deprecated subject: model.list. Please use model.get instead");
|
||||
self.handler
|
||||
.list_models_deprecated(msg, account_id, lattice_id)
|
||||
.await
|
||||
}
|
||||
ParsedSubject {
|
||||
account_id,
|
||||
lattice_id,
|
||||
|
|
@ -134,6 +135,13 @@ impl<P: Publisher> Server<P> {
|
|||
.get_model(msg, account_id, lattice_id, name)
|
||||
.await
|
||||
}
|
||||
ParsedSubject {
|
||||
account_id,
|
||||
lattice_id,
|
||||
category: "model",
|
||||
operation: "get",
|
||||
object_name: None,
|
||||
} => self.handler.list_models(msg, account_id, lattice_id).await,
|
||||
ParsedSubject {
|
||||
account_id,
|
||||
lattice_id,
|
||||
|
|
@ -1,9 +1,9 @@
|
|||
use cloudevents::Event as CloudEvent;
|
||||
use tracing::{instrument, trace};
|
||||
use wadm_types::Manifest;
|
||||
|
||||
use crate::{
|
||||
events::{Event, ManifestPublished, ManifestUnpublished},
|
||||
model::Manifest,
|
||||
publisher::Publisher,
|
||||
};
|
||||
|
||||
|
|
@ -25,14 +25,19 @@ impl<P: Publisher> ManifestNotifier<P> {
|
|||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self))]
|
||||
async fn send_event(&self, lattice_id: &str, event: Event) -> anyhow::Result<()> {
|
||||
async fn send_event(
|
||||
&self,
|
||||
lattice_id: &str,
|
||||
event_subject_key: &str,
|
||||
event: Event,
|
||||
) -> anyhow::Result<()> {
|
||||
let event: CloudEvent = event.try_into()?;
|
||||
// NOTE(thomastaylor312): A future improvement could be retries here
|
||||
trace!("Sending notification event");
|
||||
self.publisher
|
||||
.publish(
|
||||
serde_json::to_vec(&event)?,
|
||||
Some(&format!("{}.{lattice_id}", self.prefix)),
|
||||
Some(&format!("{}.{lattice_id}.{event_subject_key}", self.prefix)),
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
|
@ -40,6 +45,7 @@ impl<P: Publisher> ManifestNotifier<P> {
|
|||
pub async fn deployed(&self, lattice_id: &str, manifest: Manifest) -> anyhow::Result<()> {
|
||||
self.send_event(
|
||||
lattice_id,
|
||||
"manifest_published",
|
||||
Event::ManifestPublished(ManifestPublished { manifest }),
|
||||
)
|
||||
.await
|
||||
|
|
@ -48,6 +54,7 @@ impl<P: Publisher> ManifestNotifier<P> {
|
|||
pub async fn undeployed(&self, lattice_id: &str, name: &str) -> anyhow::Result<()> {
|
||||
self.send_event(
|
||||
lattice_id,
|
||||
"manifest_unpublished",
|
||||
Event::ManifestUnpublished(ManifestUnpublished {
|
||||
name: name.to_owned(),
|
||||
}),
|
||||
|
|
@ -1,6 +1,6 @@
|
|||
use async_nats::HeaderMap;
|
||||
|
||||
use crate::model::Manifest;
|
||||
use wadm_types::Manifest;
|
||||
|
||||
/// The name of the header in the NATS request to use for content type inference. The header value
|
||||
/// should be a valid MIME type
|
||||
|
|
@ -1,12 +1,10 @@
|
|||
use std::collections::HashSet;
|
||||
use std::collections::BTreeSet;
|
||||
|
||||
use anyhow::Result;
|
||||
use async_nats::jetstream::kv::{Operation, Store};
|
||||
use tracing::{debug, instrument, trace};
|
||||
|
||||
use crate::{model::internal::StoredManifest, server::StatusType};
|
||||
|
||||
use super::ModelSummary;
|
||||
use crate::model::StoredManifest;
|
||||
|
||||
// TODO(thomastaylor312): Once async nats has concrete error types for KV, we should switch out
|
||||
// anyhow for concrete error types so we can indicate whether a failure was due to something like a
|
||||
|
|
@ -95,13 +93,13 @@ impl ModelStorage {
|
|||
.await
|
||||
}
|
||||
|
||||
/// Fetches a summary of all models in the given lattice.
|
||||
/// Fetches a summary of all manifests for the given lattice.
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub async fn list(
|
||||
&self,
|
||||
account_id: Option<&str>,
|
||||
lattice_id: &str,
|
||||
) -> Result<Vec<ModelSummary>> {
|
||||
) -> Result<Vec<StoredManifest>> {
|
||||
debug!("Fetching list of models from storage");
|
||||
let futs = self
|
||||
.get_model_set(account_id, lattice_id)
|
||||
|
|
@ -110,31 +108,20 @@ impl ModelStorage {
|
|||
.0
|
||||
.into_iter()
|
||||
// We can't use filter map with futures, but we can use map and then flatten it below
|
||||
.map(|model_name| {
|
||||
async {
|
||||
let manifest = match self.get(account_id, lattice_id, &model_name).await {
|
||||
Ok(Some((manifest, _))) => manifest,
|
||||
Ok(None) => return None,
|
||||
Err(e) => return Some(Err(e)),
|
||||
};
|
||||
Some(Ok(ModelSummary {
|
||||
name: model_name,
|
||||
version: manifest.current_version().to_owned(),
|
||||
description: manifest.get_current().description().map(|s| s.to_owned()),
|
||||
deployed_version: manifest.get_deployed().map(|m| m.version().to_owned()),
|
||||
// TODO(thomastaylor312): Actually fetch the status info from the stored
|
||||
// manifest once we figure it out
|
||||
status: StatusType::default(),
|
||||
status_message: None,
|
||||
}))
|
||||
.map(|model_name| async move {
|
||||
match self.get(account_id, lattice_id, &model_name).await {
|
||||
Ok(Some((manifest, _))) => Some(Ok(manifest)),
|
||||
Ok(None) => None,
|
||||
Err(e) => Some(Err(e)),
|
||||
}
|
||||
});
|
||||
|
||||
// Flatten, collect, and sort on name
|
||||
futures::future::join_all(futs)
|
||||
.await
|
||||
.into_iter()
|
||||
.flatten()
|
||||
.collect()
|
||||
.collect::<Result<Vec<StoredManifest>>>()
|
||||
}
|
||||
|
||||
/// Deletes the given model from storage. This also removes the model from the list of all
|
||||
|
|
@ -172,7 +159,7 @@ impl ModelStorage {
|
|||
&self,
|
||||
account_id: Option<&str>,
|
||||
lattice_id: &str,
|
||||
) -> Result<Option<(HashSet<String>, u64)>> {
|
||||
) -> Result<Option<(BTreeSet<String>, u64)>> {
|
||||
match self
|
||||
.store
|
||||
.entry(model_set_key(account_id, lattice_id))
|
||||
|
|
@ -180,7 +167,7 @@ impl ModelStorage {
|
|||
.map_err(|e| anyhow::anyhow!("{e:?}"))?
|
||||
{
|
||||
Some(entry) if !matches!(entry.operation, Operation::Delete | Operation::Purge) => {
|
||||
let models: HashSet<String> =
|
||||
let models: BTreeSet<String> =
|
||||
serde_json::from_slice(&entry.value).map_err(anyhow::Error::from)?;
|
||||
Ok(Some((models, entry.revision)))
|
||||
}
|
||||
|
|
@ -206,7 +193,7 @@ impl ModelStorage {
|
|||
debug!("No models exist in storage for delete, returning early");
|
||||
return Ok(());
|
||||
}
|
||||
None => (HashSet::new(), 0),
|
||||
None => (BTreeSet::new(), 0),
|
||||
};
|
||||
|
||||
match operation {
|
||||
|
|
@ -4,9 +4,10 @@ use std::{collections::HashMap, ops::Deref};
|
|||
|
||||
pub mod nats_kv;
|
||||
pub mod reaper;
|
||||
pub(crate) mod snapshot;
|
||||
mod state;
|
||||
|
||||
pub use state::{Actor, Host, Provider, ProviderStatus, WadmActorInstance};
|
||||
pub use state::{Component, Host, Provider, ProviderStatus, WadmComponentInfo};
|
||||
|
||||
/// A trait that must be implemented with a unique identifier for the given type. This is used in
|
||||
/// the construction of keys for a store
|
||||
|
|
@ -51,7 +52,7 @@ pub trait Store: ReadStore {
|
|||
/// By default this will just call [`Store::store_many`] with a single item in the list of data
|
||||
async fn store<T>(&self, lattice_id: &str, id: String, data: T) -> Result<(), Self::Error>
|
||||
where
|
||||
T: Serialize + DeserializeOwned + StateKind + Send,
|
||||
T: Serialize + DeserializeOwned + StateKind + Send + Sync + Clone, // Needs to be clone in order to retry updates
|
||||
{
|
||||
self.store_many(lattice_id, [(id, data)]).await
|
||||
}
|
||||
|
|
@ -61,7 +62,7 @@ pub trait Store: ReadStore {
|
|||
///
|
||||
/// The given data can be anything that can be turned into an iterator of (key, value). This
|
||||
/// means you can pass a [`HashMap`](std::collections::HashMap) or something like
|
||||
/// `["key".to_string(), Actor{...}]`
|
||||
/// `["key".to_string(), Component{...}]`
|
||||
///
|
||||
/// This function has several required bounds. It needs to be serialize and deserialize because
|
||||
/// some implementations will need to deserialize the current data before modifying it.
|
||||
|
|
@ -70,7 +71,7 @@ pub trait Store: ReadStore {
|
|||
/// sendable between threads
|
||||
async fn store_many<T, D>(&self, lattice_id: &str, data: D) -> Result<(), Self::Error>
|
||||
where
|
||||
T: Serialize + DeserializeOwned + StateKind + Send,
|
||||
T: Serialize + DeserializeOwned + StateKind + Send + Sync + Clone, // Needs to be clone in order to retry updates
|
||||
D: IntoIterator<Item = (String, T)> + Send;
|
||||
|
||||
/// Delete a state entry
|
||||
|
|
@ -78,7 +79,7 @@ pub trait Store: ReadStore {
|
|||
/// By default this will just call [`Store::delete_many`] with a single item in the list of data
|
||||
async fn delete<T>(&self, lattice_id: &str, id: &str) -> Result<(), Self::Error>
|
||||
where
|
||||
T: Serialize + DeserializeOwned + StateKind + Send,
|
||||
T: Serialize + DeserializeOwned + StateKind + Send + Sync,
|
||||
{
|
||||
self.delete_many::<T, _, _>(lattice_id, [id]).await
|
||||
}
|
||||
|
|
@ -96,7 +97,7 @@ pub trait Store: ReadStore {
|
|||
/// sendable between threads
|
||||
async fn delete_many<T, D, K>(&self, lattice_id: &str, data: D) -> Result<(), Self::Error>
|
||||
where
|
||||
T: Serialize + DeserializeOwned + StateKind + Send,
|
||||
T: Serialize + DeserializeOwned + StateKind + Send + Sync,
|
||||
D: IntoIterator<Item = K> + Send,
|
||||
K: AsRef<str>;
|
||||
}
|
||||
|
|
@ -106,7 +107,7 @@ pub trait Store: ReadStore {
|
|||
impl<S: Store + Send + Sync> Store for std::sync::Arc<S> {
|
||||
async fn store_many<T, D>(&self, lattice_id: &str, data: D) -> Result<(), Self::Error>
|
||||
where
|
||||
T: Serialize + DeserializeOwned + StateKind + Send,
|
||||
T: Serialize + DeserializeOwned + StateKind + Send + Sync + Clone,
|
||||
D: IntoIterator<Item = (String, T)> + Send,
|
||||
{
|
||||
self.as_ref().store_many(lattice_id, data).await
|
||||
|
|
@ -114,7 +115,7 @@ impl<S: Store + Send + Sync> Store for std::sync::Arc<S> {
|
|||
|
||||
async fn delete_many<T, D, K>(&self, lattice_id: &str, data: D) -> Result<(), Self::Error>
|
||||
where
|
||||
T: Serialize + DeserializeOwned + StateKind + Send,
|
||||
T: Serialize + DeserializeOwned + StateKind + Send + Sync,
|
||||
D: IntoIterator<Item = K> + Send,
|
||||
K: AsRef<str>,
|
||||
{
|
||||
|
|
@ -209,7 +210,7 @@ impl<S: Store + Sync> ScopedStore<S> {
|
|||
/// Store a piece of state. This should overwrite existing state entries
|
||||
pub async fn store<T>(&self, id: String, data: T) -> Result<(), S::Error>
|
||||
where
|
||||
T: Serialize + DeserializeOwned + StateKind + Send,
|
||||
T: Serialize + DeserializeOwned + StateKind + Send + Sync + Clone,
|
||||
{
|
||||
self.inner.store(&self.lattice_id, id, data).await
|
||||
}
|
||||
|
|
@ -218,7 +219,7 @@ impl<S: Store + Sync> ScopedStore<S> {
|
|||
/// allows for stores to perform multiple writes simultaneously or to leverage transactions
|
||||
pub async fn store_many<T, D>(&self, data: D) -> Result<(), S::Error>
|
||||
where
|
||||
T: Serialize + DeserializeOwned + StateKind + Send,
|
||||
T: Serialize + DeserializeOwned + StateKind + Send + Sync + Clone,
|
||||
D: IntoIterator<Item = (String, T)> + Send,
|
||||
{
|
||||
self.inner.store_many(&self.lattice_id, data).await
|
||||
|
|
@ -227,7 +228,7 @@ impl<S: Store + Sync> ScopedStore<S> {
|
|||
/// Delete a state entry
|
||||
pub async fn delete<T>(&self, id: &str) -> Result<(), S::Error>
|
||||
where
|
||||
T: Serialize + DeserializeOwned + StateKind + Send,
|
||||
T: Serialize + DeserializeOwned + StateKind + Send + Sync,
|
||||
{
|
||||
self.inner.delete::<T>(&self.lattice_id, id).await
|
||||
}
|
||||
|
|
@ -236,7 +237,7 @@ impl<S: Store + Sync> ScopedStore<S> {
|
|||
/// simultaneously or to leverage transactions
|
||||
pub async fn delete_many<T, D, K>(&self, data: D) -> Result<(), S::Error>
|
||||
where
|
||||
T: Serialize + DeserializeOwned + StateKind + Send,
|
||||
T: Serialize + DeserializeOwned + StateKind + Send + Sync,
|
||||
D: IntoIterator<Item = K> + Send,
|
||||
K: AsRef<str>,
|
||||
{
|
||||
|
|
@ -245,10 +246,3 @@ impl<S: Store + Sync> ScopedStore<S> {
|
|||
.await
|
||||
}
|
||||
}
|
||||
|
||||
/// A helper function for generating a unique ID for any given provider. This is exposed purely to
|
||||
/// be a common way of creating a key to access/store provider information
|
||||
pub fn provider_id(public_key: &str, link_name: &str) -> String {
|
||||
// TODO: Update this to also use contract ID when 0.62 comes out
|
||||
format!("{}/{}", public_key, link_name)
|
||||
}
|
||||
|
|
@ -9,17 +9,19 @@
|
|||
//! the encoding in the future. Because of this, DO NOT depend on accessing this data other than
|
||||
//! through this module
|
||||
//!
|
||||
//! All data is currently stored in a single encoded map per type (host, actor, provider), where the
|
||||
//! keys are the ID as given by [`StateId::id`]. Once again, we reserve the right to change this
|
||||
//! All data is currently stored in a single encoded map per type (host, component, provider), where
|
||||
//! the keys are the ID as given by [`StateId::id`]. Once again, we reserve the right to change this
|
||||
//! structure in the future
|
||||
use std::collections::HashMap;
|
||||
use std::io::Error as IoError;
|
||||
use std::time::Duration;
|
||||
|
||||
use async_nats::{
|
||||
jetstream::kv::{Operation, Store as KvStore},
|
||||
Error as NatsError,
|
||||
};
|
||||
use async_trait::async_trait;
|
||||
use futures::Future;
|
||||
use serde::{de::DeserializeOwned, Serialize};
|
||||
use tracing::{debug, error, field::Empty, instrument, trace};
|
||||
use tracing_futures::Instrument;
|
||||
|
|
@ -89,6 +91,73 @@ impl NatsKvStore {
|
|||
Err(e) => Err(NatsStoreError::Nats(e.into())),
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper that retries update operations
|
||||
// NOTE(thomastaylor312): We could probably make this even better with some exponential backoff,
|
||||
// but this is easy enough for now since generally there isn't a ton of competition for updating
|
||||
// a single lattice
|
||||
async fn update_with_retries<T, F, Fut>(
|
||||
&self,
|
||||
lattice_id: &str,
|
||||
key: &str,
|
||||
timeout: Duration,
|
||||
updater: F,
|
||||
) -> Result<(), NatsStoreError>
|
||||
where
|
||||
T: Serialize + DeserializeOwned + StateKind + Send,
|
||||
F: Fn(HashMap<String, T>) -> Fut,
|
||||
Fut: Future<Output = Result<Vec<u8>, NatsStoreError>>,
|
||||
{
|
||||
let res = tokio::time::timeout(timeout, async {
|
||||
loop {
|
||||
let (current_data, revision) = self
|
||||
.internal_list::<T>(lattice_id)
|
||||
.in_current_span()
|
||||
.await?;
|
||||
debug!(revision, "Updating data in store");
|
||||
let updated_data = updater(current_data).await?;
|
||||
trace!("Writing bytes to store");
|
||||
// If the function doesn't return any data (such as for deletes), just return early.
|
||||
// Everything is an update (right now), even for deletes so the only case we'd have
|
||||
// an empty vec is if we aren't updating anything
|
||||
if updated_data.is_empty() {
|
||||
return Ok(())
|
||||
}
|
||||
match self.store.update(key, updated_data.into(), revision).await {
|
||||
Ok(_) => return Ok(()),
|
||||
Err(e) => {
|
||||
if e.to_string().contains("wrong last sequence") {
|
||||
debug!(%key, %lattice_id, "Got wrong last sequence when trying to update state. Retrying update operation");
|
||||
continue;
|
||||
}
|
||||
return Err(NatsStoreError::Nats(e.into()));
|
||||
}
|
||||
// TODO(#316): Uncomment this code once we can update to the latest
|
||||
// async-nats, which actually allows us to access the inner source of the error
|
||||
// Err(e) => {
|
||||
// let source = match e.source() {
|
||||
// Some(s) => s,
|
||||
// None => return Err(NatsStoreError::Nats(e.into())),
|
||||
// };
|
||||
// match source.downcast_ref::<PublishError>() {
|
||||
// Some(e) if matches!(e.kind(), PublishErrorKind::WrongLastSequence) => {
|
||||
// debug!(%key, %lattice_id, "Got wrong last sequence when trying to update state. Retrying update operation");
|
||||
// continue;
|
||||
// },
|
||||
// _ => return Err(NatsStoreError::Nats(e.into())),
|
||||
// }
|
||||
// }
|
||||
}
|
||||
}
|
||||
})
|
||||
.await;
|
||||
match res {
|
||||
Err(_e) => Err(NatsStoreError::Other(
|
||||
"Timed out while retrying updates to key".to_string(),
|
||||
)),
|
||||
Ok(res2) => res2,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// NOTE(thomastaylor312): This implementation should be good enough to start. If we need to optimize
|
||||
|
|
@ -141,7 +210,7 @@ impl Store for NatsKvStore {
|
|||
///
|
||||
/// The given data can be anything that can be turned into an iterator of (key, value). This
|
||||
/// means you can pass a [`HashMap`](std::collections::HashMap) or something like
|
||||
/// `["key".to_string(), Actor{...}]`
|
||||
/// `["key".to_string(), Component{...}]`
|
||||
///
|
||||
/// This function has several required bounds. It needs to be serialize and deserialize because
|
||||
/// some implementations will need to deserialize the current data before modifying it.
|
||||
|
|
@ -151,79 +220,76 @@ impl Store for NatsKvStore {
|
|||
#[instrument(level = "debug", skip(self, data), fields(key = Empty))]
|
||||
async fn store_many<T, D>(&self, lattice_id: &str, data: D) -> Result<(), Self::Error>
|
||||
where
|
||||
T: Serialize + DeserializeOwned + StateKind + Send,
|
||||
T: Serialize + DeserializeOwned + StateKind + Send + Sync + Clone,
|
||||
D: IntoIterator<Item = (String, T)> + Send,
|
||||
{
|
||||
let key = generate_key::<T>(lattice_id);
|
||||
tracing::Span::current().record("key", &key);
|
||||
let (mut current_data, revision) = self
|
||||
.internal_list::<T>(lattice_id)
|
||||
.in_current_span()
|
||||
.await?;
|
||||
debug!("Updating data in store");
|
||||
for (id, item) in data.into_iter() {
|
||||
if current_data.insert(id, item).is_some() {
|
||||
// NOTE: We may want to return the old data in the future. For now, keeping it simple
|
||||
trace!("Replaced existing data");
|
||||
} else {
|
||||
trace!("Inserted new entry");
|
||||
};
|
||||
}
|
||||
let serialized = serde_json::to_vec(¤t_data)?;
|
||||
// NOTE(thomastaylor312): This could not matter, but because this is JSON and not consuming
|
||||
// the data it is serializing, we are now holding a vec of the serialized data and the
|
||||
// actual struct in memory. So this drops it immediately to hopefully keep memory usage down
|
||||
// on busy servers
|
||||
drop(current_data);
|
||||
trace!(len = serialized.len(), "Writing bytes to store");
|
||||
self.store
|
||||
.update(key, serialized.into(), revision)
|
||||
.await
|
||||
.map(|_| ())
|
||||
.map_err(|e| NatsStoreError::Nats(e.into()))
|
||||
let data: Vec<(String, T)> = data.into_iter().collect();
|
||||
self.update_with_retries(
|
||||
lattice_id,
|
||||
&key,
|
||||
Duration::from_millis(1500),
|
||||
|mut current_data| async {
|
||||
let cloned = data.clone();
|
||||
async move {
|
||||
for (id, item) in cloned.into_iter() {
|
||||
if current_data.insert(id, item).is_some() {
|
||||
// NOTE: We may want to return the old data in the future. For now, keeping it simple
|
||||
trace!("Replaced existing data");
|
||||
} else {
|
||||
trace!("Inserted new entry");
|
||||
};
|
||||
}
|
||||
serde_json::to_vec(¤t_data).map_err(NatsStoreError::SerDe)
|
||||
}
|
||||
.await
|
||||
},
|
||||
)
|
||||
.in_current_span()
|
||||
.await
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self, data), fields(key = Empty))]
|
||||
async fn delete_many<T, D, K>(&self, lattice_id: &str, data: D) -> Result<(), Self::Error>
|
||||
where
|
||||
T: Serialize + DeserializeOwned + StateKind + Send,
|
||||
T: Serialize + DeserializeOwned + StateKind + Send + Sync,
|
||||
D: IntoIterator<Item = K> + Send,
|
||||
K: AsRef<str>,
|
||||
{
|
||||
let key = generate_key::<T>(lattice_id);
|
||||
tracing::Span::current().record("key", &key);
|
||||
let (mut current_data, revision) = self
|
||||
.internal_list::<T>(lattice_id)
|
||||
.in_current_span()
|
||||
.await?;
|
||||
debug!("Updating data in store");
|
||||
let mut updated = false;
|
||||
for id in data.into_iter() {
|
||||
if current_data.remove(id.as_ref()).is_some() {
|
||||
// NOTE: We may want to return the old data in the future. For now, keeping it simple
|
||||
trace!(id = %id.as_ref(), "Removing existing data");
|
||||
updated = true;
|
||||
} else {
|
||||
trace!(id = %id.as_ref(), "ID doesn't exist in store, ignoring");
|
||||
};
|
||||
}
|
||||
// If we updated nothing, return early
|
||||
if !updated {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let serialized = serde_json::to_vec(¤t_data)?;
|
||||
// NOTE(thomastaylor312): This could not matter, but because this is JSON and not consuming
|
||||
// the data it is serializing, we are now holding a vec of the serialized data and the
|
||||
// actual struct in memory. So this drops it immediately to hopefully keep memory usage down
|
||||
// on busy servers
|
||||
drop(current_data);
|
||||
trace!(len = serialized.len(), "Writing bytes to store");
|
||||
self.store
|
||||
.update(key, serialized.into(), revision)
|
||||
.await
|
||||
.map(|_| ())
|
||||
.map_err(|e| NatsStoreError::Nats(e.into()))
|
||||
let data: Vec<String> = data.into_iter().map(|s| s.as_ref().to_string()).collect();
|
||||
self.update_with_retries(
|
||||
lattice_id,
|
||||
&key,
|
||||
Duration::from_millis(1500),
|
||||
|mut current_data: HashMap<String, T>| async {
|
||||
let cloned = data.clone();
|
||||
async move {
|
||||
let mut updated = false;
|
||||
for id in cloned.into_iter() {
|
||||
if current_data.remove(&id).is_some() {
|
||||
// NOTE: We may want to return the old data in the future. For now, keeping it simple
|
||||
trace!(%id, "Removing existing data");
|
||||
updated = true;
|
||||
} else {
|
||||
trace!(%id, "ID doesn't exist in store, ignoring");
|
||||
};
|
||||
}
|
||||
// If we updated nothing, return early
|
||||
if !updated {
|
||||
return Ok(Vec::with_capacity(0));
|
||||
}
|
||||
|
||||
serde_json::to_vec(¤t_data).map_err(NatsStoreError::SerDe)
|
||||
}
|
||||
.await
|
||||
},
|
||||
)
|
||||
.in_current_span()
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1,5 +1,5 @@
|
|||
//! Contains helpers for reaping Hosts that haven't received a heartbeat within a configured amount
|
||||
//! of time and actors and providers on hosts that no longer exist
|
||||
//! of time and components and providers on hosts that no longer exist
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
|
|
@ -7,7 +7,7 @@ use chrono::{Duration, Utc};
|
|||
use tokio::{task::JoinHandle, time};
|
||||
use tracing::{debug, error, info, instrument, trace, warn};
|
||||
|
||||
use super::{Actor, Host, Provider, Store};
|
||||
use super::{Component, Host, Provider, Store};
|
||||
|
||||
/// A struct that can reap various pieces of data from the given store
|
||||
pub struct Reaper<S> {
|
||||
|
|
@ -102,7 +102,7 @@ impl<S: Store + Clone + Send + Sync + 'static> Undertaker<S> {
|
|||
loop {
|
||||
ticker.tick().await;
|
||||
trace!("Tick fired, running reap tasks");
|
||||
// We want to reap hosts first so that the state is up to date for reaping actors and providers
|
||||
// We want to reap hosts first so that the state is up to date for reaping components and providers
|
||||
self.reap_hosts().await;
|
||||
// Now get the current list of hosts
|
||||
let hosts = match self.store.list::<Host>(&self.lattice_id).await {
|
||||
|
|
@ -112,8 +112,9 @@ impl<S: Store + Clone + Send + Sync + 'static> Undertaker<S> {
|
|||
continue;
|
||||
}
|
||||
};
|
||||
// Reap actors and providers simultaneously
|
||||
futures::join!(self.reap_actors(&hosts), self.reap_providers(&hosts));
|
||||
// Reap components and providers
|
||||
self.reap_components(&hosts).await;
|
||||
self.reap_providers(&hosts).await;
|
||||
trace!("Completed reap tasks");
|
||||
}
|
||||
}
|
||||
|
|
@ -151,66 +152,49 @@ impl<S: Store + Clone + Send + Sync + 'static> Undertaker<S> {
|
|||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self, hosts), fields(lattice_id = %self.lattice_id))]
|
||||
async fn reap_actors(&self, hosts: &HashMap<String, Host>) {
|
||||
let actors = match self.store.list::<Actor>(&self.lattice_id).await {
|
||||
async fn reap_components(&self, hosts: &HashMap<String, Host>) {
|
||||
let components = match self.store.list::<Component>(&self.lattice_id).await {
|
||||
Ok(n) => n,
|
||||
Err(e) => {
|
||||
error!(error = %e, "Error when fetching actors from store. Will retry on next tick");
|
||||
error!(error = %e, "Error when fetching components from store. Will retry on next tick");
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
let (actors_to_remove, actors_to_update): (HashMap<String, Actor>, HashMap<String, Actor>) =
|
||||
actors
|
||||
.into_iter()
|
||||
.filter_map(|(id, mut actor)| {
|
||||
let current_num_hosts = actor.instances.len();
|
||||
// Only keep the instances where the host exists and the actor is in its map
|
||||
actor
|
||||
.instances
|
||||
.retain(|host_id, _| hosts.get(host_id).map(|host| host.actors.contains_key(&actor.id)).unwrap_or(false));
|
||||
// Now for the remaining instances, make sure the number of instances is equal
|
||||
// to what is observed on the host, otherwise truncate.
|
||||
// NOTE: If for some reason we start using instance IDs, than things will need
|
||||
// to be updated so we can clear the instance ID
|
||||
let mut did_truncate = false;
|
||||
for (host_id, instances) in actor.instances.iter_mut() {
|
||||
if let Some(host) = hosts.get(host_id) {
|
||||
// This unwrap shouldn't happen because we just retained the instances
|
||||
// that have the actor ID in their list. If it does, we unwrap to
|
||||
// current length so it just skips this logic for now
|
||||
let current_num_instances = *host.actors.get(&actor.id).unwrap_or(&instances.len());
|
||||
if instances.len() > current_num_instances {
|
||||
debug!(%id, %host_id, num_instances = %instances.len(), %current_num_instances, "Number of instances for actor is greater than number of instances observed on host. Truncating to correct number");
|
||||
*instances = instances.drain().take(current_num_instances).collect();
|
||||
did_truncate = true;
|
||||
}
|
||||
// If we have less instances than the host, then it just means the host
|
||||
// heartbeat will update them down the line
|
||||
}
|
||||
}
|
||||
// If we got rid of something or truncated instances, that means this needs to update
|
||||
((current_num_hosts != actor.instances.len()) || did_truncate).then_some((id, actor))
|
||||
})
|
||||
.partition(|(_, actor)| actor.instances.is_empty());
|
||||
let (components_to_remove, components_to_update): (
|
||||
HashMap<String, Component>,
|
||||
HashMap<String, Component>,
|
||||
) = components
|
||||
.into_iter()
|
||||
.map(|(id, mut component)| {
|
||||
// Only keep the instances where the host exists and the component is in its map
|
||||
component.instances.retain(|host_id, _| {
|
||||
hosts
|
||||
.get(host_id)
|
||||
.map(|host| host.components.contains_key(&component.id))
|
||||
.unwrap_or(false)
|
||||
});
|
||||
(id, component)
|
||||
})
|
||||
.partition(|(_, component)| component.instances.is_empty());
|
||||
|
||||
debug!(to_remove = %actors_to_remove.len(), to_update = %actors_to_update.len(), "Filtered out list of actors to update and reap");
|
||||
debug!(to_remove = %components_to_remove.len(), to_update = %components_to_update.len(), "Filtered out list of components to update and reap");
|
||||
|
||||
if let Err(e) = self
|
||||
.store
|
||||
.store_many(&self.lattice_id, actors_to_update)
|
||||
.store_many(&self.lattice_id, components_to_update)
|
||||
.await
|
||||
{
|
||||
warn!(error = %e, "Error when storing updated actors. Will retry on next tick");
|
||||
warn!(error = %e, "Error when storing updated components. Will retry on next tick");
|
||||
return;
|
||||
}
|
||||
|
||||
if let Err(e) = self
|
||||
.store
|
||||
.delete_many::<Actor, _, _>(&self.lattice_id, actors_to_remove.keys())
|
||||
.delete_many::<Component, _, _>(&self.lattice_id, components_to_remove.keys())
|
||||
.await
|
||||
{
|
||||
warn!(error = %e, "Error when deleting actors from store. Will retry on next tick")
|
||||
warn!(error = %e, "Error when deleting components from store. Will retry on next tick")
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -219,7 +203,7 @@ impl<S: Store + Clone + Send + Sync + 'static> Undertaker<S> {
|
|||
let providers = match self.store.list::<Provider>(&self.lattice_id).await {
|
||||
Ok(n) => n,
|
||||
Err(e) => {
|
||||
error!(error = %e, "Error when fetching actors from store. Will retry on next tick");
|
||||
error!(error = %e, "Error when fetching components from store. Will retry on next tick");
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
|
@ -264,10 +248,13 @@ impl<S: Store + Clone + Send + Sync + 'static> Undertaker<S> {
|
|||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use std::{collections::HashSet, sync::Arc};
|
||||
use std::{
|
||||
collections::{BTreeMap, HashSet},
|
||||
sync::Arc,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
storage::{ProviderStatus, ReadStore, WadmActorInstance},
|
||||
storage::{ProviderStatus, ReadStore, WadmComponentInfo},
|
||||
test_util::TestStore,
|
||||
};
|
||||
|
||||
|
|
@ -276,9 +263,7 @@ mod test {
|
|||
let store = Arc::new(TestStore::default());
|
||||
|
||||
let lattice_id = "reaper";
|
||||
let actor_id = "testactor";
|
||||
let actor_instance_id_one = "asdasdj-asdada-132123-ffff";
|
||||
let actor_instance_id_two = "123abc-asdada-132123-ffff";
|
||||
let component_id = "testcomponent";
|
||||
let host1_id = "host1";
|
||||
let host2_id = "host2";
|
||||
|
||||
|
|
@ -288,21 +273,23 @@ mod test {
|
|||
lattice_id,
|
||||
[
|
||||
(
|
||||
actor_id.to_string(),
|
||||
Actor {
|
||||
id: actor_id.to_string(),
|
||||
component_id.to_string(),
|
||||
Component {
|
||||
id: component_id.to_string(),
|
||||
instances: HashMap::from([
|
||||
(
|
||||
host1_id.to_string(),
|
||||
HashSet::from_iter([WadmActorInstance::from_id(
|
||||
actor_instance_id_one.to_string(),
|
||||
)]),
|
||||
HashSet::from_iter([WadmComponentInfo {
|
||||
annotations: BTreeMap::default(),
|
||||
count: 1,
|
||||
}]),
|
||||
),
|
||||
(
|
||||
host2_id.to_string(),
|
||||
HashSet::from_iter([WadmActorInstance::from_id(
|
||||
actor_instance_id_two.to_string(),
|
||||
)]),
|
||||
HashSet::from_iter([WadmComponentInfo {
|
||||
annotations: BTreeMap::default(),
|
||||
count: 1,
|
||||
}]),
|
||||
),
|
||||
]),
|
||||
..Default::default()
|
||||
|
|
@ -310,13 +297,14 @@ mod test {
|
|||
),
|
||||
(
|
||||
"idontexist".to_string(),
|
||||
Actor {
|
||||
Component {
|
||||
id: "idontexist".to_string(),
|
||||
instances: HashMap::from([(
|
||||
host1_id.to_string(),
|
||||
HashSet::from_iter([WadmActorInstance::from_id(
|
||||
actor_instance_id_one.to_string(),
|
||||
)]),
|
||||
HashSet::from_iter([WadmComponentInfo {
|
||||
annotations: BTreeMap::default(),
|
||||
count: 1,
|
||||
}]),
|
||||
)]),
|
||||
..Default::default()
|
||||
},
|
||||
|
|
@ -346,7 +334,7 @@ mod test {
|
|||
(
|
||||
host1_id.to_string(),
|
||||
Host {
|
||||
actors: HashMap::from([(actor_id.to_string(), 1)]),
|
||||
components: HashMap::from([(component_id.to_string(), 1)]),
|
||||
providers: HashSet::default(),
|
||||
id: host1_id.to_string(),
|
||||
last_seen: Utc::now(),
|
||||
|
|
@ -356,7 +344,7 @@ mod test {
|
|||
(
|
||||
host2_id.to_string(),
|
||||
Host {
|
||||
actors: HashMap::from([(actor_id.to_string(), 1)]),
|
||||
components: HashMap::from([(component_id.to_string(), 1)]),
|
||||
providers: HashSet::default(),
|
||||
id: host2_id.to_string(),
|
||||
// Make this host stick around for longer
|
||||
|
|
@ -377,14 +365,18 @@ mod test {
|
|||
// Wait for first node to be reaped (two ticks)
|
||||
tokio::time::sleep(wait * 2).await;
|
||||
|
||||
// Now check that the providers, actors, and hosts were reaped
|
||||
// Now check that the providers, components, and hosts were reaped
|
||||
let hosts = store.list::<Host>(lattice_id).await.unwrap();
|
||||
assert_eq!(hosts.len(), 1, "Only one host should be left");
|
||||
let actors = store.list::<Actor>(lattice_id).await.unwrap();
|
||||
assert_eq!(actors.len(), 1, "Only one actor should remain in the store");
|
||||
actors
|
||||
.get(actor_id)
|
||||
.expect("Should have the correct actor in the store");
|
||||
let components = store.list::<Component>(lattice_id).await.unwrap();
|
||||
assert_eq!(
|
||||
components.len(),
|
||||
1,
|
||||
"Only one component should remain in the store"
|
||||
);
|
||||
components
|
||||
.get(component_id)
|
||||
.expect("Should have the correct component in the store");
|
||||
|
||||
assert!(
|
||||
store.list::<Provider>(lattice_id).await.unwrap().is_empty(),
|
||||
|
|
@ -393,13 +385,11 @@ mod test {
|
|||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_stale_actor() {
|
||||
async fn test_stale_component() {
|
||||
let store = Arc::new(TestStore::default());
|
||||
|
||||
let lattice_id = "reaper";
|
||||
let actor_id = "testactor";
|
||||
let actor_instance_id_one = "asdasdj-asdada-132123-ffff";
|
||||
let actor_instance_id_two = "123abc-asdada-132123-ffff";
|
||||
let component_id = "testcomponent";
|
||||
let host1_id = "host1";
|
||||
let host2_id = "host2";
|
||||
|
||||
|
|
@ -407,21 +397,23 @@ mod test {
|
|||
store
|
||||
.store(
|
||||
lattice_id,
|
||||
actor_id.to_string(),
|
||||
Actor {
|
||||
id: actor_id.to_string(),
|
||||
component_id.to_string(),
|
||||
Component {
|
||||
id: component_id.to_string(),
|
||||
instances: HashMap::from([
|
||||
(
|
||||
host1_id.to_string(),
|
||||
HashSet::from_iter([WadmActorInstance::from_id(
|
||||
actor_instance_id_one.to_string(),
|
||||
)]),
|
||||
HashSet::from_iter([WadmComponentInfo {
|
||||
annotations: BTreeMap::default(),
|
||||
count: 1,
|
||||
}]),
|
||||
),
|
||||
(
|
||||
host2_id.to_string(),
|
||||
HashSet::from_iter([WadmActorInstance::from_id(
|
||||
actor_instance_id_two.to_string(),
|
||||
)]),
|
||||
HashSet::from_iter([WadmComponentInfo {
|
||||
annotations: BTreeMap::default(),
|
||||
count: 1,
|
||||
}]),
|
||||
),
|
||||
]),
|
||||
..Default::default()
|
||||
|
|
@ -437,7 +429,7 @@ mod test {
|
|||
(
|
||||
host1_id.to_string(),
|
||||
Host {
|
||||
actors: HashMap::from([(actor_id.to_string(), 1)]),
|
||||
components: HashMap::from([(component_id.to_string(), 1)]),
|
||||
providers: HashSet::default(),
|
||||
id: host1_id.to_string(),
|
||||
last_seen: Utc::now() + Duration::milliseconds(600),
|
||||
|
|
@ -447,7 +439,7 @@ mod test {
|
|||
(
|
||||
host2_id.to_string(),
|
||||
Host {
|
||||
actors: HashMap::default(),
|
||||
components: HashMap::default(),
|
||||
providers: HashSet::default(),
|
||||
id: host2_id.to_string(),
|
||||
last_seen: Utc::now() + Duration::milliseconds(600),
|
||||
|
|
@ -467,18 +459,18 @@ mod test {
|
|||
// Wait for first tick
|
||||
tokio::time::sleep(wait).await;
|
||||
|
||||
// Make sure we only have one instance of the actor left
|
||||
let actors = store.list::<Actor>(lattice_id).await.unwrap();
|
||||
let actor = actors
|
||||
.get(actor_id)
|
||||
.expect("Should have the correct actor in the store");
|
||||
// Make sure we only have one instance of the component left
|
||||
let components = store.list::<Component>(lattice_id).await.unwrap();
|
||||
let component = components
|
||||
.get(component_id)
|
||||
.expect("Should have the correct component in the store");
|
||||
assert_eq!(
|
||||
actor.instances.len(),
|
||||
component.instances.len(),
|
||||
1,
|
||||
"Only one host should remain in instances"
|
||||
);
|
||||
assert_eq!(
|
||||
actor
|
||||
component
|
||||
.instances
|
||||
.get(host1_id)
|
||||
.expect("Should have instance left on the correct host")
|
||||
|
|
@ -0,0 +1,193 @@
|
|||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use tokio::sync::RwLock;
|
||||
use tracing::debug;
|
||||
use wasmcloud_control_interface::Link;
|
||||
use wasmcloud_secrets_types::SecretConfig;
|
||||
|
||||
use crate::storage::{Component, Host, Provider, ReadStore, StateKind};
|
||||
use crate::workers::{ConfigSource, LinkSource, SecretSource};
|
||||
|
||||
// NOTE(thomastaylor312): This type is real ugly and we should probably find a better way to
|
||||
// structure the ReadStore trait so it doesn't have the generic T we have to work around here. This
|
||||
// is essentially a map of "state kind" -> map of ID to partially serialized state. I did try to
|
||||
// implement some sort of getter trait but it has to be generic across T
|
||||
type InMemoryData = HashMap<String, HashMap<String, serde_json::Value>>;
|
||||
|
||||
/// A store and claims/links source implementation that contains a static snapshot of the data that
|
||||
/// can be refreshed periodically. Please note that this is scoped to a specific lattice ID and
|
||||
/// should be constructed separately for each lattice ID.
|
||||
///
|
||||
/// Since configuration is fetched infrequently, and configuration might be large, we instead
|
||||
/// query the configuration source directly when we need it.
|
||||
///
|
||||
/// NOTE: This is a temporary workaround until we get a proper caching store in place
|
||||
pub struct SnapshotStore<S, L> {
|
||||
store: S,
|
||||
lattice_source: L,
|
||||
lattice_id: String,
|
||||
stored_state: Arc<RwLock<InMemoryData>>,
|
||||
links: Arc<RwLock<Vec<Link>>>,
|
||||
}
|
||||
|
||||
impl<S, L> Clone for SnapshotStore<S, L>
|
||||
where
|
||||
S: Clone,
|
||||
L: Clone,
|
||||
{
|
||||
fn clone(&self) -> Self {
|
||||
Self {
|
||||
store: self.store.clone(),
|
||||
lattice_source: self.lattice_source.clone(),
|
||||
lattice_id: self.lattice_id.clone(),
|
||||
stored_state: self.stored_state.clone(),
|
||||
links: self.links.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, L> SnapshotStore<S, L>
|
||||
where
|
||||
S: ReadStore,
|
||||
L: LinkSource + ConfigSource + SecretSource,
|
||||
{
|
||||
/// Creates a new snapshot store that is scoped to the given lattice ID
|
||||
pub fn new(store: S, lattice_source: L, lattice_id: String) -> Self {
|
||||
Self {
|
||||
store,
|
||||
lattice_source,
|
||||
lattice_id,
|
||||
stored_state: Default::default(),
|
||||
links: Arc::new(RwLock::new(Vec::new())),
|
||||
}
|
||||
}
|
||||
|
||||
/// Refreshes the snapshotted data, returning an error if it couldn't update the data
|
||||
pub async fn refresh(&self) -> anyhow::Result<()> {
|
||||
// SAFETY: All of these unwraps are safe because we _just_ deserialized from JSON
|
||||
let providers = self
|
||||
.store
|
||||
.list::<Provider>(&self.lattice_id)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|(key, val)| (key, serde_json::to_value(val).unwrap()))
|
||||
.collect::<HashMap<_, _>>();
|
||||
let components = self
|
||||
.store
|
||||
.list::<Component>(&self.lattice_id)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|(key, val)| (key, serde_json::to_value(val).unwrap()))
|
||||
.collect::<HashMap<_, _>>();
|
||||
let hosts = self
|
||||
.store
|
||||
.list::<Host>(&self.lattice_id)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|(key, val)| (key, serde_json::to_value(val).unwrap()))
|
||||
.collect::<HashMap<_, _>>();
|
||||
|
||||
// If we fail to get the links, that likely just means the lattice source is down, so we
|
||||
// just fall back on what we have cached
|
||||
if let Ok(links) = self.lattice_source.get_links().await {
|
||||
*self.links.write().await = links;
|
||||
} else {
|
||||
debug!("Failed to get links from lattice source, using cached links");
|
||||
};
|
||||
|
||||
{
|
||||
let mut stored_state = self.stored_state.write().await;
|
||||
stored_state.insert(Provider::KIND.to_owned(), providers);
|
||||
stored_state.insert(Component::KIND.to_owned(), components);
|
||||
stored_state.insert(Host::KIND.to_owned(), hosts);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl<S, L> ReadStore for SnapshotStore<S, L>
|
||||
where
|
||||
// NOTE(thomastaylor312): We need this bound so we can pass through the error type.
|
||||
S: ReadStore + Send + Sync,
|
||||
L: Send + Sync,
|
||||
{
|
||||
type Error = S::Error;
|
||||
|
||||
// NOTE(thomastaylor312): See other note about the generic T above, but this is hardcore lolsob
|
||||
async fn get<T>(&self, _lattice_id: &str, id: &str) -> Result<Option<T>, Self::Error>
|
||||
where
|
||||
T: serde::de::DeserializeOwned + StateKind,
|
||||
{
|
||||
Ok(self
|
||||
.stored_state
|
||||
.read()
|
||||
.await
|
||||
.get(T::KIND)
|
||||
.and_then(|data| {
|
||||
data.get(id).map(|data| {
|
||||
serde_json::from_value::<T>(data.clone()).expect(
|
||||
"Failed to deserialize data from snapshot, this is programmer error",
|
||||
)
|
||||
})
|
||||
}))
|
||||
}
|
||||
|
||||
async fn list<T>(&self, _lattice_id: &str) -> Result<HashMap<String, T>, Self::Error>
|
||||
where
|
||||
T: serde::de::DeserializeOwned + StateKind,
|
||||
{
|
||||
Ok(self
|
||||
.stored_state
|
||||
.read()
|
||||
.await
|
||||
.get(T::KIND)
|
||||
.cloned()
|
||||
.unwrap_or_default()
|
||||
.into_iter()
|
||||
.map(|(key, val)| {
|
||||
(
|
||||
key,
|
||||
serde_json::from_value::<T>(val).expect(
|
||||
"Failed to deserialize data from snapshot, this is programmer error",
|
||||
),
|
||||
)
|
||||
})
|
||||
.collect())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl<S, L> LinkSource for SnapshotStore<S, L>
|
||||
where
|
||||
S: Send + Sync,
|
||||
L: Send + Sync,
|
||||
{
|
||||
async fn get_links(&self) -> anyhow::Result<Vec<Link>> {
|
||||
Ok(self.links.read().await.clone())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl<S, L> ConfigSource for SnapshotStore<S, L>
|
||||
where
|
||||
S: Send + Sync,
|
||||
L: ConfigSource + Send + Sync,
|
||||
{
|
||||
async fn get_config(&self, name: &str) -> anyhow::Result<Option<HashMap<String, String>>> {
|
||||
self.lattice_source.get_config(name).await
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl<S, L> SecretSource for SnapshotStore<S, L>
|
||||
where
|
||||
S: Send + Sync,
|
||||
L: SecretSource + Send + Sync,
|
||||
{
|
||||
async fn get_secret(&self, name: &str) -> anyhow::Result<Option<SecretConfig>> {
|
||||
self.lattice_source.get_secret(name).await
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,361 @@
|
|||
use std::borrow::{Borrow, ToOwned};
|
||||
use std::collections::{BTreeMap, HashMap, HashSet};
|
||||
use std::hash::{Hash, Hasher};
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
use semver::Version;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::StateKind;
|
||||
use crate::events::{ComponentScaled, HostHeartbeat, HostStarted, ProviderInfo, ProviderStarted};
|
||||
|
||||
/// A wasmCloud Capability provider
|
||||
// NOTE: We probably aren't going to use this _right now_ so we've kept it pretty minimal. But it is
|
||||
// possible that we could query wadm for more general data about the lattice in the future, so we do
|
||||
// want to store this
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, Default)]
|
||||
pub struct Provider {
|
||||
/// ID of the provider, normally a public nkey
|
||||
pub id: String,
|
||||
|
||||
/// Name of the provider
|
||||
pub name: String,
|
||||
|
||||
/// Issuer of the (signed) provider
|
||||
pub issuer: String,
|
||||
|
||||
/// The reference used to start the provider. Can be empty if it was started from a file
|
||||
pub reference: String,
|
||||
|
||||
/// The hosts this provider is running on
|
||||
pub hosts: HashMap<String, ProviderStatus>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
|
||||
pub enum ProviderStatus {
|
||||
/// The provider is starting and hasn't returned a heartbeat yet
|
||||
Pending,
|
||||
/// The provider is running
|
||||
Running,
|
||||
/// The provider failed to start
|
||||
// TODO(thomastaylor312): In the future, we'll probably want to decay out a provider from state
|
||||
// if it hasn't had a heartbeat
|
||||
// if it fails a recent health check
|
||||
Failed,
|
||||
}
|
||||
|
||||
impl Default for ProviderStatus {
|
||||
fn default() -> Self {
|
||||
Self::Pending
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for ProviderStatus {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"{}",
|
||||
match self {
|
||||
Self::Pending => "pending".to_string(),
|
||||
Self::Running => "running".to_string(),
|
||||
Self::Failed => "failed".to_string(),
|
||||
}
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl StateKind for Provider {
|
||||
const KIND: &'static str = "provider";
|
||||
}
|
||||
|
||||
impl From<ProviderStarted> for Provider {
|
||||
fn from(value: ProviderStarted) -> Self {
|
||||
let (name, issuer) = value.claims.map(|c| (c.name, c.issuer)).unwrap_or_default();
|
||||
Provider {
|
||||
id: value.provider_id,
|
||||
name,
|
||||
issuer,
|
||||
reference: value.image_ref,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&ProviderStarted> for Provider {
|
||||
fn from(value: &ProviderStarted) -> Self {
|
||||
Provider {
|
||||
id: value.provider_id.clone(),
|
||||
name: value
|
||||
.claims
|
||||
.as_ref()
|
||||
.map(|c| c.name.clone())
|
||||
.unwrap_or_default(),
|
||||
issuer: value
|
||||
.claims
|
||||
.as_ref()
|
||||
.map(|c| c.issuer.clone())
|
||||
.unwrap_or_default(),
|
||||
reference: value.image_ref.clone(),
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A representation of a unique component (as defined by its annotations) and its count. This struct
|
||||
/// has a custom implementation of PartialEq and Hash that _only_ compares the annotations. This is
|
||||
/// not a very "pure" way of doing things, but it lets us access current counts of components without
|
||||
/// having to do a bunch of extra work.
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, Default, Eq)]
|
||||
pub struct WadmComponentInfo {
|
||||
pub annotations: BTreeMap<String, String>,
|
||||
pub count: usize,
|
||||
}
|
||||
|
||||
impl PartialEq for WadmComponentInfo {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.annotations == other.annotations
|
||||
}
|
||||
}
|
||||
|
||||
impl Hash for WadmComponentInfo {
|
||||
fn hash<H: Hasher>(&self, state: &mut H) {
|
||||
self.annotations.hash(state);
|
||||
}
|
||||
}
|
||||
|
||||
impl Borrow<BTreeMap<String, String>> for WadmComponentInfo {
|
||||
fn borrow(&self) -> &BTreeMap<String, String> {
|
||||
&self.annotations
|
||||
}
|
||||
}
|
||||
|
||||
/// A wasmCloud Component
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, Default)]
|
||||
pub struct Component {
|
||||
/// ID of the component
|
||||
pub id: String,
|
||||
|
||||
/// Name of the component
|
||||
pub name: String,
|
||||
|
||||
/// Issuer of the (signed) component
|
||||
pub issuer: String,
|
||||
|
||||
/// All instances of this component running in the lattice, keyed by the host ID and contains a hash
|
||||
/// map of annotations -> count for each set of unique annotations
|
||||
pub instances: HashMap<String, HashSet<WadmComponentInfo>>,
|
||||
|
||||
/// The reference used to start the component. Can be empty if it was started from a file
|
||||
pub reference: String,
|
||||
}
|
||||
|
||||
impl Component {
|
||||
/// A helper method that returns the total count of running copies of this component, regardless of
|
||||
/// which host they are running on
|
||||
pub fn count(&self) -> usize {
|
||||
self.instances
|
||||
.values()
|
||||
.map(|instances| instances.iter().map(|info| info.count).sum::<usize>())
|
||||
.sum()
|
||||
}
|
||||
|
||||
/// A helper method that returns the total count of running copies of this component on a specific
|
||||
/// host
|
||||
pub fn count_for_host(&self, host_id: &str) -> usize {
|
||||
self.instances
|
||||
.get(host_id)
|
||||
.map(|instances| instances.iter().map(|info| info.count).sum::<usize>())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
}
|
||||
|
||||
impl StateKind for Component {
|
||||
const KIND: &'static str = "component";
|
||||
}
|
||||
|
||||
impl From<ComponentScaled> for Component {
|
||||
fn from(value: ComponentScaled) -> Self {
|
||||
let (name, issuer) = value.claims.map(|c| (c.name, c.issuer)).unwrap_or_default();
|
||||
Component {
|
||||
id: value.component_id,
|
||||
name,
|
||||
issuer,
|
||||
reference: value.image_ref,
|
||||
instances: HashMap::from_iter([(
|
||||
value.host_id,
|
||||
HashSet::from_iter([WadmComponentInfo {
|
||||
annotations: value.annotations,
|
||||
count: value.max_instances,
|
||||
}]),
|
||||
)]),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&ComponentScaled> for Component {
|
||||
fn from(value: &ComponentScaled) -> Self {
|
||||
Component {
|
||||
id: value.component_id.clone(),
|
||||
name: value
|
||||
.claims
|
||||
.as_ref()
|
||||
.map(|c| c.name.clone())
|
||||
.unwrap_or_default(),
|
||||
issuer: value
|
||||
.claims
|
||||
.as_ref()
|
||||
.map(|c| c.issuer.clone())
|
||||
.unwrap_or_default(),
|
||||
reference: value.image_ref.clone(),
|
||||
instances: HashMap::from_iter([(
|
||||
value.host_id.clone(),
|
||||
HashSet::from_iter([WadmComponentInfo {
|
||||
annotations: value.annotations.clone(),
|
||||
count: value.max_instances,
|
||||
}]),
|
||||
)]),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A wasmCloud host
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, Default)]
|
||||
pub struct Host {
|
||||
/// A map of component IDs to the number of instances of the component running on the host
|
||||
#[serde(alias = "actors")]
|
||||
pub components: HashMap<String, usize>,
|
||||
|
||||
/// The randomly generated friendly name of the host
|
||||
pub friendly_name: String,
|
||||
|
||||
/// An arbitrary hashmap of string labels attached to the host
|
||||
pub labels: HashMap<String, String>,
|
||||
|
||||
/// A set of running providers on the host
|
||||
pub providers: HashSet<ProviderInfo>,
|
||||
|
||||
/// The current uptime of the host in seconds
|
||||
pub uptime_seconds: usize,
|
||||
|
||||
/// The host version that is running
|
||||
// NOTE(thomastaylor312): Right now a host started event doesn't emit the version, so a newly
|
||||
// started host can't be registered with one. We should probably add that to the host started
|
||||
// event and then modify it here
|
||||
pub version: Option<Version>,
|
||||
|
||||
/// The ID of this host, in the form of its nkey encoded public key
|
||||
pub id: String,
|
||||
|
||||
/// The time when this host was last seen, as a RFC3339 timestamp
|
||||
pub last_seen: DateTime<Utc>,
|
||||
}
|
||||
|
||||
impl StateKind for Host {
|
||||
const KIND: &'static str = "host";
|
||||
}
|
||||
|
||||
impl From<HostStarted> for Host {
|
||||
fn from(value: HostStarted) -> Self {
|
||||
Host {
|
||||
friendly_name: value.friendly_name,
|
||||
id: value.id,
|
||||
labels: value.labels,
|
||||
last_seen: Utc::now(),
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&HostStarted> for Host {
|
||||
fn from(value: &HostStarted) -> Self {
|
||||
Host {
|
||||
friendly_name: value.friendly_name.clone(),
|
||||
id: value.id.clone(),
|
||||
labels: value.labels.clone(),
|
||||
last_seen: Utc::now(),
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<HostHeartbeat> for Host {
|
||||
fn from(value: HostHeartbeat) -> Self {
|
||||
let components = value
|
||||
.components
|
||||
.into_iter()
|
||||
.map(|component| {
|
||||
(
|
||||
component.id().into(), // SAFETY: Unlikely to not fit into a usize, but fallback just in case
|
||||
component.max_instances().try_into().unwrap_or(usize::MAX),
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
|
||||
let providers = value
|
||||
.providers
|
||||
.into_iter()
|
||||
.map(|provider| ProviderInfo {
|
||||
provider_id: provider.id().to_string(),
|
||||
// NOTE: Provider should _always_ have an image ref. The control interface type should be updated.
|
||||
provider_ref: provider.image_ref().map(String::from).unwrap_or_default(),
|
||||
annotations: provider
|
||||
.annotations()
|
||||
.map(ToOwned::to_owned)
|
||||
.map(BTreeMap::from_iter)
|
||||
.unwrap_or_default(),
|
||||
})
|
||||
.collect();
|
||||
|
||||
Host {
|
||||
components,
|
||||
friendly_name: value.friendly_name,
|
||||
labels: value.labels,
|
||||
providers,
|
||||
uptime_seconds: value.uptime_seconds as usize,
|
||||
version: Some(value.version),
|
||||
id: value.host_id,
|
||||
last_seen: Utc::now(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&HostHeartbeat> for Host {
|
||||
fn from(value: &HostHeartbeat) -> Self {
|
||||
let components = value
|
||||
.components
|
||||
.iter()
|
||||
.map(|component| {
|
||||
(
|
||||
component.id().to_owned(),
|
||||
// SAFETY: Unlikely to not fit into a usize, but fallback just in case
|
||||
component.max_instances().try_into().unwrap_or(usize::MAX),
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
|
||||
let providers = value
|
||||
.providers
|
||||
.iter()
|
||||
.map(|provider| ProviderInfo {
|
||||
provider_id: provider.id().to_owned(),
|
||||
provider_ref: provider.image_ref().map(String::from).unwrap_or_default(),
|
||||
annotations: provider
|
||||
.annotations()
|
||||
.map(ToOwned::to_owned)
|
||||
.map(BTreeMap::from_iter)
|
||||
.unwrap_or_default(),
|
||||
})
|
||||
.collect();
|
||||
|
||||
Host {
|
||||
components,
|
||||
friendly_name: value.friendly_name.clone(),
|
||||
labels: value.labels.clone(),
|
||||
providers,
|
||||
uptime_seconds: value.uptime_seconds as usize,
|
||||
version: Some(value.version.clone()),
|
||||
id: value.host_id.clone(),
|
||||
last_seen: Utc::now(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -3,11 +3,15 @@ use std::{collections::HashMap, sync::Arc};
|
|||
|
||||
use serde::{de::DeserializeOwned, Serialize};
|
||||
use tokio::sync::RwLock;
|
||||
use wasmcloud_control_interface::{HostInventory, LinkDefinition};
|
||||
use wasmcloud_control_interface::{HostInventory, Link};
|
||||
use wasmcloud_secrets_types::SecretConfig;
|
||||
|
||||
use crate::publisher::Publisher;
|
||||
use crate::storage::StateKind;
|
||||
use crate::workers::{Claims, ClaimsSource, InventorySource, LinkSource};
|
||||
use crate::workers::{
|
||||
secret_config_from_map, Claims, ClaimsSource, ConfigSource, InventorySource, LinkSource,
|
||||
SecretSource,
|
||||
};
|
||||
|
||||
fn generate_key<T: StateKind>(lattice_id: &str) -> String {
|
||||
format!("{}_{lattice_id}", T::KIND)
|
||||
|
|
@ -58,7 +62,7 @@ impl crate::storage::ReadStore for TestStore {
|
|||
impl crate::storage::Store for TestStore {
|
||||
async fn store_many<T, D>(&self, lattice_id: &str, data: D) -> Result<(), Self::Error>
|
||||
where
|
||||
T: Serialize + DeserializeOwned + StateKind + Send,
|
||||
T: Serialize + DeserializeOwned + StateKind + Send + Sync + Clone,
|
||||
D: IntoIterator<Item = (String, T)> + Send,
|
||||
{
|
||||
let key = generate_key::<T>(lattice_id);
|
||||
|
|
@ -79,7 +83,7 @@ impl crate::storage::Store for TestStore {
|
|||
|
||||
async fn delete_many<T, D, K>(&self, lattice_id: &str, data: D) -> Result<(), Self::Error>
|
||||
where
|
||||
T: Serialize + DeserializeOwned + StateKind + Send,
|
||||
T: Serialize + DeserializeOwned + StateKind + Send + Sync,
|
||||
D: IntoIterator<Item = K> + Send,
|
||||
K: AsRef<str>,
|
||||
{
|
||||
|
|
@ -107,7 +111,8 @@ impl crate::storage::Store for TestStore {
|
|||
pub struct TestLatticeSource {
|
||||
pub claims: HashMap<String, Claims>,
|
||||
pub inventory: Arc<RwLock<HashMap<String, HostInventory>>>,
|
||||
pub links: Vec<LinkDefinition>,
|
||||
pub links: Vec<Link>,
|
||||
pub config: HashMap<String, HashMap<String, String>>,
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
|
|
@ -126,11 +131,30 @@ impl InventorySource for TestLatticeSource {
|
|||
|
||||
#[async_trait::async_trait]
|
||||
impl LinkSource for TestLatticeSource {
|
||||
async fn get_links(&self) -> anyhow::Result<Vec<LinkDefinition>> {
|
||||
async fn get_links(&self) -> anyhow::Result<Vec<Link>> {
|
||||
Ok(self.links.clone())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl ConfigSource for TestLatticeSource {
|
||||
async fn get_config(&self, name: &str) -> anyhow::Result<Option<HashMap<String, String>>> {
|
||||
Ok(self.config.get(name).cloned())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl SecretSource for TestLatticeSource {
|
||||
async fn get_secret(&self, name: &str) -> anyhow::Result<Option<SecretConfig>> {
|
||||
let secret_config = self
|
||||
.get_config(format!("secret_{name}").as_str())
|
||||
.await
|
||||
.map_err(|e| anyhow::anyhow!("{e:?}"))?;
|
||||
|
||||
secret_config.map(secret_config_from_map).transpose()
|
||||
}
|
||||
}
|
||||
|
||||
/// A publisher that does nothing
|
||||
#[derive(Clone, Default)]
|
||||
pub struct NoopPublisher;
|
||||
|
|
@ -6,7 +6,6 @@ use crate::{
|
|||
manager::{WorkError, WorkResult, Worker},
|
||||
ScopedMessage,
|
||||
},
|
||||
model::CapabilityConfig,
|
||||
};
|
||||
|
||||
use super::insert_managed_annotations;
|
||||
|
|
@ -31,31 +30,19 @@ impl Worker for CommandWorker {
|
|||
#[instrument(level = "trace", skip_all)]
|
||||
async fn do_work(&self, mut message: ScopedMessage<Self::Message>) -> WorkResult<()> {
|
||||
let res = match message.as_ref() {
|
||||
Command::StartActor(actor) => {
|
||||
trace!(command = ?actor, "Handling start actor command");
|
||||
Command::ScaleComponent(component) => {
|
||||
trace!(command = ?component, "Handling scale component command");
|
||||
// Order here is intentional to prevent scalers from overwriting managed annotations
|
||||
let mut annotations = actor.annotations.clone();
|
||||
insert_managed_annotations(&mut annotations, &actor.model_name);
|
||||
let mut annotations = component.annotations.clone();
|
||||
insert_managed_annotations(&mut annotations, &component.model_name);
|
||||
self.client
|
||||
.start_actor(
|
||||
&actor.host_id,
|
||||
&actor.reference,
|
||||
actor.count as u16,
|
||||
Some(annotations),
|
||||
)
|
||||
.await
|
||||
}
|
||||
Command::StopActor(actor) => {
|
||||
trace!(command = ?actor, "Handling stop actor command");
|
||||
// Order here is intentional to prevent scalers from overwriting managed annotations
|
||||
let mut annotations = actor.annotations.clone();
|
||||
insert_managed_annotations(&mut annotations, &actor.model_name);
|
||||
self.client
|
||||
.stop_actor(
|
||||
&actor.host_id,
|
||||
&actor.actor_id,
|
||||
actor.count as u16,
|
||||
Some(annotations),
|
||||
.scale_component(
|
||||
&component.host_id,
|
||||
&component.reference,
|
||||
&component.component_id,
|
||||
component.count,
|
||||
Some(annotations.into_iter().collect()),
|
||||
component.config.clone(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
|
@ -64,21 +51,13 @@ impl Worker for CommandWorker {
|
|||
// Order here is intentional to prevent scalers from overwriting managed annotations
|
||||
let mut annotations = prov.annotations.clone();
|
||||
insert_managed_annotations(&mut annotations, &prov.model_name);
|
||||
let config = prov.config.clone().map(|conf| match conf {
|
||||
// NOTE: We validate the serialization when we store the model so this should be
|
||||
// safe to unwrap
|
||||
CapabilityConfig::Json(conf) => {
|
||||
serde_json::to_string(&conf).unwrap_or_default()
|
||||
}
|
||||
CapabilityConfig::Opaque(conf) => conf,
|
||||
});
|
||||
self.client
|
||||
.start_provider(
|
||||
&prov.host_id,
|
||||
&prov.reference,
|
||||
prov.link_name.clone(),
|
||||
Some(annotations),
|
||||
config,
|
||||
&prov.provider_id,
|
||||
Some(annotations.into_iter().collect()),
|
||||
prov.config.clone(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
|
@ -88,44 +67,51 @@ impl Worker for CommandWorker {
|
|||
let mut annotations = prov.annotations.clone();
|
||||
insert_managed_annotations(&mut annotations, &prov.model_name);
|
||||
self.client
|
||||
.stop_provider(
|
||||
&prov.host_id,
|
||||
&prov.provider_id,
|
||||
prov.link_name
|
||||
.as_deref()
|
||||
.unwrap_or(crate::DEFAULT_LINK_NAME),
|
||||
&prov.contract_id,
|
||||
Some(annotations),
|
||||
)
|
||||
.stop_provider(&prov.host_id, &prov.provider_id)
|
||||
.await
|
||||
}
|
||||
Command::PutLinkdef(ld) => {
|
||||
Command::PutLink(ld) => {
|
||||
trace!(command = ?ld, "Handling put linkdef command");
|
||||
// TODO(thomastaylor312): We should probably change ScopedMessage to allow us `pub`
|
||||
// access to the inner type so we don't have to clone, but no need to worry for now
|
||||
self.client.put_link(ld.clone().try_into()?).await
|
||||
}
|
||||
Command::DeleteLink(ld) => {
|
||||
trace!(command = ?ld, "Handling delete linkdef command");
|
||||
self.client
|
||||
.advertise_link(
|
||||
&ld.actor_id,
|
||||
&ld.provider_id,
|
||||
&ld.contract_id,
|
||||
.delete_link(
|
||||
&ld.source_id,
|
||||
&ld.link_name,
|
||||
ld.values.clone(),
|
||||
&ld.wit_namespace,
|
||||
&ld.wit_package,
|
||||
)
|
||||
.await
|
||||
}
|
||||
Command::DeleteLinkdef(ld) => {
|
||||
trace!(command = ?ld, "Handling delete linkdef command");
|
||||
Command::PutConfig(put_config) => {
|
||||
trace!(command = ?put_config, "Handling put config command");
|
||||
self.client
|
||||
.remove_link(&ld.actor_id, &ld.contract_id, &ld.link_name)
|
||||
.put_config(&put_config.config_name, put_config.config.clone())
|
||||
.await
|
||||
}
|
||||
Command::DeleteConfig(delete_config) => {
|
||||
trace!(command = ?delete_config, "Handling delete config command");
|
||||
self.client.delete_config(&delete_config.config_name).await
|
||||
}
|
||||
}
|
||||
.map_err(|e| anyhow::anyhow!("{e:?}"));
|
||||
|
||||
if let Err(e) = res {
|
||||
message.nack().await;
|
||||
return Err(WorkError::Other(e.into()));
|
||||
match res {
|
||||
Ok(ack) if !ack.succeeded() => {
|
||||
message.nack().await;
|
||||
Err(WorkError::Other(
|
||||
anyhow::anyhow!("{}", ack.message()).into(),
|
||||
))
|
||||
}
|
||||
Ok(_) => message.ack().await.map_err(WorkError::from),
|
||||
Err(e) => {
|
||||
message.nack().await;
|
||||
Err(WorkError::Other(e.into()))
|
||||
}
|
||||
}
|
||||
message.ack().await.map_err(WorkError::from)
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
|
|
@ -0,0 +1,324 @@
|
|||
use anyhow::{bail, Context};
|
||||
use async_nats::jetstream::stream::Stream;
|
||||
use std::collections::{BTreeMap, HashMap};
|
||||
use std::fmt::Debug;
|
||||
use wasmcloud_secrets_types::SecretConfig;
|
||||
|
||||
use tracing::{debug, instrument, trace, warn};
|
||||
use wadm_types::api::Status;
|
||||
use wasmcloud_control_interface::{HostInventory, Link};
|
||||
|
||||
use crate::{commands::Command, publisher::Publisher, APP_SPEC_ANNOTATION};
|
||||
|
||||
/// A subset of needed claims to help populate state
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct Claims {
|
||||
pub name: String,
|
||||
pub capabilities: Vec<String>,
|
||||
pub issuer: String,
|
||||
}
|
||||
|
||||
/// A trait for anything that can fetch a set of claims information about components.
|
||||
///
|
||||
/// NOTE: This trait right now exists as a convenience for two things: First, testing. Without
|
||||
/// something like this we require a network connection to unit test. Second, there is no concrete
|
||||
/// claims type returned from the control interface client. This allows us to abstract that away
|
||||
/// until such time that we do export one and we'll be able to do so without breaking our API
|
||||
#[async_trait::async_trait]
|
||||
pub trait ClaimsSource {
|
||||
async fn get_claims(&self) -> anyhow::Result<HashMap<String, Claims>>;
|
||||
}
|
||||
|
||||
/// NOTE(brooksmtownsend): This trait exists in order to query the hosts inventory
|
||||
/// upon receiving a heartbeat since the heartbeat doesn't contain enough
|
||||
/// information to properly update the stored data for components
|
||||
#[async_trait::async_trait]
|
||||
pub trait InventorySource {
|
||||
async fn get_inventory(&self, host_id: &str) -> anyhow::Result<HostInventory>;
|
||||
}
|
||||
|
||||
/// A trait for anything that can fetch the links in a lattice
|
||||
///
|
||||
/// NOTE: This trait right now exists as a convenience for testing. It isn't ideal to have this just
|
||||
/// due to testing, but it does allow us to abstract away the concrete type of the client
|
||||
#[async_trait::async_trait]
|
||||
pub trait LinkSource {
|
||||
async fn get_links(&self) -> anyhow::Result<Vec<Link>>;
|
||||
}
|
||||
|
||||
/// A trait for anything that can fetch a piece of named configuration
|
||||
///
|
||||
/// In the future this could be expanded to fetch more than just a single piece of configuration,
|
||||
/// but for now it's limited to a single config in an attempt to keep the scope of fetching
|
||||
/// configuration small, and efficiently pass around data.
|
||||
#[async_trait::async_trait]
|
||||
pub trait ConfigSource {
|
||||
async fn get_config(&self, name: &str) -> anyhow::Result<Option<HashMap<String, String>>>;
|
||||
}
|
||||
|
||||
/// A trait for anything that can fetch a secret.
|
||||
#[async_trait::async_trait]
|
||||
pub trait SecretSource {
|
||||
async fn get_secret(&self, name: &str) -> anyhow::Result<Option<SecretConfig>>;
|
||||
}
|
||||
|
||||
/// Converts the configuration map of strings to a secret config
|
||||
pub fn secret_config_from_map(map: HashMap<String, String>) -> anyhow::Result<SecretConfig> {
|
||||
match (
|
||||
map.get("name"),
|
||||
map.get("backend"),
|
||||
map.get("key"),
|
||||
map.get("policy"),
|
||||
map.get("type"),
|
||||
) {
|
||||
(None, _, _, _, _) => bail!("missing name field in secret config"),
|
||||
(_, None, _, _, _) => bail!("missing backend field in secret config"),
|
||||
(_, _, None, _, _) => bail!("missing key field in secret config"),
|
||||
(_, _, _, None, _) => bail!("missing policy field in secret config"),
|
||||
(_, _, _, _, None) => bail!("missing type field in secret config"),
|
||||
(Some(name), Some(backend), Some(key), Some(policy), Some(secret_type)) => {
|
||||
Ok(SecretConfig {
|
||||
name: name.to_string(),
|
||||
backend: backend.to_string(),
|
||||
key: key.to_string(),
|
||||
field: map.get("field").map(|f| f.to_string()),
|
||||
version: map.get("version").map(|v| v.to_string()),
|
||||
policy: serde_json::from_str(policy)
|
||||
.context("failed to deserialize policy from string")?,
|
||||
secret_type: secret_type.to_string(),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl ClaimsSource for wasmcloud_control_interface::Client {
|
||||
async fn get_claims(&self) -> anyhow::Result<HashMap<String, Claims>> {
|
||||
match self.get_claims().await.map_err(|e| anyhow::anyhow!("{e}")) {
|
||||
Ok(ctl_resp) if ctl_resp.succeeded() => {
|
||||
let claims = ctl_resp.data().context("missing claims data")?.to_owned();
|
||||
Ok(claims
|
||||
.into_iter()
|
||||
.filter_map(|mut claim| {
|
||||
// NOTE(thomastaylor312): I'm removing instead of getting since we own the data and I
|
||||
// don't want to clone every time we do this
|
||||
|
||||
// If we don't find a subject, we can't actually get the component ID, so skip this one
|
||||
Some((
|
||||
claim.remove("sub")?,
|
||||
Claims {
|
||||
name: claim.remove("name").unwrap_or_default(),
|
||||
capabilities: claim
|
||||
.remove("caps")
|
||||
.map(|raw| raw.split(',').map(|s| s.to_owned()).collect())
|
||||
.unwrap_or_default(),
|
||||
issuer: claim.remove("iss").unwrap_or_default(),
|
||||
},
|
||||
))
|
||||
})
|
||||
.collect())
|
||||
}
|
||||
_ => Err(anyhow::anyhow!("Failed to get claims")),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl InventorySource for wasmcloud_control_interface::Client {
|
||||
async fn get_inventory(&self, host_id: &str) -> anyhow::Result<HostInventory> {
|
||||
match self
|
||||
.get_host_inventory(host_id)
|
||||
.await
|
||||
.map_err(|e| anyhow::anyhow!("{e:?}"))?
|
||||
{
|
||||
ctl_resp if ctl_resp.succeeded() && ctl_resp.data().is_some() => Ok(ctl_resp
|
||||
.into_data()
|
||||
.context("missing host inventory data")?),
|
||||
ctl_resp => Err(anyhow::anyhow!(
|
||||
"Failed to get inventory for host {host_id}, {}",
|
||||
ctl_resp.message()
|
||||
)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// NOTE(thomastaylor312): A future improvement here that would make things more efficient is if this
|
||||
// was just a cache of the links. On startup, it could fetch once, and then it could subscribe to
|
||||
// the KV store for updates. This would allow us to not have to fetch every time we need to get
|
||||
// links
|
||||
#[async_trait::async_trait]
|
||||
impl LinkSource for wasmcloud_control_interface::Client {
|
||||
async fn get_links(&self) -> anyhow::Result<Vec<Link>> {
|
||||
match self
|
||||
.get_links()
|
||||
.await
|
||||
.map_err(|e| anyhow::anyhow!("{e:?}"))?
|
||||
{
|
||||
ctl_resp if ctl_resp.succeeded() && ctl_resp.data().is_some() => {
|
||||
Ok(ctl_resp.into_data().context("missing link data")?)
|
||||
}
|
||||
ctl_resp => Err(anyhow::anyhow!(
|
||||
"Failed to get links, {}",
|
||||
ctl_resp.message()
|
||||
)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl ConfigSource for wasmcloud_control_interface::Client {
|
||||
async fn get_config(&self, name: &str) -> anyhow::Result<Option<HashMap<String, String>>> {
|
||||
match self
|
||||
.get_config(name)
|
||||
.await
|
||||
.map_err(|e| anyhow::anyhow!("{e:?}"))?
|
||||
{
|
||||
ctl_resp if ctl_resp.succeeded() && ctl_resp.data().is_some() => {
|
||||
Ok(ctl_resp.into_data())
|
||||
}
|
||||
// TODO(https://github.com/wasmCloud/wasmCloud/issues/1906): The control interface should return a None when config isn't found
|
||||
// instead of returning an error.
|
||||
ctl_resp => {
|
||||
debug!("Failed to get config for {name}, {}", ctl_resp.message());
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl SecretSource for wasmcloud_control_interface::Client {
|
||||
async fn get_secret(&self, name: &str) -> anyhow::Result<Option<SecretConfig>> {
|
||||
match self
|
||||
.get_config(name)
|
||||
.await
|
||||
.map_err(|e| anyhow::anyhow!("{e:?}"))?
|
||||
{
|
||||
ctl_resp if ctl_resp.succeeded() && ctl_resp.data().is_some() => {
|
||||
secret_config_from_map(ctl_resp.into_data().context("missing secret data")?)
|
||||
.map(Some)
|
||||
}
|
||||
ctl_resp if ctl_resp.data().is_none() => {
|
||||
debug!("Failed to get secret for {name}, {}", ctl_resp.message());
|
||||
Ok(None)
|
||||
}
|
||||
ctl_resp => {
|
||||
debug!("Failed to get secret for {name}, {}", ctl_resp.message());
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A struct for publishing status updates
|
||||
#[derive(Clone)]
|
||||
pub struct StatusPublisher<Pub> {
|
||||
publisher: Pub,
|
||||
// Stream for querying current status to avoid duplicate updates
|
||||
status_stream: Option<Stream>,
|
||||
// Topic prefix, e.g. wadm.status.default
|
||||
topic_prefix: String,
|
||||
}
|
||||
|
||||
impl<Pub> StatusPublisher<Pub> {
|
||||
/// Creates an new status publisher configured with the given publisher that will send to the
|
||||
/// manifest status topic using the given prefix
|
||||
pub fn new(
|
||||
publisher: Pub,
|
||||
status_stream: Option<Stream>,
|
||||
topic_prefix: &str,
|
||||
) -> StatusPublisher<Pub> {
|
||||
StatusPublisher {
|
||||
publisher,
|
||||
status_stream,
|
||||
topic_prefix: topic_prefix.to_owned(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<Pub: Publisher> StatusPublisher<Pub> {
|
||||
#[instrument(level = "trace", skip(self))]
|
||||
pub async fn publish_status(&self, name: &str, status: Status) -> anyhow::Result<()> {
|
||||
let topic = format!("{}.{name}", self.topic_prefix);
|
||||
|
||||
// NOTE(brooksmtownsend): This direct get may not always query the jetstream leader. In the
|
||||
// worst case where the last message isn't all the way updated, we may publish a duplicate
|
||||
// status. This is an acceptable tradeoff to not have to query the leader directly every time.
|
||||
let prev_status = if let Some(status_stream) = &self.status_stream {
|
||||
status_stream
|
||||
.direct_get_last_for_subject(&topic)
|
||||
.await
|
||||
.map(|m| serde_json::from_slice::<Status>(&m.payload).ok())
|
||||
.ok()
|
||||
.flatten()
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
match prev_status {
|
||||
// If the status hasn't changed, skip publishing
|
||||
Some(prev_status) if prev_status == status => {
|
||||
trace!(%name, "Status hasn't changed since last update. Skipping");
|
||||
Ok(())
|
||||
}
|
||||
_ => {
|
||||
self.publisher
|
||||
.publish(serde_json::to_vec(&status)?, Some(&topic))
|
||||
.await
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A struct for publishing commands
|
||||
#[derive(Clone)]
|
||||
pub struct CommandPublisher<Pub> {
|
||||
publisher: Pub,
|
||||
topic: String,
|
||||
}
|
||||
|
||||
impl<Pub> CommandPublisher<Pub> {
|
||||
/// Creates an new command publisher configured with the given publisher that will send to the
|
||||
/// specified topic
|
||||
pub fn new(publisher: Pub, topic: &str) -> CommandPublisher<Pub> {
|
||||
CommandPublisher {
|
||||
publisher,
|
||||
topic: topic.to_owned(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<Pub: Publisher> CommandPublisher<Pub> {
|
||||
#[instrument(level = "trace", skip(self))]
|
||||
pub async fn publish_commands(&self, commands: Vec<Command>) -> anyhow::Result<()> {
|
||||
futures::future::join_all(
|
||||
commands
|
||||
.into_iter()
|
||||
// Generally commands are purely internal to wadm and so shouldn't have an error serializing. If it does, warn and continue onward
|
||||
.filter_map(|command| {
|
||||
match serde_json::to_vec(&command) {
|
||||
Ok(data) => Some(data),
|
||||
Err(e) => {
|
||||
warn!(error = %e, ?command, "Got malformed command when trying to serialize. Skipping this command");
|
||||
None
|
||||
}
|
||||
}
|
||||
})
|
||||
.map(|data| self.publisher.publish(data, Some(&self.topic))),
|
||||
)
|
||||
.await
|
||||
.into_iter()
|
||||
.collect::<anyhow::Result<()>>()
|
||||
}
|
||||
}
|
||||
|
||||
/// Inserts managed annotations to the given `annotations` HashMap.
|
||||
pub fn insert_managed_annotations(annotations: &mut BTreeMap<String, String>, model_name: &str) {
|
||||
annotations.extend([
|
||||
(
|
||||
crate::MANAGED_BY_ANNOTATION.to_owned(),
|
||||
crate::MANAGED_BY_IDENTIFIER.to_owned(),
|
||||
),
|
||||
(APP_SPEC_ANNOTATION.to_owned(), model_name.to_owned()),
|
||||
])
|
||||
}
|
||||
|
|
@ -7,5 +7,6 @@ mod event;
|
|||
mod event_helpers;
|
||||
|
||||
pub use command::CommandWorker;
|
||||
pub(crate) use event::get_commands_and_result;
|
||||
pub use event::EventWorker;
|
||||
pub use event_helpers::*;
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 677 B |
|
|
@ -0,0 +1,704 @@
|
|||
{
|
||||
"nodes": {
|
||||
"advisory-db": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1737565911,
|
||||
"narHash": "sha256-WxIWw1mSPJVU1JfIcTdIubU5UoIwwR8h7UcXop/6htg=",
|
||||
"owner": "rustsec",
|
||||
"repo": "advisory-db",
|
||||
"rev": "ffa26704690a3dc403edcd94baef103ee48f66eb",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "rustsec",
|
||||
"repo": "advisory-db",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"advisory-db_2": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1730464311,
|
||||
"narHash": "sha256-9xJoP1766XJSO1Qr0Lxg2P6dwPncTr3BJYlFMSXBd/E=",
|
||||
"owner": "rustsec",
|
||||
"repo": "advisory-db",
|
||||
"rev": "f3460e5ed91658ab94fa41908cfa44991f9f4f02",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "rustsec",
|
||||
"repo": "advisory-db",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"crane": {
|
||||
"locked": {
|
||||
"lastModified": 1737689766,
|
||||
"narHash": "sha256-ivVXYaYlShxYoKfSo5+y5930qMKKJ8CLcAoIBPQfJ6s=",
|
||||
"owner": "ipetkov",
|
||||
"repo": "crane",
|
||||
"rev": "6fe74265bbb6d016d663b1091f015e2976c4a527",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "ipetkov",
|
||||
"repo": "crane",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"crane_2": {
|
||||
"locked": {
|
||||
"lastModified": 1730652660,
|
||||
"narHash": "sha256-+XVYfmVXAiYA0FZT7ijHf555dxCe+AoAT5A6RU+6vSo=",
|
||||
"owner": "ipetkov",
|
||||
"repo": "crane",
|
||||
"rev": "a4ca93905455c07cb7e3aca95d4faf7601cba458",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "ipetkov",
|
||||
"repo": "crane",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"crane_3": {
|
||||
"inputs": {
|
||||
"flake-compat": "flake-compat",
|
||||
"flake-utils": [
|
||||
"wasmcloud",
|
||||
"nixify",
|
||||
"nix-log",
|
||||
"nixify",
|
||||
"flake-utils"
|
||||
],
|
||||
"nixpkgs": [
|
||||
"wasmcloud",
|
||||
"nixify",
|
||||
"nix-log",
|
||||
"nixify",
|
||||
"nixpkgs"
|
||||
],
|
||||
"rust-overlay": [
|
||||
"wasmcloud",
|
||||
"nixify",
|
||||
"nix-log",
|
||||
"nixify",
|
||||
"rust-overlay"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1679255352,
|
||||
"narHash": "sha256-nkGwGuNkhNrnN33S4HIDV5NzkzMLU5mNStRn9sZwq8c=",
|
||||
"owner": "rvolosatovs",
|
||||
"repo": "crane",
|
||||
"rev": "cec65880599a4ec6426186e24342e663464f5933",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "rvolosatovs",
|
||||
"ref": "feat/wit",
|
||||
"repo": "crane",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"fenix": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"nixpkgs"
|
||||
],
|
||||
"rust-analyzer-src": []
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1738132439,
|
||||
"narHash": "sha256-7q5vsyPQf6/aQEKAOgZ4ggv++Z2ppPSuPCGKlbPcM88=",
|
||||
"owner": "nix-community",
|
||||
"repo": "fenix",
|
||||
"rev": "f94e521c1922784c377a2cace90aa89a6b8a1011",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-community",
|
||||
"repo": "fenix",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"fenix_2": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"wasmcloud",
|
||||
"nixify",
|
||||
"nixpkgs-nixos"
|
||||
],
|
||||
"rust-analyzer-src": "rust-analyzer-src"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1731047492,
|
||||
"narHash": "sha256-F4h8YtTzPWv0/1Z6fc8fMSqKpn7YhOjlgp66cr15tEo=",
|
||||
"owner": "nix-community",
|
||||
"repo": "fenix",
|
||||
"rev": "da6332e801fbb0418f80f20cefa947c5fe5c18c9",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-community",
|
||||
"repo": "fenix",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"fenix_3": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"wasmcloud",
|
||||
"nixify",
|
||||
"nix-log",
|
||||
"nixify",
|
||||
"nixpkgs"
|
||||
],
|
||||
"rust-analyzer-src": "rust-analyzer-src_2"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1679552560,
|
||||
"narHash": "sha256-L9Se/F1iLQBZFGrnQJO8c9wE5z0Mf8OiycPGP9Y96hA=",
|
||||
"owner": "nix-community",
|
||||
"repo": "fenix",
|
||||
"rev": "fb49a9f5605ec512da947a21cc7e4551a3950397",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-community",
|
||||
"repo": "fenix",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-compat": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1673956053,
|
||||
"narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
|
||||
"owner": "edolstra",
|
||||
"repo": "flake-compat",
|
||||
"rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "edolstra",
|
||||
"repo": "flake-compat",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-utils": {
|
||||
"inputs": {
|
||||
"systems": "systems"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1731533236,
|
||||
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-utils_2": {
|
||||
"inputs": {
|
||||
"systems": "systems_2"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1726560853,
|
||||
"narHash": "sha256-X6rJYSESBVr3hBoH0WbKE5KvhPU5bloyZ2L4K60/fPQ=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "c1dfcf08411b08f6b8615f7d8971a2bfa81d5e8a",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-utils_3": {
|
||||
"locked": {
|
||||
"lastModified": 1678901627,
|
||||
"narHash": "sha256-U02riOqrKKzwjsxc/400XnElV+UtPUQWpANPlyazjH0=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "93a2b84fc4b70d9e089d029deacc3583435c2ed6",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"macos-sdk": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1694769349,
|
||||
"narHash": "sha256-TEvVJy+NMPyzgWSk/6S29ZMQR+ICFxSdS3tw247uhFc=",
|
||||
"type": "tarball",
|
||||
"url": "https://github.com/roblabla/MacOSX-SDKs/releases/download/macosx14.0/MacOSX14.0.sdk.tar.xz"
|
||||
},
|
||||
"original": {
|
||||
"type": "tarball",
|
||||
"url": "https://github.com/roblabla/MacOSX-SDKs/releases/download/macosx14.0/MacOSX14.0.sdk.tar.xz"
|
||||
}
|
||||
},
|
||||
"nix-filter": {
|
||||
"locked": {
|
||||
"lastModified": 1730207686,
|
||||
"narHash": "sha256-SCHiL+1f7q9TAnxpasriP6fMarWE5H43t25F5/9e28I=",
|
||||
"owner": "numtide",
|
||||
"repo": "nix-filter",
|
||||
"rev": "776e68c1d014c3adde193a18db9d738458cd2ba4",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "nix-filter",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nix-filter_2": {
|
||||
"locked": {
|
||||
"lastModified": 1678109515,
|
||||
"narHash": "sha256-C2X+qC80K2C1TOYZT8nabgo05Dw2HST/pSn6s+n6BO8=",
|
||||
"owner": "numtide",
|
||||
"repo": "nix-filter",
|
||||
"rev": "aa9ff6ce4a7f19af6415fb3721eaa513ea6c763c",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "nix-filter",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nix-flake-tests": {
|
||||
"locked": {
|
||||
"lastModified": 1677844186,
|
||||
"narHash": "sha256-ErJZ/Gs1rxh561CJeWP5bohA2IcTq1rDneu1WT6CVII=",
|
||||
"owner": "antifuchs",
|
||||
"repo": "nix-flake-tests",
|
||||
"rev": "bbd9216bd0f6495bb961a8eb8392b7ef55c67afb",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "antifuchs",
|
||||
"repo": "nix-flake-tests",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nix-flake-tests_2": {
|
||||
"locked": {
|
||||
"lastModified": 1677844186,
|
||||
"narHash": "sha256-ErJZ/Gs1rxh561CJeWP5bohA2IcTq1rDneu1WT6CVII=",
|
||||
"owner": "antifuchs",
|
||||
"repo": "nix-flake-tests",
|
||||
"rev": "bbd9216bd0f6495bb961a8eb8392b7ef55c67afb",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "antifuchs",
|
||||
"repo": "nix-flake-tests",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nix-log": {
|
||||
"inputs": {
|
||||
"nix-flake-tests": "nix-flake-tests",
|
||||
"nixify": "nixify_2",
|
||||
"nixlib": "nixlib_2"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1681933283,
|
||||
"narHash": "sha256-phDsQdaoUEI4DUTErR6Tz7lS0y3kXvDwwbqtxpzd0eo=",
|
||||
"owner": "rvolosatovs",
|
||||
"repo": "nix-log",
|
||||
"rev": "833d31e3c1a677eac81ba87e777afa5076071d66",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "rvolosatovs",
|
||||
"repo": "nix-log",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nix-log_2": {
|
||||
"inputs": {
|
||||
"nix-flake-tests": "nix-flake-tests_2",
|
||||
"nixify": [
|
||||
"wasmcloud",
|
||||
"wit-deps",
|
||||
"nixify"
|
||||
],
|
||||
"nixlib": [
|
||||
"wasmcloud",
|
||||
"wit-deps",
|
||||
"nixlib"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1681933283,
|
||||
"narHash": "sha256-phDsQdaoUEI4DUTErR6Tz7lS0y3kXvDwwbqtxpzd0eo=",
|
||||
"owner": "rvolosatovs",
|
||||
"repo": "nix-log",
|
||||
"rev": "833d31e3c1a677eac81ba87e777afa5076071d66",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "rvolosatovs",
|
||||
"repo": "nix-log",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixify": {
|
||||
"inputs": {
|
||||
"advisory-db": "advisory-db_2",
|
||||
"crane": "crane_2",
|
||||
"fenix": "fenix_2",
|
||||
"flake-utils": "flake-utils_2",
|
||||
"macos-sdk": "macos-sdk",
|
||||
"nix-filter": "nix-filter",
|
||||
"nix-log": "nix-log",
|
||||
"nixlib": [
|
||||
"wasmcloud",
|
||||
"nixlib"
|
||||
],
|
||||
"nixpkgs-darwin": "nixpkgs-darwin",
|
||||
"nixpkgs-nixos": "nixpkgs-nixos",
|
||||
"rust-overlay": "rust-overlay_2"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1731068753,
|
||||
"narHash": "sha256-6H+vYAYl/koFsiBEM4WHZhOoOQ2Hfzd+MtcxFfAOOtw=",
|
||||
"owner": "rvolosatovs",
|
||||
"repo": "nixify",
|
||||
"rev": "7b83953ebfb22ba1f623ac06312aebee81f2182e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "rvolosatovs",
|
||||
"repo": "nixify",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixify_2": {
|
||||
"inputs": {
|
||||
"crane": "crane_3",
|
||||
"fenix": "fenix_3",
|
||||
"flake-utils": "flake-utils_3",
|
||||
"nix-filter": "nix-filter_2",
|
||||
"nixlib": "nixlib",
|
||||
"nixpkgs": "nixpkgs_2",
|
||||
"rust-overlay": "rust-overlay"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1679748566,
|
||||
"narHash": "sha256-yA4yIJjNCOLoUh0py9S3SywwbPnd/6NPYbXad+JeOl0=",
|
||||
"owner": "rvolosatovs",
|
||||
"repo": "nixify",
|
||||
"rev": "80e823959511a42dfec4409fef406a14ae8240f3",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "rvolosatovs",
|
||||
"repo": "nixify",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixlib": {
|
||||
"locked": {
|
||||
"lastModified": 1679187309,
|
||||
"narHash": "sha256-H8udmkg5wppL11d/05MMzOMryiYvc403axjDNZy1/TQ=",
|
||||
"owner": "nix-community",
|
||||
"repo": "nixpkgs.lib",
|
||||
"rev": "44214417fe4595438b31bdb9469be92536a61455",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-community",
|
||||
"repo": "nixpkgs.lib",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixlib_2": {
|
||||
"locked": {
|
||||
"lastModified": 1679791877,
|
||||
"narHash": "sha256-tTV1Mf0hPWIMtqyU16Kd2JUBDWvfHlDC9pF57vcbgpQ=",
|
||||
"owner": "nix-community",
|
||||
"repo": "nixpkgs.lib",
|
||||
"rev": "cc060ddbf652a532b54057081d5abd6144d01971",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-community",
|
||||
"repo": "nixpkgs.lib",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixlib_3": {
|
||||
"locked": {
|
||||
"lastModified": 1731200463,
|
||||
"narHash": "sha256-qDaAweJjdFbVExqs8aG27urUgcgKufkIngHW3Rzustg=",
|
||||
"owner": "nix-community",
|
||||
"repo": "nixpkgs.lib",
|
||||
"rev": "e04234d263750db01c78a412690363dc2226e68a",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-community",
|
||||
"repo": "nixpkgs.lib",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1738163270,
|
||||
"narHash": "sha256-B/7Y1v4y+msFFBW1JAdFjNvVthvNdJKiN6EGRPnqfno=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "59e618d90c065f55ae48446f307e8c09565d5ab0",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "release-24.11",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs-darwin": {
|
||||
"locked": {
|
||||
"lastModified": 1730891215,
|
||||
"narHash": "sha256-i85DPrhDuvzgvIWCpJlbfM2UFtNYbapo20MtQXsvay4=",
|
||||
"owner": "nixos",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "c128e44a249d6180740d0a979b6480d5b795c013",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nixos",
|
||||
"ref": "nixpkgs-24.05-darwin",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs-nixos": {
|
||||
"locked": {
|
||||
"lastModified": 1730883749,
|
||||
"narHash": "sha256-mwrFF0vElHJP8X3pFCByJR365Q2463ATp2qGIrDUdlE=",
|
||||
"owner": "nixos",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "dba414932936fde69f0606b4f1d87c5bc0003ede",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nixos",
|
||||
"ref": "nixos-24.05",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs_2": {
|
||||
"locked": {
|
||||
"lastModified": 1679577639,
|
||||
"narHash": "sha256-7u7bsNP0ApBnLgsHVROQ5ytoMqustmMVMgtaFS/P7EU=",
|
||||
"owner": "nixos",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "8f1bcd72727c5d4cd775545595d068be410f2a7e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nixos",
|
||||
"ref": "nixpkgs-22.11-darwin",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"root": {
|
||||
"inputs": {
|
||||
"advisory-db": "advisory-db",
|
||||
"crane": "crane",
|
||||
"fenix": "fenix",
|
||||
"flake-utils": "flake-utils",
|
||||
"nixpkgs": "nixpkgs",
|
||||
"wasmcloud": "wasmcloud"
|
||||
}
|
||||
},
|
||||
"rust-analyzer-src": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1730989300,
|
||||
"narHash": "sha256-ZWSta9893f/uF5PoRFn/BSUAxF4dKW+TIbdA6rZoGBg=",
|
||||
"owner": "rust-lang",
|
||||
"repo": "rust-analyzer",
|
||||
"rev": "1042a8c22c348491a4bade4f664430b03d6f5b5c",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "rust-lang",
|
||||
"ref": "nightly",
|
||||
"repo": "rust-analyzer",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"rust-analyzer-src_2": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1679520343,
|
||||
"narHash": "sha256-AJGSGWRfoKWD5IVTu1wEsR990wHbX0kIaolPqNMEh0c=",
|
||||
"owner": "rust-lang",
|
||||
"repo": "rust-analyzer",
|
||||
"rev": "eb791f31e688ae00908eb75d4c704ef60c430a92",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "rust-lang",
|
||||
"ref": "nightly",
|
||||
"repo": "rust-analyzer",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"rust-overlay": {
|
||||
"inputs": {
|
||||
"flake-utils": [
|
||||
"wasmcloud",
|
||||
"nixify",
|
||||
"nix-log",
|
||||
"nixify",
|
||||
"flake-utils"
|
||||
],
|
||||
"nixpkgs": [
|
||||
"wasmcloud",
|
||||
"nixify",
|
||||
"nix-log",
|
||||
"nixify",
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1679537973,
|
||||
"narHash": "sha256-R6borgcKeyMIjjPeeYsfo+mT8UdS+OwwbhhStdCfEjg=",
|
||||
"owner": "oxalica",
|
||||
"repo": "rust-overlay",
|
||||
"rev": "fbc7ae3f14d32e78c0e8d7865f865cc28a46b232",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "oxalica",
|
||||
"repo": "rust-overlay",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"rust-overlay_2": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"wasmcloud",
|
||||
"nixify",
|
||||
"nixpkgs-nixos"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1731032894,
|
||||
"narHash": "sha256-dQSyYPmrQiPr+PGEd+K8038rubFGz7G/dNXVeaGWE0w=",
|
||||
"owner": "oxalica",
|
||||
"repo": "rust-overlay",
|
||||
"rev": "d52f2a4c103a0acf09ded857b9e2519ae2360e59",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "oxalica",
|
||||
"repo": "rust-overlay",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"systems": {
|
||||
"locked": {
|
||||
"lastModified": 1681028828,
|
||||
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"systems_2": {
|
||||
"locked": {
|
||||
"lastModified": 1681028828,
|
||||
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"wasmcloud": {
|
||||
"inputs": {
|
||||
"nixify": "nixify",
|
||||
"nixlib": "nixlib_3",
|
||||
"wit-deps": "wit-deps"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1731409523,
|
||||
"narHash": "sha256-Q/BnuJaMyJfY+p9VpdyBWtRjEo4TdRvFMMhfdDFj6cU=",
|
||||
"owner": "wasmCloud",
|
||||
"repo": "wasmCloud",
|
||||
"rev": "579455058513b907c7df4a4ec13728f83c6b782b",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "wasmCloud",
|
||||
"ref": "wash-cli-v0.37.0",
|
||||
"repo": "wasmCloud",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"wit-deps": {
|
||||
"inputs": {
|
||||
"nix-log": "nix-log_2",
|
||||
"nixify": [
|
||||
"wasmcloud",
|
||||
"nixify"
|
||||
],
|
||||
"nixlib": [
|
||||
"wasmcloud",
|
||||
"nixlib"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1727963723,
|
||||
"narHash": "sha256-urAGMGMH5ousEeVTZ5AaLPfowXaYQoISNXiutV00iQo=",
|
||||
"owner": "bytecodealliance",
|
||||
"repo": "wit-deps",
|
||||
"rev": "eb7c84564acfe13a4197bb15052fd2e2b3d29775",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "bytecodealliance",
|
||||
"ref": "v0.4.0",
|
||||
"repo": "wit-deps",
|
||||
"type": "github"
|
||||
}
|
||||
}
|
||||
},
|
||||
"root": "root",
|
||||
"version": 7
|
||||
}
|
||||
|
|
@ -0,0 +1,264 @@
|
|||
{
|
||||
nixConfig.extra-substituters =
|
||||
[ "https://wasmcloud.cachix.org" "https://crane.cachix.org" ];
|
||||
nixConfig.extra-trusted-public-keys = [
|
||||
"wasmcloud.cachix.org-1:9gRBzsKh+x2HbVVspreFg/6iFRiD4aOcUQfXVDl3hiM="
|
||||
"crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk="
|
||||
];
|
||||
|
||||
description = "A flake for building and running wadm";
|
||||
|
||||
inputs = {
|
||||
nixpkgs.url = "github:NixOS/nixpkgs/release-24.11";
|
||||
|
||||
crane.url = "github:ipetkov/crane";
|
||||
|
||||
fenix = {
|
||||
url = "github:nix-community/fenix";
|
||||
inputs.nixpkgs.follows = "nixpkgs";
|
||||
inputs.rust-analyzer-src.follows = "";
|
||||
};
|
||||
|
||||
flake-utils.url = "github:numtide/flake-utils";
|
||||
|
||||
advisory-db = {
|
||||
url = "github:rustsec/advisory-db";
|
||||
flake = false;
|
||||
};
|
||||
|
||||
# The wash CLI flag is always after the latest host release tag we want
|
||||
wasmcloud.url = "github:wasmCloud/wasmCloud/wash-cli-v0.37.0";
|
||||
};
|
||||
|
||||
outputs =
|
||||
{ self, nixpkgs, crane, fenix, flake-utils, advisory-db, wasmcloud, ... }:
|
||||
flake-utils.lib.eachDefaultSystem (system:
|
||||
let
|
||||
pkgs = nixpkgs.legacyPackages.${system};
|
||||
|
||||
inherit (pkgs) lib;
|
||||
|
||||
craneLib = crane.mkLib pkgs;
|
||||
src = craneLib.cleanCargoSource ./.;
|
||||
|
||||
# Common arguments can be set here to avoid repeating them later
|
||||
commonArgs = {
|
||||
inherit src;
|
||||
strictDeps = true;
|
||||
|
||||
buildInputs = [
|
||||
# Add additional build inputs here
|
||||
] ++ lib.optionals pkgs.stdenv.isDarwin [
|
||||
# Additional darwin specific inputs can be set here if needed
|
||||
];
|
||||
|
||||
# Additional environment variables can be set directly here if needed
|
||||
# MY_CUSTOM_VAR = "some value";
|
||||
};
|
||||
|
||||
craneLibLLvmTools = craneLib.overrideToolchain
|
||||
(fenix.packages.${system}.complete.withComponents [
|
||||
"cargo"
|
||||
"llvm-tools"
|
||||
"rustc"
|
||||
]);
|
||||
|
||||
# Get the lock file for filtering
|
||||
rawLockFile = builtins.fromTOML (builtins.readFile ./Cargo.lock);
|
||||
|
||||
# Filter out the workspace members
|
||||
filteredLockFile = rawLockFile // {
|
||||
package = builtins.filter (x: !lib.strings.hasPrefix "wadm" x.name)
|
||||
rawLockFile.package;
|
||||
};
|
||||
|
||||
cargoVendorDir =
|
||||
craneLib.vendorCargoDeps { cargoLockParsed = filteredLockFile; };
|
||||
|
||||
cargoLock = craneLib.writeTOML "Cargo.lock" filteredLockFile;
|
||||
|
||||
# Build *just* the cargo dependencies (of the entire workspace), but we don't want to build
|
||||
# any of the other things in the crate to avoid rebuilding things in the dependencies when
|
||||
# we change workspace crate dependencies
|
||||
cargoArtifacts = let
|
||||
commonArgs' = removeAttrs commonArgs [ "src" ];
|
||||
|
||||
# Get the manifest file for filtering
|
||||
rawManifestFile = builtins.fromTOML (builtins.readFile ./Cargo.toml);
|
||||
|
||||
# Filter out the workspace members from manifest
|
||||
filteredManifestFile = with lib;
|
||||
let
|
||||
filterWadmAttrs =
|
||||
filterAttrs (name: _: !strings.hasPrefix "wadm" name);
|
||||
|
||||
workspace = removeAttrs rawManifestFile.workspace [ "members" ];
|
||||
in rawManifestFile // {
|
||||
workspace = workspace // {
|
||||
dependencies = filterWadmAttrs workspace.dependencies;
|
||||
package = workspace.package // {
|
||||
# pin version to avoid rebuilds on bumps
|
||||
version = "0.0.0";
|
||||
};
|
||||
};
|
||||
|
||||
dependencies = filterWadmAttrs rawManifestFile.dependencies;
|
||||
|
||||
dev-dependencies =
|
||||
filterWadmAttrs rawManifestFile.dev-dependencies;
|
||||
|
||||
build-dependencies =
|
||||
filterWadmAttrs rawManifestFile.build-dependencies;
|
||||
};
|
||||
|
||||
cargoToml = craneLib.writeTOML "Cargo.toml" filteredManifestFile;
|
||||
|
||||
dummySrc = craneLib.mkDummySrc {
|
||||
src = pkgs.runCommand "wadm-dummy-src" { } ''
|
||||
mkdir -p $out
|
||||
cp --recursive --no-preserve=mode,ownership ${src}/. -t $out
|
||||
cp ${cargoToml} $out/Cargo.toml
|
||||
'';
|
||||
};
|
||||
|
||||
args = commonArgs' // {
|
||||
inherit cargoLock cargoToml cargoVendorDir dummySrc;
|
||||
|
||||
cargoExtraArgs = ""; # disable `--locked` passed by default by crane
|
||||
};
|
||||
in craneLib.buildDepsOnly args;
|
||||
|
||||
individualCrateArgs = commonArgs // {
|
||||
inherit (craneLib.crateNameFromCargoToml { inherit src; }) version;
|
||||
# TODO(thomastaylor312) We run unit tests here and e2e tests externally. The nextest step
|
||||
# wasn't letting me pass in the fileset
|
||||
doCheck = true;
|
||||
};
|
||||
|
||||
fileSetForCrate = lib.fileset.toSource {
|
||||
root = ./.;
|
||||
fileset = lib.fileset.unions [
|
||||
./Cargo.toml
|
||||
./Cargo.lock
|
||||
./tests
|
||||
./oam
|
||||
(craneLib.fileset.commonCargoSources ./crates/wadm)
|
||||
(craneLib.fileset.commonCargoSources ./crates/wadm-client)
|
||||
(craneLib.fileset.commonCargoSources ./crates/wadm-types)
|
||||
];
|
||||
};
|
||||
|
||||
# Build the top-level crates of the workspace as individual derivations.
|
||||
# This allows consumers to only depend on (and build) only what they need.
|
||||
# Though it is possible to build the entire workspace as a single derivation,
|
||||
# so this is left up to you on how to organize things
|
||||
#
|
||||
# Note that the cargo workspace must define `workspace.members` using wildcards,
|
||||
# otherwise, omitting a crate (like we do below) will result in errors since
|
||||
# cargo won't be able to find the sources for all members.
|
||||
# TODO(thomastaylor312) I tried using `doInstallCargoArtifacts` and passing in things to the
|
||||
# next derivations as the `cargoArtifacts`, but that ended up always building things twice
|
||||
# rather than caching. We should look into it more and see if there's a way to make it work.
|
||||
wadm-lib = craneLib.cargoBuild (individualCrateArgs // {
|
||||
inherit cargoArtifacts;
|
||||
pname = "wadm";
|
||||
cargoExtraArgs = "-p wadm";
|
||||
src = fileSetForCrate;
|
||||
});
|
||||
wadm = craneLib.buildPackage (individualCrateArgs // {
|
||||
inherit cargoArtifacts;
|
||||
pname = "wadm-cli";
|
||||
cargoExtraArgs = "--bin wadm";
|
||||
src = fileSetForCrate;
|
||||
});
|
||||
wadm-client = craneLib.cargoBuild (individualCrateArgs // {
|
||||
inherit cargoArtifacts;
|
||||
pname = "wadm-client";
|
||||
cargoExtraArgs = "-p wadm-client";
|
||||
src = fileSetForCrate;
|
||||
});
|
||||
wadm-types = craneLib.cargoBuild (individualCrateArgs // {
|
||||
inherit cargoArtifacts;
|
||||
pname = "wadm-types";
|
||||
cargoExtraArgs = "-p wadm-types";
|
||||
src = fileSetForCrate;
|
||||
});
|
||||
in {
|
||||
checks = {
|
||||
# Build the crates as part of `nix flake check` for convenience
|
||||
inherit wadm wadm-client wadm-types;
|
||||
|
||||
# Run clippy (and deny all warnings) on the workspace source,
|
||||
# again, reusing the dependency artifacts from above.
|
||||
#
|
||||
# Note that this is done as a separate derivation so that
|
||||
# we can block the CI if there are issues here, but not
|
||||
# prevent downstream consumers from building our crate by itself.
|
||||
workspace-clippy = craneLib.cargoClippy (commonArgs // {
|
||||
inherit cargoArtifacts;
|
||||
cargoClippyExtraArgs = "--all-targets -- --deny warnings";
|
||||
});
|
||||
|
||||
workspace-doc =
|
||||
craneLib.cargoDoc (commonArgs // { inherit cargoArtifacts; });
|
||||
|
||||
# Check formatting
|
||||
workspace-fmt = craneLib.cargoFmt { inherit src; };
|
||||
|
||||
# Audit dependencies
|
||||
workspace-audit = craneLib.cargoAudit { inherit src advisory-db; };
|
||||
|
||||
# Audit licenses
|
||||
# my-workspace-deny = craneLib.cargoDeny {
|
||||
# inherit src;
|
||||
# };
|
||||
|
||||
# TODO: the wadm e2e tests use docker compose and things like `wash up` to test things
|
||||
# (which accesses network currently). We would need to fix those tests to do something
|
||||
# else to work properly. The low hanging fruit here would be to use the built artifact
|
||||
# in the e2e tests so we can output those binaries from the nix build and then just
|
||||
# run the tests from a separate repo. We could also do something like outputting the
|
||||
# prebuilt artifacts out into the current directory to save on build time. But that is
|
||||
# for later us to figure out
|
||||
runE2ETests = pkgs.runCommand "e2e-tests" {
|
||||
nativeBuildInputs = with pkgs;
|
||||
[
|
||||
nats-server
|
||||
# wasmcloud.wasmcloud
|
||||
];
|
||||
} ''
|
||||
touch $out
|
||||
'';
|
||||
};
|
||||
|
||||
packages = {
|
||||
inherit wadm wadm-client wadm-types wadm-lib;
|
||||
default = wadm;
|
||||
} // lib.optionalAttrs (!pkgs.stdenv.isDarwin) {
|
||||
workspace-llvm-coverage = craneLibLLvmTools.cargoLlvmCov
|
||||
(commonArgs // { inherit cargoArtifacts; });
|
||||
};
|
||||
|
||||
apps = {
|
||||
wadm = flake-utils.lib.mkApp { drv = wadm; };
|
||||
default = flake-utils.lib.mkApp { drv = wadm; };
|
||||
};
|
||||
|
||||
devShells.default = craneLib.devShell {
|
||||
# Inherit inputs from checks.
|
||||
checks = self.checks.${system};
|
||||
|
||||
RUST_SRC_PATH =
|
||||
"${pkgs.rust.packages.stable.rustPlatform.rustLibSrc}";
|
||||
|
||||
# Extra inputs can be added here; cargo and rustc are provided by default.
|
||||
packages = [
|
||||
pkgs.nats-server
|
||||
pkgs.natscli
|
||||
pkgs.docker
|
||||
pkgs.git
|
||||
wasmcloud.outputs.packages.${system}.default
|
||||
];
|
||||
};
|
||||
});
|
||||
}
|
||||
|
|
@ -0,0 +1,567 @@
|
|||
{
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"title": "Manifest",
|
||||
"description": "Manifest file based on the Open Application Model (OAM) specification for declaratively managing wasmCloud applications",
|
||||
"type": "object",
|
||||
"required": [
|
||||
"apiVersion",
|
||||
"kind",
|
||||
"metadata",
|
||||
"spec"
|
||||
],
|
||||
"properties": {
|
||||
"apiVersion": {
|
||||
"description": "The OAM version of the manifest",
|
||||
"type": "string"
|
||||
},
|
||||
"kind": {
|
||||
"description": "The kind or type of manifest described by the spec",
|
||||
"type": "string"
|
||||
},
|
||||
"metadata": {
|
||||
"description": "Metadata describing the manifest",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/Metadata"
|
||||
}
|
||||
]
|
||||
},
|
||||
"spec": {
|
||||
"description": "The specification for this manifest",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/Specification"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
"definitions": {
|
||||
"CapabilityProperties": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"application": {
|
||||
"description": "Information to locate a component within a shared application. Cannot be specified if the image is specified.",
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/SharedApplicationComponentProperties"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
},
|
||||
"config": {
|
||||
"description": "Named configuration to pass to the provider. The merged set of configuration will be passed to the provider at runtime using the provider SDK's `init()` function.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/ConfigProperty"
|
||||
}
|
||||
},
|
||||
"id": {
|
||||
"description": "The component ID to use for this provider. If not supplied, it will be generated as a combination of the [Metadata::name] and the image reference.",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"image": {
|
||||
"description": "The image reference to use. Required unless the component is a shared component that is defined in another shared application.",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"secrets": {
|
||||
"description": "Named secret references to pass to the t. The provider will be able to retrieve these values at runtime using `wasmcloud:secrets/store`.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/SecretProperty"
|
||||
}
|
||||
}
|
||||
},
|
||||
"additionalProperties": false
|
||||
},
|
||||
"Component": {
|
||||
"description": "A component definition",
|
||||
"type": "object",
|
||||
"oneOf": [
|
||||
{
|
||||
"type": "object",
|
||||
"required": [
|
||||
"properties",
|
||||
"type"
|
||||
],
|
||||
"properties": {
|
||||
"properties": {
|
||||
"$ref": "#/definitions/ComponentProperties"
|
||||
},
|
||||
"type": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"component"
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "object",
|
||||
"required": [
|
||||
"properties",
|
||||
"type"
|
||||
],
|
||||
"properties": {
|
||||
"properties": {
|
||||
"$ref": "#/definitions/CapabilityProperties"
|
||||
},
|
||||
"type": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"capability"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"required": [
|
||||
"name"
|
||||
],
|
||||
"properties": {
|
||||
"name": {
|
||||
"description": "The name of this component",
|
||||
"type": "string"
|
||||
},
|
||||
"traits": {
|
||||
"description": "A list of various traits assigned to this component",
|
||||
"type": [
|
||||
"array",
|
||||
"null"
|
||||
],
|
||||
"items": {
|
||||
"$ref": "#/definitions/Trait"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"ComponentProperties": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"application": {
|
||||
"description": "Information to locate a component within a shared application. Cannot be specified if the image is specified.",
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/SharedApplicationComponentProperties"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
},
|
||||
"config": {
|
||||
"description": "Named configuration to pass to the component. The component will be able to retrieve these values at runtime using `wasi:runtime/config.`",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/ConfigProperty"
|
||||
}
|
||||
},
|
||||
"id": {
|
||||
"description": "The component ID to use for this component. If not supplied, it will be generated as a combination of the [Metadata::name] and the image reference.",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"image": {
|
||||
"description": "The image reference to use. Required unless the component is a shared component that is defined in another shared application.",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"secrets": {
|
||||
"description": "Named secret references to pass to the component. The component will be able to retrieve these values at runtime using `wasmcloud:secrets/store`.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/SecretProperty"
|
||||
}
|
||||
}
|
||||
},
|
||||
"additionalProperties": false
|
||||
},
|
||||
"ConfigDefinition": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"config": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/ConfigProperty"
|
||||
}
|
||||
},
|
||||
"secrets": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/SecretProperty"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"ConfigProperty": {
|
||||
"description": "Properties for the config list associated with components, providers, and links\n\n## Usage Defining a config block, like so: ```yaml source_config: - name: \"external-secret-kv\" - name: \"default-port\" properties: port: \"8080\" ```\n\nWill result in two config scalers being created, one with the name `basic-kv` and one with the name `default-port`. Wadm will not resolve collisions with configuration names between manifests.",
|
||||
"type": "object",
|
||||
"required": [
|
||||
"name"
|
||||
],
|
||||
"properties": {
|
||||
"name": {
|
||||
"description": "Name of the config to ensure exists",
|
||||
"type": "string"
|
||||
},
|
||||
"properties": {
|
||||
"description": "Optional properties to put with the configuration. If the properties are omitted in the manifest, wadm will assume that the configuration is externally managed and will not attempt to create it, only reporting the status as failed if not found.",
|
||||
"type": [
|
||||
"object",
|
||||
"null"
|
||||
],
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"additionalProperties": false
|
||||
},
|
||||
"LinkProperty": {
|
||||
"description": "Properties for links",
|
||||
"type": "object",
|
||||
"required": [
|
||||
"interfaces",
|
||||
"namespace",
|
||||
"package",
|
||||
"target"
|
||||
],
|
||||
"properties": {
|
||||
"interfaces": {
|
||||
"description": "WIT interfaces for the link",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of this link",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"namespace": {
|
||||
"description": "WIT namespace for the link",
|
||||
"type": "string"
|
||||
},
|
||||
"package": {
|
||||
"description": "WIT package for the link",
|
||||
"type": "string"
|
||||
},
|
||||
"source": {
|
||||
"description": "Configuration to apply to the source of the link",
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/ConfigDefinition"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
},
|
||||
"source_config": {
|
||||
"deprecated": true,
|
||||
"writeOnly": true,
|
||||
"type": [
|
||||
"array",
|
||||
"null"
|
||||
],
|
||||
"items": {
|
||||
"$ref": "#/definitions/ConfigProperty"
|
||||
}
|
||||
},
|
||||
"target": {
|
||||
"description": "Configuration to apply to the target of the link",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/TargetConfig"
|
||||
}
|
||||
]
|
||||
},
|
||||
"target_config": {
|
||||
"deprecated": true,
|
||||
"writeOnly": true,
|
||||
"type": [
|
||||
"array",
|
||||
"null"
|
||||
],
|
||||
"items": {
|
||||
"$ref": "#/definitions/ConfigProperty"
|
||||
}
|
||||
}
|
||||
},
|
||||
"additionalProperties": false
|
||||
},
|
||||
"Metadata": {
|
||||
"description": "The metadata describing the manifest",
|
||||
"type": "object",
|
||||
"required": [
|
||||
"annotations",
|
||||
"name"
|
||||
],
|
||||
"properties": {
|
||||
"annotations": {
|
||||
"description": "Optional data for annotating this manifest see <https://github.com/oam-dev/spec/blob/master/metadata.md#annotations-format>",
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"labels": {
|
||||
"description": "Optional data for labeling this manifest, see <https://github.com/oam-dev/spec/blob/master/metadata.md#label-format>",
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of the manifest. This must be unique per lattice",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"Policy": {
|
||||
"description": "A policy definition",
|
||||
"type": "object",
|
||||
"required": [
|
||||
"name",
|
||||
"properties",
|
||||
"type"
|
||||
],
|
||||
"properties": {
|
||||
"name": {
|
||||
"description": "The name of this policy",
|
||||
"type": "string"
|
||||
},
|
||||
"properties": {
|
||||
"description": "The properties for this policy",
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": {
|
||||
"description": "The type of the policy",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"SecretProperty": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"name",
|
||||
"properties"
|
||||
],
|
||||
"properties": {
|
||||
"name": {
|
||||
"description": "The name of the secret. This is used by a reference by the component or capability to get the secret value as a resource.",
|
||||
"type": "string"
|
||||
},
|
||||
"properties": {
|
||||
"description": "The properties of the secret that indicate how to retrieve the secret value from a secrets backend and which backend to actually query.",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/SecretSourceProperty"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"SecretSourceProperty": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"key",
|
||||
"policy"
|
||||
],
|
||||
"properties": {
|
||||
"field": {
|
||||
"description": "The field to use for retrieving the secret from the backend. This is optional and can be used to retrieve a specific field from a secret.",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"key": {
|
||||
"description": "The key to use for retrieving the secret from the backend.",
|
||||
"type": "string"
|
||||
},
|
||||
"policy": {
|
||||
"description": "The policy to use for retrieving the secret.",
|
||||
"type": "string"
|
||||
},
|
||||
"version": {
|
||||
"description": "The version of the secret to retrieve. If not supplied, the latest version will be used.",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"SharedApplicationComponentProperties": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"component",
|
||||
"name"
|
||||
],
|
||||
"properties": {
|
||||
"component": {
|
||||
"description": "The name of the component in the shared application",
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of the shared application",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"Specification": {
|
||||
"description": "A representation of an OAM specification",
|
||||
"type": "object",
|
||||
"required": [
|
||||
"components"
|
||||
],
|
||||
"properties": {
|
||||
"components": {
|
||||
"description": "The list of components for describing an application",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/Component"
|
||||
}
|
||||
},
|
||||
"policies": {
|
||||
"description": "The list of policies describing an application. This is for providing application-wide setting such as configuration for a secrets backend, how to render Kubernetes services, etc. It can be omitted if no policies are needed for an application.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/Policy"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"Spread": {
|
||||
"description": "Configuration for various spreading requirements",
|
||||
"type": "object",
|
||||
"required": [
|
||||
"name",
|
||||
"requirements"
|
||||
],
|
||||
"properties": {
|
||||
"name": {
|
||||
"description": "The name of this spread requirement",
|
||||
"type": "string"
|
||||
},
|
||||
"requirements": {
|
||||
"description": "An arbitrary map of labels to match on for scaling requirements",
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"weight": {
|
||||
"description": "An optional weight for this spread. Higher weights are given more precedence",
|
||||
"type": [
|
||||
"integer",
|
||||
"null"
|
||||
],
|
||||
"format": "uint",
|
||||
"minimum": 0.0
|
||||
}
|
||||
},
|
||||
"additionalProperties": false
|
||||
},
|
||||
"SpreadScalerProperty": {
|
||||
"description": "Properties for spread scalers",
|
||||
"type": "object",
|
||||
"required": [
|
||||
"instances"
|
||||
],
|
||||
"properties": {
|
||||
"instances": {
|
||||
"description": "Number of instances to spread across matching requirements",
|
||||
"type": "integer",
|
||||
"format": "uint",
|
||||
"minimum": 0.0
|
||||
},
|
||||
"spread": {
|
||||
"description": "Requirements for spreading those instances",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/Spread"
|
||||
}
|
||||
}
|
||||
},
|
||||
"additionalProperties": false
|
||||
},
|
||||
"TargetConfig": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"name"
|
||||
],
|
||||
"properties": {
|
||||
"config": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/ConfigProperty"
|
||||
}
|
||||
},
|
||||
"name": {
|
||||
"description": "The target this link applies to. This should be the name of a component in the manifest",
|
||||
"type": "string"
|
||||
},
|
||||
"secrets": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/SecretProperty"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"Trait": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"properties",
|
||||
"type"
|
||||
],
|
||||
"properties": {
|
||||
"properties": {
|
||||
"description": "The properties of this trait",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/TraitProperty"
|
||||
}
|
||||
]
|
||||
},
|
||||
"type": {
|
||||
"description": "The type of trait specified. This should be a unique string for the type of scaler. As we plan on supporting custom scalers, these traits are not enumerated",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false
|
||||
},
|
||||
"TraitProperty": {
|
||||
"description": "Properties for defining traits",
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/definitions/LinkProperty"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/SpreadScalerProperty"
|
||||
},
|
||||
true
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -1,18 +1,28 @@
|
|||
# wadm Open Application Model
|
||||
|
||||
The wasmCloud Application Deployment Manager uses the [Open Application Model](https://oam.dev) to define application specifications. Because this specification is extensible and _platform agnostic_, it makes for an ideal way to represent applications with metadata specific to wasmCloud.
|
||||
|
||||
## wasmCloud OAM Components
|
||||
|
||||
The following is a list of the `component`s wasmCloud has added to the model.
|
||||
* `actor` - An actor
|
||||
* `provider` - A capability provider
|
||||
|
||||
- `component` - A WebAssembly component
|
||||
- `provider` - A capability provider
|
||||
|
||||
## wasmCloud OAM Traits
|
||||
|
||||
The following is a list of the `traits` wasmCloud has added via customization to its application model.
|
||||
* `spreadscaler` - Defines the spread of instances of a particular entity across multiple hosts with affinity requirements
|
||||
* `linkdef` - A link definition that describes a link between an actor and a capability provider
|
||||
|
||||
- `spreadscaler` - Defines the spread of instances of a particular entity across multiple hosts with affinity requirements
|
||||
- `link` - A link definition that describes a link between a component and a capability provider or a component and another component
|
||||
|
||||
## JSON Schema
|
||||
|
||||
A JSON schema is automatically generated from our Rust structures and is at the root of the repository: [oam.schema.json](../oam.schema.json). You can regenerate the `oam.schema.json` file by running `cargo run --bin wadm-schema`.
|
||||
|
||||
## Example Application YAML
|
||||
The following is an example YAML file describing an ALC application
|
||||
|
||||
The following is an example YAML file describing an application
|
||||
|
||||
```yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
|
|
@ -20,18 +30,17 @@ kind: Application
|
|||
metadata:
|
||||
name: my-example-app
|
||||
annotations:
|
||||
version: v0.0.1
|
||||
description: "This is my app"
|
||||
description: 'This is my app revision 2'
|
||||
spec:
|
||||
components:
|
||||
- name: userinfo
|
||||
type: actor
|
||||
type: component
|
||||
properties:
|
||||
image: wasmcloud.azurecr.io/fake:1
|
||||
traits:
|
||||
- type: spreadscaler
|
||||
properties:
|
||||
replicas: 4
|
||||
instances: 4
|
||||
spread:
|
||||
- name: eastcoast
|
||||
requirements:
|
||||
|
|
@ -41,32 +50,35 @@ spec:
|
|||
requirements:
|
||||
zone: us-west-1
|
||||
weight: 20
|
||||
- type: linkdef
|
||||
properties:
|
||||
target: webcap
|
||||
values:
|
||||
port: "8080"
|
||||
|
||||
- name: webcap
|
||||
type: capability
|
||||
properties:
|
||||
contract: wasmcloud:httpserver
|
||||
image: wasmcloud.azurecr.io/httpserver:0.13.1
|
||||
link_name: default
|
||||
traits:
|
||||
- type: link
|
||||
properties:
|
||||
target:
|
||||
name: userinfo
|
||||
config: []
|
||||
namespace: wasi
|
||||
package: http
|
||||
interfaces:
|
||||
- incoming-handler
|
||||
source:
|
||||
config: []
|
||||
|
||||
- name: ledblinky
|
||||
type: capability
|
||||
properties:
|
||||
image: wasmcloud.azurecr.io/ledblinky:0.0.1
|
||||
contract: wasmcloud:blinkenlights
|
||||
# default link name is "default"
|
||||
traits:
|
||||
- type: spreadscaler
|
||||
properties:
|
||||
replicas: 1
|
||||
instances: 1
|
||||
spread:
|
||||
- name: haslights
|
||||
requirements:
|
||||
ledenabled: "true"
|
||||
# default weight is 100
|
||||
ledenabled: 'true'
|
||||
# default weight is 100
|
||||
```
|
||||
|
|
|
|||
|
|
@ -0,0 +1,28 @@
|
|||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: config-example
|
||||
annotations:
|
||||
description: 'This is my app'
|
||||
spec:
|
||||
components:
|
||||
- name: http
|
||||
type: component
|
||||
properties:
|
||||
image: ghcr.io/wasmcloud/components/http-hello-world-rust:0.1.0
|
||||
# You can pass any config data you'd like sent to your component as a string->string map
|
||||
config:
|
||||
- name: component_config
|
||||
properties:
|
||||
lang: EN-US
|
||||
|
||||
- name: webcap
|
||||
type: capability
|
||||
properties:
|
||||
image: ghcr.io/wasmcloud/http-server:0.23.0
|
||||
# You can pass any config data you'd like sent to your provider as a string->string map
|
||||
config:
|
||||
- name: provider_config
|
||||
properties:
|
||||
default-port: '8080'
|
||||
cache_file: '/tmp/mycache.json'
|
||||
|
|
@ -3,32 +3,38 @@ kind: Application
|
|||
metadata:
|
||||
name: my-example-app
|
||||
annotations:
|
||||
version: v0.0.2
|
||||
description: "This is my app revision 2"
|
||||
spec:
|
||||
components:
|
||||
- name: userinfo
|
||||
type: actor
|
||||
type: component
|
||||
properties:
|
||||
image: wasmcloud.azurecr.io/fake:1
|
||||
traits:
|
||||
# NOTE: This demonstrates what a custom scaler could look like. This functionality does not currently exist
|
||||
- type: customscaler
|
||||
properties:
|
||||
replicas: 4
|
||||
instances: 4
|
||||
clouds:
|
||||
- aws
|
||||
- azure
|
||||
scale_profile: mini
|
||||
- type: linkdef
|
||||
properties:
|
||||
target: webcap
|
||||
values:
|
||||
port: "8080"
|
||||
|
||||
- name: webcap
|
||||
type: capability
|
||||
properties:
|
||||
contract: wasmcloud:httpserver
|
||||
image: wasmcloud.azurecr.io/httpserver:0.13.1
|
||||
link_name: default
|
||||
traits:
|
||||
- type: link
|
||||
properties:
|
||||
target:
|
||||
name: userinfo
|
||||
namespace: wasi
|
||||
package: http
|
||||
interfaces:
|
||||
- incoming-handler
|
||||
source:
|
||||
config:
|
||||
- name: default-port
|
||||
properties:
|
||||
port: "8080"
|
||||
|
|
|
|||
|
|
@ -3,30 +3,36 @@ kind: Application
|
|||
metadata:
|
||||
name: echo
|
||||
annotations:
|
||||
version: v0.0.1
|
||||
description: "This is my app"
|
||||
description: 'This is my app'
|
||||
spec:
|
||||
components:
|
||||
- name: echo
|
||||
type: actor
|
||||
type: component
|
||||
properties:
|
||||
image: wasmcloud.azurecr.io/echo:0.3.7
|
||||
traits:
|
||||
- type: spreadscaler
|
||||
properties:
|
||||
replicas: 1
|
||||
- type: linkdef
|
||||
properties:
|
||||
target: httpserver
|
||||
values:
|
||||
address: 0.0.0.0:8080
|
||||
instances: 1
|
||||
|
||||
- name: httpserver
|
||||
type: capability
|
||||
properties:
|
||||
contract: wasmcloud:httpserver
|
||||
image: wasmcloud.azurecr.io/httpserver:0.17.0
|
||||
traits:
|
||||
- type: spreadscaler
|
||||
properties:
|
||||
replicas: 1
|
||||
instances: 1
|
||||
- type: link
|
||||
properties:
|
||||
target:
|
||||
name: echo
|
||||
namespace: wasi
|
||||
package: http
|
||||
interfaces:
|
||||
- incoming-handler
|
||||
source:
|
||||
config:
|
||||
- name: default-port
|
||||
properties:
|
||||
address: 0.0.0.0:8080
|
||||
|
|
|
|||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue