Compare commits
682 commits · comparing gcpaudit-0...main

[Commit list omitted — 682 SHA-only entries, from 421197c142 through bb67db7e26.]
@@ -1,6 +1,6 @@
 <!-- Thanks for sending a pull request! Here are some tips for you:

-1. If this is your first time, please read our contributor guidelines in the [CONTRIBUTING.md](https://github.com/falcosecurity/.github/blob/master/CONTRIBUTING.md) file and learn how to compile Falco from source [here](https://falco.org/docs/source).
+1. If this is your first time, please read our contributor guidelines in the [CONTRIBUTING.md](https://github.com/falcosecurity/.github/blob/main/CONTRIBUTING.md) file and learn how to compile Falco from source [here](https://falco.org/docs/source).
 2. Please label this pull request according to what type of issue you are addressing.
 3. Please add a release note!
 4. If the PR is unfinished while opening it specify a wip in the title before the actual title, for example, "wip: my awesome feature"

@@ -1,6 +1,6 @@
 #!/bin/bash

-RULES_FILE=$1
+RULES_DIR=$1
 CONFIG_FILE=$2
 PLUGIN_NAME=$3
 RESULT_FILE=$4
@@ -21,8 +21,24 @@ done

 cur_branch=`git rev-parse HEAD`
 echo Current branch is \"$cur_branch\"
-echo Checking version for rules file \"$RULES_FILE\"...
-cp $RULES_FILE tmp_rule_file.yaml
+echo Checking version for rules file in dir \"$RULES_DIR\"...
+# Get the rules files and save them.
+# We sort the rules files but first we remove the file extension.
+rules_files=$(ls ${RULES_DIR}/* | while read -r line; do echo "${line%.yaml}"; done | sort)
+# Add the extension to the files.
+# Append the .yaml extension back to the sorted strings
+rules_files=$(echo "${rules_files}" | sed 's/$/.yaml/')
+echo Rule files found: ${rules_files}
+
+# We save the current rules files before going back to the previous
+# version.
+prefix="tmp-"
+for rules_file in ${rules_files}; do
+    new_file="${prefix}$(basename "$rules_file")"
+    echo "Copying rules file ${rules_file} to temporary file ${new_file}"
+    cp "$rules_file" "$new_file"
+    tmp_rules+=" $new_file"
+done

 git checkout tags/$LATEST_TAG
 chmod +x $CHECKER_TOOL
@@ -30,13 +46,13 @@ $CHECKER_TOOL \
     compare \
     --falco-image=$FALCO_DOCKER_IMAGE \
     -c $CONFIG_FILE \
-    -l $RULES_FILE \
-    -r tmp_rule_file.yaml \
+    -l ${rules_files} \
+    -r ${tmp_rules} \
     ${extra_flags} \
     1>tmp_res.txt
 git switch --detach $cur_branch

-echo '##' $(basename $RULES_FILE) >> $RESULT_FILE
+echo '##' $(basename $RULES_DIR) >> $RESULT_FILE
 echo Comparing \`$cur_branch\` with latest tag \`$LATEST_TAG\` >> $RESULT_FILE
 echo "" >> $RESULT_FILE
 if [ -s tmp_res.txt ]
@@ -47,5 +63,5 @@ else
 fi
 echo "" >> $RESULT_FILE

-rm -f tmp_rule_file.yaml
+rm -f ${tmp_rules}
 rm -f tmp_res.txt

@@ -0,0 +1,35 @@
+version: 2
+updates:
+  - package-ecosystem: gomod
+    directories:
+      - ./build/*
+      - ./plugins/*
+      - ./shared/go/*/*
+    schedule:
+      interval: "weekly"
+    open-pull-requests-limit: 10
+    groups:
+      gomod:
+        update-types:
+          - "patch"
+  - package-ecosystem: cargo
+    directories:
+      - /plugins/*
+    schedule:
+      interval: "weekly"
+    open-pull-requests-limit: 10
+    groups:
+      cargo:
+        update-types:
+          - "minor"
+          - "patch"
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "weekly"
+    open-pull-requests-limit: 10
+    groups:
+      actions:
+        update-types:
+          - "minor"
+          - "patch"

@@ -0,0 +1,77 @@
+#!/bin/bash
+set -o errexit
+set -o nounset
+set -o pipefail
+# Plugins for which we need to check if there exist as alternative plugin.
+# If so, then we set them as a dependency. This is a must for rulesfiles
+# that have multiple plugins that satisfy their requirements and the plugin we are
+# checking is an alternative.
+# It accepts a single value or coma separated values.
+PLUGINS=$1
+
+filtered_entries=()
+
+# Extract plugins requirement from all files and save in a local file.
+# Combine the sections from multiple files and save the output to file.
+yq eval-all --no-doc '.[].required_plugin_versions | select(. != null and . != "")' ${@:2} > combined_requirements.yaml
+# Remove duplicates from the top level.
+yq eval-all --inplace 'unique_by(.name)' combined_requirements.yaml
+
+#echo $(cat combined_requirements.yaml)
+
+for YAML_FILE in "combined_requirements.yaml"; do
+    #echo "Processing file $YAML_FILE"
+    # Get the length of the entries list
+    length=$(yq eval '. | length' "$YAML_FILE")
+    # Iterate over each index in the entries list
+    for ((i = 0; i < length; i++)); do
+        # Access the entry by index using yq
+        entry=$(yq eval '.['"$i"']' "$YAML_FILE")
+
+        # Extract name and version from the entry
+        name=$(echo "$entry" | yq eval '.name' -)
+        version=$(echo "$entry" | yq eval '.version' -)
+        # If a plugin we are considering exists as an alternative of another one, then we just skip.
+        # This case could happen when we are processing multiple files and one of them overrides the
+        # plugin since it has some specific rules for that plugin.
+        to_be_skipped=false
+        for alternative in $(yq eval '.[].alternatives[].name' combined_requirements.yaml);do
+            if [[ "$alternative" == "$name" ]]; then
+                to_be_skipped=true
+
+                break
+            fi
+        done
+
+        if [ "$to_be_skipped" = true ];then
+            #echo "skipping plugin ${name} because already an alternative"
+            continue
+        fi
+
+        # Check if alternatives exist
+        alternatives=$(echo "$entry" | yq eval '.alternatives[]?')
+        if [ -n "$alternatives" ]; then
+            is_alternative=false
+            # Get the length of the alternatives list
+            alt_length=$(echo "$entry" | yq eval '.alternatives | length' -)
+            # Iterate over each alternative
+            for ((j = 0; j < alt_length; j++)); do
+                alt_entry=$(echo "$entry" | yq eval '.alternatives['"$j"']?' -)
+                alt_name=$(echo "$alt_entry" | yq eval '.name' -)
+                alt_version=$(echo "$alt_entry" | yq eval '.version' -)
+                # If our plugin is set as an alternative then we use it as a dependency.
+                if [[ " ${PLUGINS//,/ } " =~ " $alt_name " ]]; then
+                    #echo "Preferring alternative plugin ${alt_name} over ${name}"
+                    is_alternative=true
+                    name=$alt_name
+                    version=$alt_version
+                    break
+                fi
+            done
+        fi
+        filtered_entries+=("$name:$version")
+    done
+done
+
+# Output the filtered entries
+printf "%s\n" "${filtered_entries[@]}"

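For context, a minimal sketch of how this new helper is meant to be invoked; the plugin name and rules-file path below are hypothetical examples, not values taken from this diff:

```bash
# First argument: the plugin (or comma-separated plugins) being validated;
# remaining arguments: the rules files to scan.
# The script prints one "name:version" requirement per line, e.g. "k8saudit:0.7.0".
./.github/extract-plugins-deps-from-rulesfile.sh k8saudit-eks rules/k8s_audit_rules.yaml
```
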
@@ -5,7 +5,7 @@ PLUGIN=$1
 set +e pipefail
 echo Searching tag with prefix prefix \"${PLUGIN}-\"...
 git fetch --tags origin
-latest_tag=`git describe --match="${PLUGIN}-*.*.*" --exclude="${PLUGIN}-*.*.*-*" --abbrev=0 --tags $(git rev-list --tags="${PLUGIN}-*.*.*" --max-count=1)`
+latest_tag=`git describe --match="${PLUGIN}-[0-9]*" --match="plugins/${PLUGIN}/v*" --abbrev=0 --tags`
 set -e pipefail

 latest_ver="0.0.0"
@@ -14,9 +14,13 @@ then
 echo Not previous tag has been found
 else
 echo Most recent tag found is \"$latest_tag\"
-latest_ver=$(echo $latest_tag | cut -d '-' -f 2-)
+if [[ "${latest_tag}" == "plugins/${PLUGIN}"* ]]; then
+    latest_ver="${latest_tag##*/v}"
+else
+    latest_ver="${latest_tag##*-}"
+fi
 fi

 echo Setting plugin version for "${PLUGIN}" to $latest_ver
 echo "version=$latest_ver" >> $GITHUB_OUTPUT
-echo "ref=${PLUGIN}-$latest_ver" >> $GITHUB_OUTPUT
+echo "ref=${latest_tag}" >> $GITHUB_OUTPUT

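For reference, a minimal sketch of what the new version-extraction branch yields for the two tag layouts the script now recognizes; the tag values here are hypothetical:

```bash
#!/bin/bash
for latest_tag in "container-0.2.4" "plugins/container/v0.3.1"; do
    if [[ "${latest_tag}" == plugins/* ]]; then
        latest_ver="${latest_tag##*/v}"   # strip everything up to the trailing "/v"
    else
        latest_ver="${latest_tag##*-}"    # strip everything up to the last "-"
    fi
    echo "${latest_tag} -> ${latest_ver}"  # prints 0.2.4 and 0.3.1 respectively
done
```
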
@@ -18,24 +18,17 @@ if [ ! -f "$config_file" ]; then
     # we collect all plugin dependencies across all plugin rulesets
-    # todo(jasondellaluce): find a way to avoid ignoring alternatives
     if [ -d "$rules_dir" ]; then
         echo Extracting plugin dependencies from rules files...
         rules_files=$(ls $rules_dir/*)
-        for rules_file in "$rules_files"; do
-            echo Extracting plugin dependencies from rules file "${rules_file}"...
-            rules_deps=$(cat $rules_file | yq -r '.[].required_plugin_versions | select(. != null and . != "")[] | [.name + ":" + .version] | @csv')
-            for dep in $rules_deps; do
-                plugin_name=$(echo $dep | tr -d '"' | cut -d ':' -f 1)
-                if [[ ${deps} != *"$plugin_name"* ]]; then
-                    deps="${deps} "${plugin_name}
-                fi
-            done
-        done
+        echo Extracting plugin dependencies from rules file "${rules_files}"...
+        rules_deps=$($GITHUB_WORKSPACE/.github/extract-plugins-deps-from-rulesfile.sh $PLUGIN $rules_files)
+        echo "${rules_deps}"
     fi

     mkdir -p $(echo $config_file | sed 's:[^/]*$::')
     touch $config_file
     echo "plugins:" >> $config_file
-    for dep in $deps; do
+    for dep in $rules_deps; do
+        dep=$(echo $dep | tr -d '"' | cut -d ':' -f 1)
         echo " - name: ${dep}" >> $config_file
         echo " library_path: lib${dep}.so" >> $config_file
     done

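To illustrate the effect of the rewritten loop, here is a small standalone sketch; the dependency list is a hypothetical example of the "name:version" strings returned by the helper script, and the indentation of the emitted YAML is chosen for readability:

```bash
#!/bin/bash
rules_deps='k8saudit:0.7.0 json:0.7.2'
config_file=/tmp/falco-plugins-config.yaml

echo "plugins:" >> $config_file
for dep in $rules_deps; do
    dep=$(echo $dep | tr -d '"' | cut -d ':' -f 1)   # keep only the plugin name
    echo "  - name: ${dep}" >> $config_file
    echo "    library_path: lib${dep}.so" >> $config_file
done
cat $config_file   # lists the k8saudit and json plugins with their .so library paths
```
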
@@ -2,7 +2,7 @@ name: Build Plugins PR

 on:
   pull_request:
-    branches: [ master ]
+    branches: [main]

 # Checks if any concurrent jobs under the same pull request or branch are being executed
 # NOTE: this will cancel every workflow that is being ran against a PR as group is just the github ref (without the workflow name)
@@ -10,7 +10,7 @@ concurrency:
   group: ci-${{ github.head_ref || github.run_id }}
   cancel-in-progress: true

 jobs:
   build-plugins:
     uses: ./.github/workflows/reusable_build_packages.yaml
     with:
@@ -20,7 +20,7 @@ jobs:

   get-changed-plugins:
     uses: ./.github/workflows/reusable_get_changed_plugins.yaml

   build-rules-tool:
     needs: [get-changed-plugins]
     if: needs.get-changed-plugins.outputs.changed-plugins != '[]' && needs.get-changed-plugins.outputs.changed-plugins != ''
@@ -28,7 +28,7 @@ jobs:
     with:
       output: rules-checker
       repository: falcosecurity/rules

   validate-plugins:
     needs: [build-plugins, get-changed-plugins, build-rules-tool]
     if: needs.get-changed-plugins.outputs.changed-plugins != '[]' && needs.get-changed-plugins.outputs.changed-plugins != ''
@@ -39,12 +39,12 @@ jobs:
     uses: ./.github/workflows/reusable_validate_plugins.yaml
     with:
       plugin: ${{ matrix.plugin }}
-      falcoctl-version: 0.6.2
-      falco-image: falcosecurity/falco-no-driver:0.36.2
+      falcoctl-version: 0.11.0
+      falco-image: falcosecurity/falco:0.40.0
       plugins-artifact: plugins-x86_64-${{ github.event.number }}.tar.gz
       rules-checker: ./rules-checker
       arch: x86_64

   suggest-rules-version:
     needs: [build-plugins, get-changed-plugins, build-rules-tool]
     if: needs.get-changed-plugins.outputs.changed-plugins != '[]' && needs.get-changed-plugins.outputs.changed-plugins != ''
@@ -55,8 +55,13 @@ jobs:
     uses: ./.github/workflows/reusable_suggest_rules_version.yaml
     with:
       plugin: ${{ matrix.plugin }}
-      falco-image: falcosecurity/falco-no-driver:0.36.2
+      falco-image: falcosecurity/falco:0.40.0
       plugins-artifact: plugins-x86_64-${{ github.event.number }}.tar.gz
       rules-checker: ./rules-checker
       arch: x86_64
       job-index: ${{ strategy.job-index }}
+
+  upload-pr-info:
+    needs: [suggest-rules-version]
+    if: needs.get-changed-plugins.outputs.changed-plugins != '[]' && needs.get-changed-plugins.outputs.changed-plugins != ''
+    uses: ./.github/workflows/reusable_upload_pr_info.yaml

@@ -13,10 +13,10 @@ name: "Golang CodeQL"

 on:
   push:
-    branches: [ master ]
+    branches: [ main ]
   pull_request:
     # The branches below must be a subset of the branches above
-    branches: [ master ]
+    branches: [ main ]
   schedule:
     - cron: '28 11 * * 2'

@@ -43,15 +43,15 @@ jobs:

     steps:
     - name: Checkout repository
-      uses: actions/checkout@v2
+      uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
     - name: Setup Go
-      uses: actions/setup-go@v2
+      uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
       with:
         go-version: 1.21

     # Initializes the CodeQL tools for scanning.
     - name: Initialize CodeQL
-      uses: github/codeql-action/init@v1
+      uses: github/codeql-action/init@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3.29.4
       with:
         languages: go
       # If you wish to specify custom queries, you can do so here or in a config file.
@@ -62,7 +62,7 @@ jobs:
     # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
     # If this step fails, then you should remove it and run the build manually (see below)
     - name: Autobuild
-      uses: github/codeql-action/autobuild@v2
+      uses: github/codeql-action/autobuild@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3.29.4

     # ℹ️ Command-line programs to run using the OS shell.
     # 📚 https://git.io/JvXDl
@@ -76,4 +76,4 @@ jobs:
     # make release

     - name: Perform CodeQL Analysis
-      uses: github/codeql-action/analyze@v2
+      uses: github/codeql-action/analyze@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3.29.4

@@ -0,0 +1,171 @@
+name: Build and test container plugin
+on:
+  pull_request:
+    branches: [ main ]
+    paths:
+      - 'plugins/container/**'
+  push:
+    branches: [ main ]
+    paths:
+      - 'plugins/container/**'
+  workflow_dispatch:
+
+jobs:
+  build-others:
+    strategy:
+      fail-fast: false
+      matrix:
+        os: [ windows-latest, macos-latest ]
+        include:
+          - os: windows-latest
+            artifact-name: 'libcontainer-win'
+            artifact-path: 'plugins/container/container.dll'
+          - os: macos-latest
+            artifact-name: 'libcontainer-osx'
+            artifact-path: 'plugins/container/libcontainer.dylib'
+    runs-on: ${{ matrix.os }}
+    steps:
+      - name: Checkout
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          fetch-depth: 0
+          submodules: 'recursive'
+
+      - name: Setup Go
+        uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
+        with:
+          go-version-file: plugins/container/go-worker/go.mod
+          cache-dependency-path: plugins/container/go-worker/go.sum
+
+      - name: Build plugin library
+        working-directory: plugins/container
+        run: make
+
+      - name: Upload artifact
+        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
+        with:
+          name: ${{ matrix.artifact-name }}
+          path: ${{ matrix.artifact-path }}
+
+  build-linux:
+    name: build-linux-${{ matrix.arch }}
+    runs-on: ${{ (matrix.arch == 'arm64' && 'ubuntu-22.04-arm') || 'ubuntu-22.04' }}
+    strategy:
+      fail-fast: false
+      matrix:
+        arch: [ amd64, arm64 ]
+    container: golang:1.23-bullseye
+    steps:
+      - name: Install plugin deps
+        run: apt-get update && apt-get install -y --no-install-recommends zip unzip ninja-build
+
+      - name: Install updated cmake version ⛓️
+        run: |
+          curl -L -o /tmp/cmake.tar.gz https://github.com/Kitware/CMake/releases/download/v3.31.4/cmake-3.31.4-linux-$(uname -m).tar.gz
+          gzip -d /tmp/cmake.tar.gz
+          tar -xpf /tmp/cmake.tar --directory=/tmp
+          cp -R /tmp/cmake-3.31.4-linux-$(uname -m)/* /usr
+          rm -rf /tmp/cmake-3.31.4-linux-$(uname -m)
+
+      - name: Checkout
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          fetch-depth: 0
+          submodules: 'recursive'
+
+      - name: Safe directory
+        run: git config --global --add safe.directory $GITHUB_WORKSPACE
+
+      - name: Build plugin library
+        working-directory: plugins/container
+        run: make
+
+      - name: Upload artifact
+        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
+        with:
+          name: libcontainer-${{ matrix.arch }}
+          path: 'plugins/container/libcontainer.so'
+
+  test:
+    name: test-${{ matrix.arch }}
+    needs: [build-linux]
+    runs-on: ${{ (matrix.arch == 'arm64' && 'ubuntu-24.04-arm') || 'ubuntu-24.04' }}
+    strategy:
+      fail-fast: false
+      matrix:
+        arch: [ amd64, arm64 ]
+    steps:
+      # libbtrfs: needed by podman package - build dep.
+      - name: Install go test deps
+        run: sudo apt-get install -y --no-install-recommends libbtrfs-dev
+
+      - name: Checkout repo
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          fetch-depth: 0
+          submodules: 'recursive'
+
+      - name: Setup Go
+        uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
+        with:
+          go-version-file: plugins/container/go-worker/go.mod
+          cache-dependency-path: plugins/container/go-worker/go.sum
+
+      - name: Build go-worker executable
+        working-directory: plugins/container
+        run: make -C go-worker exe
+
+      - name: Run tests
+        working-directory: plugins/container
+        run: |
+          systemctl --user start podman
+          make test
+
+  falco-tests:
+    needs: [build-linux]
+    name: falco-tests-${{ matrix.arch }}
+    runs-on: ${{ (matrix.arch == 'arm64' && 'ubuntu-22.04-arm') || 'ubuntu-22.04' }}
+    strategy:
+      fail-fast: false
+      matrix:
+        arch: [ amd64, arm64 ]
+    container:
+      image: falcosecurity/falco:master-debian
+    steps:
+      - name: Download library
+        uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
+        with:
+          name: libcontainer-${{ matrix.arch }}
+          path: /usr/share/falco/plugins/
+
+      - name: Run falcosecurity/testing Falco tests
+        uses: falcosecurity/testing@main
+        with:
+          test-falco: 'true'
+          test-falcoctl: 'false'
+          test-k8saudit: 'false'
+          test-dummy: 'false'
+          static: 'false'
+          test-drivers: 'false'
+          show-all: 'true'
+          sudo: ''
+
+  libs-tests:
+    needs: [build-linux]
+    uses: falcosecurity/libs/.github/workflows/reusable_e2e_tests.yaml@master
+    with:
+      container_plugin_artifact_name: 'libcontainer-amd64'
+    secrets: inherit
+
+  formatting-check:
+    runs-on: ubuntu-24.04
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+
+      - name: Run clang-format style check
+        uses: jidicula/clang-format-action@4726374d1aa3c6aecf132e5197e498979588ebc8 #v4.15.0
+        with:
+          clang-format-version: '18'
+          check-path: plugins/container
+          exclude-regex: 'plugin_config_schema.h'

@@ -15,7 +15,7 @@ jobs:
     if: github.event.workflow_run.event == 'pull_request'
     steps:
       - name: 'Download artifact'
-        uses: actions/github-script@v6.4.1
+        uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
        with:
          script: |
            var artifacts = await github.rest.actions.listWorkflowRunArtifacts({
@@ -39,7 +39,7 @@ jobs:
        run: unzip pr.zip

      - name: 'Comment on PR'
-       uses: actions/github-script@v6.4.1
+       uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          script: |

@@ -1,11 +1,11 @@
 name: Build dummy_c plugin
 on:
   pull_request:
-    branches: [ master ]
+    branches: [ main ]
     paths:
       - 'plugins/dummy_c/**'
   push:
-    branches: [ master ]
+    branches: [ main ]
     paths:
       - 'plugins/dummy_c/**'
   workflow_dispatch:
@@ -21,7 +21,7 @@ jobs:
     runs-on: ubuntu-22.04
     steps:
       - name: Checkout ⤵️
-        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0

@@ -31,7 +31,7 @@ jobs:
          sudo apt install -y --no-install-recommends build-essential

      - name: Initialize CodeQL
-       uses: github/codeql-action/init@v2
+       uses: github/codeql-action/init@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3.29.4
        with:
          languages: cpp

@@ -41,16 +41,16 @@ jobs:
          make libdummy_c.so

      - name: Perform CodeQL Analysis
-       uses: github/codeql-action/analyze@v2
+       uses: github/codeql-action/analyze@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3.29.4

  formatting-check:
    runs-on: ubuntu-22.04
    steps:
      - name: Checkout code
-       uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+       uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

      - name: Run clang-format style check
-       uses: jidicula/clang-format-action@f62da5e3d3a2d88ff364771d9d938773a618ab5e #v4.11.0
+       uses: jidicula/clang-format-action@4726374d1aa3c6aecf132e5197e498979588ebc8 #v4.15.0
        with:
          clang-format-version: '14'
          check-path: plugins/dummy_c

@@ -1,11 +1,11 @@
 name: Build K8smeta plugin
 on:
   pull_request:
-    branches: [ master ]
+    branches: [ main ]
     paths:
       - 'plugins/k8smeta/**'
   push:
-    branches: [ master ]
+    branches: [ main ]
     paths:
       - 'plugins/k8smeta/**'
   workflow_dispatch:
@@ -21,12 +21,12 @@ jobs:
     runs-on: ubuntu-22.04
     steps:
       - name: Checkout ⤵️
-        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0

      - name: Setup Go
-       uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9 # v4.0.0
+       uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
        with:
          go-version: '1.21'
          check-latest: true
@@ -37,7 +37,7 @@ jobs:
          sudo apt install -y --no-install-recommends cmake build-essential autoconf libtool pkg-config

      - name: Initialize CodeQL
-       uses: github/codeql-action/init@v2
+       uses: github/codeql-action/init@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3.29.4
        with:
          languages: cpp

@@ -49,7 +49,7 @@ jobs:
          make k8smeta -j6

      - name: Perform CodeQL Analysis
-       uses: github/codeql-action/analyze@v2
+       uses: github/codeql-action/analyze@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3.29.4

      - name: Build and run tests 🏎️
        run: |
@@ -63,10 +63,10 @@ jobs:
    runs-on: ubuntu-22.04
    steps:
      - name: Checkout code
-       uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+       uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

      - name: Run clang-format style check
-       uses: jidicula/clang-format-action@f62da5e3d3a2d88ff364771d9d938773a618ab5e #v4.11.0
+       uses: jidicula/clang-format-action@4726374d1aa3c6aecf132e5197e498979588ebc8 #v4.15.0
        with:
          clang-format-version: '14'
          check-path: plugins/k8smeta

@@ -2,17 +2,17 @@ name: Update Plugins-dev

 on:
   push:
-    branches: [ master ]
+    branches: [ main ]
   workflow_dispatch:

-# Checks if any concurrent jobs is already being executed for master and cancel it.
+# Checks if any concurrent jobs is already being executed for main and cancel it.
 concurrency:
-  group: ci-master
+  group: ci-main
   cancel-in-progress: true

 jobs:
   build-plugins-dev:
-    uses: falcosecurity/plugins/.github/workflows/reusable_build_packages.yaml@master
+    uses: falcosecurity/plugins/.github/workflows/reusable_build_packages.yaml@main
     with:
       makecommand: make packages -j4
       suffix: dev
@@ -20,7 +20,7 @@ jobs:

   get-changed-plugins:
     uses: ./.github/workflows/reusable_get_changed_plugins.yaml

   build-rules-tool:
     needs: [get-changed-plugins]
     if: needs.get-changed-plugins.outputs.changed-plugins != '[]' && needs.get-changed-plugins.outputs.changed-plugins != ''
@@ -28,7 +28,7 @@ jobs:
     with:
       output: rules-checker
       repository: falcosecurity/rules

   validate-plugins:
     needs: [build-plugins-dev, get-changed-plugins, build-rules-tool]
     if: needs.get-changed-plugins.outputs.changed-plugins != '[]' && needs.get-changed-plugins.outputs.changed-plugins != ''
@@ -39,15 +39,23 @@ jobs:
     uses: ./.github/workflows/reusable_validate_plugins.yaml
     with:
       plugin: ${{ matrix.plugin }}
-      falcoctl-version: 0.6.2
-      falco-image: falcosecurity/falco-no-driver:0.36.2
+      falcoctl-version: 0.11.0
+      falco-image: falcosecurity/falco:0.40.0
       plugins-artifact: plugins-x86_64-dev.tar.gz
       rules-checker: ./rules-checker
       arch: x86_64

   publish-plugins-dev:
     needs: [build-plugins-dev, validate-plugins]
-    uses: falcosecurity/plugins/.github/workflows/reusable_publish_packages.yaml@master
+    uses: falcosecurity/plugins/.github/workflows/reusable_publish_packages.yaml@main
     with:
       suffix: dev
     secrets: inherit
+
+  publish-oci-artifacts-dev:
+    needs: [ build-plugins-dev, validate-plugins ]
+    uses: ./.github/workflows/reusable-publish-oci-artifacts.yaml
+    with:
+      dev-tag: main
+      suffix: dev
+    secrets: inherit

@@ -0,0 +1,43 @@
+name: Check registry.yaml
+on:
+  pull_request:
+    branches: [main]
+    paths:
+      - "registry.yaml"
+  push:
+    branches: [main]
+    paths:
+      - "registry.yaml"
+  workflow_dispatch:
+
+# Checks if any concurrent jobs under the same pull request or branch are being executed
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+  cancel-in-progress: true
+
+jobs:
+  check-registry:
+    name: check-registry
+    runs-on: ubuntu-22.04
+    steps:
+      - name: Checkout ⤵️
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          fetch-depth: 0
+
+      - name: Setup Go
+        uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
+        with:
+          go-version: "1.21"
+          check-latest: true
+
+      - name: Build registry artifact tool
+        working-directory: build/registry
+        run: make
+
+      # Formal validation only.
+      # When a new plugin is added and not yet released,
+      # its OCI repo has not been created yet, so we can't validate it.
+      - name: Verify the correctness of registry.yaml
+        working-directory: build/registry
+        run: ./bin/registry check ../../registry.yaml

@@ -16,22 +16,22 @@ jobs:
     runs-on: ubuntu-latest
     outputs:
       package: ${{ steps.regex-match.outputs.group1 }}

     steps:
-      - name: Extract semver ℹ️
-        uses: actions-ecosystem/action-regex-match@v2
+      - name: Validate tag ℹ️
+        uses: actions-ecosystem/action-regex-match@9e6c4fb3d5e898f505be7a1fb6e7b0a278f6665b # v2.0.2
         id: regex-match
         with:
           text: ${{ github.ref_name }}
-          regex: '^([a-z]+[a-z0-9_\-]*)-(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)(-((0|[1-9][0-9]*|[0-9]*[a-zA-Z-][0-9a-zA-Z-]*)(\.(0|[1-9][0-9]*|[0-9]*[a-zA-Z-][0-9a-zA-Z-]*))*))?$'
-
-      - name: Fail on non semver tag
+          regex: '^plugins\/([a-z0-9_-]+)\/v(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$'
+
+      - name: Fail on invalid tag
         if: steps.regex-match.outputs.match == ''
         run: exit 1

   build-plugins:
     needs: extract-info
-    uses: falcosecurity/plugins/.github/workflows/reusable_build_packages.yaml@master
+    uses: falcosecurity/plugins/.github/workflows/reusable_build_packages.yaml@main
     with:
       makecommand: make release/${{ needs.extract-info.outputs.package }} -j4
       suffix: stable
@@ -49,15 +49,22 @@ jobs:
     uses: ./.github/workflows/reusable_validate_plugins.yaml
     with:
       plugin: ${{ needs.extract-info.outputs.package }}
-      falcoctl-version: 0.6.2
-      falco-image: falcosecurity/falco-no-driver:0.36.2
+      falcoctl-version: 0.11.0
+      falco-image: falcosecurity/falco:0.40.0
       plugins-artifact: plugins-x86_64-stable.tar.gz
       rules-checker: ./rules-checker
       arch: x86_64

   publish-plugins:
     needs: [build-plugins, validate-plugins]
-    uses: falcosecurity/plugins/.github/workflows/reusable_publish_packages.yaml@master
+    uses: falcosecurity/plugins/.github/workflows/reusable_publish_packages.yaml@main
     with:
       suffix: stable
     secrets: inherit
+
+  publish-oci-artifacts:
+    needs: [ build-plugins, validate-plugins ]
+    uses: ./.github/workflows/reusable-publish-oci-artifacts.yaml
+    with:
+      suffix: stable
+    secrets: inherit

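As a quick illustration of which tag names the new release trigger accepts, here is a hedged sketch using a simplified ERE approximation of the workflow's regex; the tag names are hypothetical:

```bash
#!/bin/bash
# Simplified approximation of the new plugins/<name>/v<semver> pattern.
pattern='^plugins/([a-z0-9_-]+)/v(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)(-[0-9A-Za-z.-]+)?(\+[0-9A-Za-z.-]+)?$'
for tag in "plugins/container/v0.3.1" "plugins/k8saudit/v1.0.0-rc1" "container-0.2.4"; do
    if [[ "$tag" =~ $pattern ]]; then
        echo "accepted: $tag"
    else
        echo "rejected: $tag"   # old-style <plugin>-<version> tags now fail the check
    fi
done
```
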
@@ -1,6 +1,14 @@
 name: Update OCI Artifacts
 on:
   workflow_dispatch:
+  workflow_call:
+    inputs:
+      dev-tag:
+        description: The tag used for oci artifacts published from the main branch.
+        required: false
+        type: string
+      suffix:
+        description: Suffix for uploading packages (dev or stable)
+        required: true
+        type: string

 jobs:
   publish-oci-artifacts:
@@ -14,15 +22,27 @@ jobs:
       matrix: ${{ steps.oci_build.outputs.REGISTRY_UPDATE_STATUS }}

     steps:
+      - name: Download x86_64 plugins
+        uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
+        with:
+          name: plugins-x86_64-${{ inputs.suffix }}.tar.gz
+          path: /tmp/plugins-x86_64
+
+      - name: Download aarch64 plugins
+        uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
+        with:
+          name: plugins-aarch64-${{ inputs.suffix }}.tar.gz
+          path: /tmp/plugins-aarch64
+
       - name: Checkout Plugins
-        uses: actions/checkout@v3
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
         with:
           fetch-depth: 0

       - name: Setup Golang
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
         with:
-          go-version: '^1.21'
+          go-version: "^1.21"

       - name: Build registry artifact tool
         working-directory: build/registry
@@ -37,12 +57,18 @@ jobs:
           REPO_GITHUB: https://github.com/${{ github.repository_owner }}/plugins.git
         working-directory: build/registry
         run: |
-          REGISTRY_UPDATE_STATUS=$(./bin/registry update-oci-registry ../../registry.yaml)
+          REGISTRY_UPDATE_STATUS=$(./bin/registry update-oci-registry \
+            ../../registry.yaml \
+            --plugins-amd64-path /tmp/plugins-x86_64 \
+            --plugins-arm64-path /tmp/plugins-aarch64 \
+            --rulesfiles-path /tmp/plugins-x86_64 \
+            --dev-tag "${{ inputs.dev-tag }}"
+          )
           echo "REGISTRY_UPDATE_STATUS=${REGISTRY_UPDATE_STATUS}" >> $GITHUB_OUTPUT

   # Create signatures of the plugin artifacts as OCI artifacts
   sign-oci-artifacts:
-    needs: [ publish-oci-artifacts ]
+    needs: [publish-oci-artifacts]
     runs-on: ubuntu-latest

     if: ${{ needs.publish-oci-artifacts.outputs.matrix != '[]' }}
@@ -57,13 +83,12 @@ jobs:

     steps:
       - name: Install Cosign
-        uses: sigstore/cosign-installer@v3.1.0
-        with:
-          cosign-release: 'v2.1.0'
+        uses: sigstore/cosign-installer@d58896d6a1865668819e1d91763c7751a165e159 # v3.9.2

       - run: cosign version

       - name: Log into ghcr.io
-        uses: docker/login-action@master
+        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
         with:
           registry: ghcr.io
           username: ${{ github.repository_owner }}
@@ -71,4 +96,3 @@ jobs:

       - name: Sign the artifacts with GitHub OIDC Token
         run: cosign sign --yes ${{ matrix.value.repository.ref }}@${{ matrix.value.artifact.digest }}
-

@@ -1,4 +1,4 @@
-# This is a reusable workflow used by master and release CI
+# This is a reusable workflow used by main and release CI
 on:
   workflow_call:
     inputs:
@@ -10,43 +10,65 @@ on:
         description: Suffix for uploading packages (dev or stable)
         required: true
         type: string

 jobs:
   build-packages:
     name: build-packages-${{ matrix.arch }}
-    runs-on: ${{ (matrix.arch == 'aarch64' && 'actuated-arm64-8cpu-16gb') || 'ubuntu-22.04' }}
+    runs-on: ${{ (matrix.arch == 'aarch64' && 'ubuntu-22.04-arm') || 'ubuntu-22.04' }}
     strategy:
       matrix:
         arch: [x86_64, aarch64]
-    container: golang:1.18
+    # Upgrading to a newer debian version would make the build process generate
+    # binaries that require newer GLIBC version so we need to be based on bullseye for now
+    container: golang:1.23-bullseye
     steps:
       - name: Install deps
         run: |
           apt update
-          apt install -y --no-install-recommends git awscli make build-essential autoconf libtool pkg-config
+          apt install -y --no-install-recommends awscli build-essential autoconf libelf-dev libtool autotools-dev \
+            automake zip unzip ninja-build wget lsb-release software-properties-common gnupg
+
+      - name: Install updated clang version ⛓️
+        run: |
+          wget https://apt.llvm.org/llvm.sh
+          chmod u+x llvm.sh
+          ./llvm.sh 19
+          ln -s /usr/bin/clang-19 /usr/bin/clang

       - name: Install updated cmake version ⛓️
         run: |
-          curl -L -o /tmp/cmake.tar.gz https://github.com/Kitware/CMake/releases/download/v3.22.5/cmake-3.22.5-linux-$(uname -m).tar.gz
+          curl -L -o /tmp/cmake.tar.gz https://github.com/Kitware/CMake/releases/download/v3.31.4/cmake-3.31.4-linux-$(uname -m).tar.gz
           gzip -d /tmp/cmake.tar.gz
           tar -xpf /tmp/cmake.tar --directory=/tmp
-          cp -R /tmp/cmake-3.22.5-linux-$(uname -m)/* /usr
-          rm -rf /tmp/cmake-3.22.5-linux-$(uname -m)
+          cp -R /tmp/cmake-3.31.4-linux-$(uname -m)/* /usr
+          rm -rf /tmp/cmake-3.31.4-linux-$(uname -m)

+      - name: Install Rust 🦀
+        uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # v1
+        with:
+          toolchain: "1.86.0"
+
+      - name: Install bpf-linker
+        run: |
+          cargo install bpf-linker@0.9.14
+
+      - name: Configure Rust cache
+        uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 #v2
+
       - name: Checkout Plugins ⤵️
-        uses: actions/checkout@v3
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
         with:
           fetch-depth: 0
+          submodules: "recursive"

       - name: Safe directory
         run: git config --global --add safe.directory $GITHUB_WORKSPACE

       - name: Run build 🏗️
         run: ${{ inputs.makecommand }}

       - name: Upload artifacts
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
         with:
           name: plugins-${{ matrix.arch }}-${{ inputs.suffix }}.tar.gz
           path: output/*.tar.gz

@@ -1,4 +1,4 @@
-# This is a reusable workflow used by master and release CI
+# This is a reusable workflow used by main and release CI
 on:
   workflow_call:
     inputs:
@@ -12,31 +12,31 @@ on:
         required: false
         default: falcosecurity/rules
         type: string

 jobs:
   build-rules-checker:
     runs-on: ubuntu-latest
     steps:
       - name: Setup Golang
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
         with:
-          go-version: '1.19.0'
+          go-version: "1.19.0"

       - name: Checkout rules
-        uses: actions/checkout@v3
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
         with:
           repository: ${{ inputs.repository }}

       - name: Build checker tool
         working-directory: build/checker
         run: go build -o ${{ inputs.output }}

       - name: Test checker tool
         working-directory: build/checker
         run: go test ./... -cover

       - name: Upload artifacts
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
         with:
           name: rules-tool.tar.gz
           path: build/checker/${{ inputs.output }}

@@ -1,4 +1,4 @@
-# This is a reusable workflow used by master CI
+# This is a reusable workflow used by main CI
 on:
   workflow_call:
     outputs:
@@ -13,12 +13,12 @@ jobs:
       changed-plugins: ${{ steps.set-changed-plugins.outputs.changed-plugins }}
     steps:
       - name: Checkout rules
-        uses: actions/checkout@v3
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

       - name: Get changed files
         id: changed-plugins
         if: github.event_name == 'pull_request'
-        uses: Ana06/get-changed-files@v2.2.0
+        uses: Ana06/get-changed-files@25f79e676e7ea1868813e21465014798211fad8c # v2.3.0
         with:
           format: space-delimited
           token: ${{ secrets.GITHUB_TOKEN }}

@@ -1,4 +1,4 @@
-# This is a reusable workflow used by master and release CI
+# This is a reusable workflow used by main and release CI
 on:
   workflow_call:
     inputs:
@@ -6,7 +6,7 @@ on:
         description: Suffix for uploading packages (dev or stable)
         required: true
         type: string

 permissions:
   id-token: write
   contents: read
@@ -14,26 +14,26 @@ permissions:
 env:
   AWS_S3_BUCKET: falco-distribution
   AWS_S3_PREFIX: plugins
   AWS_S3_REGION: eu-west-1

 jobs:
   publish-packages:
     runs-on: ubuntu-latest
     steps:
       - name: Download x86_64 plugins
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
         with:
           name: plugins-x86_64-${{ inputs.suffix }}.tar.gz
           path: /tmp/plugins-x86_64

       - name: Download aarch64 plugins
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
         with:
           name: plugins-aarch64-${{ inputs.suffix }}.tar.gz
           path: /tmp/plugins-aarch64

       - name: Configure AWS credentials 🔧⚙️
-        uses: aws-actions/configure-aws-credentials@v2
+        uses: aws-actions/configure-aws-credentials@b47578312673ae6fa5b5096b330d9fbac3d116df # v4.2.1
         with:
           role-to-assume: "arn:aws:iam::292999226676:role/github_actions-plugins-s3"
           aws-region: ${{ env.AWS_S3_REGION }}
@@ -42,5 +42,4 @@ jobs:
         run: |
           for package in /tmp/plugins-*/*.tar.gz; do
             aws s3 cp --no-progress $package s3://${{ env.AWS_S3_BUCKET}}/${{ env.AWS_S3_PREFIX }}/${{ inputs.suffix }}/
           done
-

|
@ -35,10 +35,13 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v3
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Install system dependencies
|
||||
run: pip install yq
|
||||
run: sudo wget https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -O /usr/bin/yq && sudo chmod +x /usr/bin/yq
|
||||
|
||||
- name: Setup plugin config and rules
|
||||
id: get-config
|
||||
|
@ -49,10 +52,10 @@ jobs:
|
|||
run: ./.github/get-latest-plugin-version.sh ${{ inputs.plugin }}
|
||||
|
||||
- name: Download rules tool
|
||||
uses: actions/download-artifact@v3
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
with:
|
||||
name: rules-tool.tar.gz
|
||||
|
||||
|
||||
# note: here we're loading the locally-built plugins, whereas another
|
||||
# solution would be to pull them with falcoctl. The flaw with this
|
||||
# approach is that we load the same plugin for both the "old" and the
|
||||
|
@ -60,11 +63,11 @@ jobs:
|
|||
# the two rulesets depend on plugins with different majors.
|
||||
# todo(jasondellaluce): fix this corner case in the future
|
||||
- name: Download plugins
|
||||
uses: actions/download-artifact@v3
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
with:
|
||||
name: ${{ inputs.plugins-artifact }}
|
||||
path: /tmp/plugins-${{ inputs.arch }}
|
||||
|
||||
|
||||
- name: Extract plugins
|
||||
run: |
|
||||
for archive in /tmp/plugins-*/*.tar.gz; do
|
||||
|
@ -74,7 +77,7 @@ jobs:
|
|||
sudo mkdir -p /usr/share/falco/plugins
|
||||
sudo cp -r *.so /usr/share/falco/plugins || true
|
||||
popd && rm -fr tmpdir
|
||||
done
|
||||
done
|
||||
|
||||
- name: Compare changed files with previous versions
|
||||
id: compare
|
||||
|
@ -83,10 +86,8 @@ jobs:
|
|||
rules_dir=${{ steps.get-config.outputs.rules_dir }}
|
||||
|
||||
if [ -d "$rules_dir" ]; then
|
||||
rules_files=$(ls $rules_dir/*)
|
||||
for rules_file in $rules_files; do
|
||||
./.github/compare-rule-files.sh \
|
||||
"$rules_file" \
|
||||
"$rules_dir" \
|
||||
${{ steps.get-config.outputs.config_file }} \
|
||||
${{ inputs.plugin }} \
|
||||
rule_result.txt \
|
||||
|
@ -100,7 +101,6 @@ jobs:
|
|||
fi
|
||||
cat rule_result.txt >> result.txt
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
if [ -s result.txt ]; then
|
||||
|
@ -114,43 +114,9 @@ jobs:
|
|||
cp ${{ steps.compare.outputs.comment_file }} ./pr/COMMENT-${{ inputs.job-index }}
|
||||
|
||||
- name: Upload PR info as artifact
|
||||
uses: actions/upload-artifact@v2
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
if: steps.compare.outputs.comment_file != ''
|
||||
with:
|
||||
name: pr-${{ inputs.job-index }}
|
||||
path: pr/
|
||||
retention-days: 1
|
||||
|
||||
upload-pr-info:
|
||||
needs: [check-version]
|
||||
if: github.event_name == 'pull_request'
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Download PR infos
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
path: tmp-artifacts
|
||||
|
||||
- name: Save PR info
|
||||
run: |
|
||||
mkdir -p ./pr
|
||||
echo ${{ github.event.number }} > ./pr/NR
|
||||
touch ./pr/COMMENT
|
||||
echo "# Rules files suggestions" >> ./pr/COMMENT
|
||||
echo "" >> ./pr/COMMENT
|
||||
files=$(find ./tmp-artifacts/)
|
||||
for file in $files; do
|
||||
if [[ $file =~ "COMMENT" ]]; then
|
||||
cat $file >> ./pr/COMMENT
|
||||
fi
|
||||
done
|
||||
echo Uploading PR info...
|
||||
cat ./pr/COMMENT
|
||||
echo ""
|
||||
|
||||
- name: Upload PR info as artifact
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: pr
|
||||
path: pr/
|
||||
retention-days: 1
|
||||
|
|
|
@@ -0,0 +1,37 @@
+# This is a reusable workflow used by the PR CI
+on:
+  workflow_call:
+
+jobs:
+  upload-pr-info:
+    if: github.event_name == 'pull_request'
+    runs-on: ubuntu-latest
+    steps:
+      - name: Download PR infos
+        uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
+        with:
+          path: tmp-artifacts
+
+      - name: Save PR info
+        run: |
+          mkdir -p ./pr
+          echo ${{ github.event.number }} > ./pr/NR
+          touch ./pr/COMMENT
+          echo "# Rules files suggestions" >> ./pr/COMMENT
+          echo "" >> ./pr/COMMENT
+          files=$(find ./tmp-artifacts/)
+          for file in $files; do
+            if [[ $file =~ "COMMENT" ]]; then
+              cat $file >> ./pr/COMMENT
+            fi
+          done
+          echo Uploading PR info...
+          cat ./pr/COMMENT
+          echo ""
+
+      - name: Upload PR info as artifact
+        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
+        with:
+          name: pr
+          path: pr/
+          retention-days: 1

@@ -1,4 +1,4 @@
# This is a reusable workflow used by master and release CI
# This is a reusable workflow used by main and release CI
on:
workflow_call:
inputs:
@@ -34,28 +34,25 @@ jobs:
runs-on: ubuntu-latest
container: golang:1.18
env:
GOFLAGS: '-buildvcs=false'
GOFLAGS: "-buildvcs=false"
steps:
- name: Checkout code
uses: actions/checkout@v3
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

- name: Install system dependencies
run: |
apt update -y
apt install -y --no-install-recommends pip git jq
pip install yq
run: wget https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -O /usr/bin/yq && chmod +x /usr/bin/yq

- name: Setup plugin config and rules
id: get-config
run: ./.github/setup-plugin-config-rules.sh ${{ inputs.plugin }}

- name: Download rules tool
uses: actions/download-artifact@v3
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
with:
name: rules-tool.tar.gz

- name: Download plugins
uses: actions/download-artifact@v3
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
with:
name: ${{ inputs.plugins-artifact }}
path: /tmp/plugins-${{ inputs.arch }}
@@ -69,13 +66,13 @@ jobs:

mkdir -p /etc/falco/falco
mkdir -p /usr/share/falco/plugins

# avoids git exit status 128: detected dubious ownership in repository
git config --global --add safe.directory $(pwd)

for plugin_name in $loaded_plugins; do
echo Installing locally-built plugin "$plugin_name"...

# At release time we only build the released plugin, so it's possible
# that validation requires a plugin that we haven't built locally.
# In those cases, we build it on-the-fly and perform validation with it.
@@ -122,23 +119,23 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v3
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

- name: Install system dependencies
run: pip install yq
run: sudo wget https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -O /usr/bin/yq && sudo chmod +x /usr/bin/yq

- name: Setup plugin config and rules
id: get-config
run: ./.github/setup-plugin-config-rules.sh ${{ inputs.plugin }}

- name: Download plugins
uses: actions/download-artifact@v3
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
with:
name: ${{ inputs.plugins-artifact }}
path: /tmp/plugins-${{ inputs.arch }}

- name: Download rules tool
uses: actions/download-artifact@v3
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
with:
name: rules-tool.tar.gz

@@ -146,7 +143,7 @@ jobs:
run: |
curl --fail -LS "https://github.com/falcosecurity/falcoctl/releases/download/v${{ inputs.falcoctl-version }}/falcoctl_${{ inputs.falcoctl-version }}_linux_${{ inputs.arch == 'x86_64' && 'amd64' || 'arm64' }}.tar.gz" | tar -xz
sudo install -o root -g root -m 0755 falcoctl /usr/local/bin/falcoctl

# note(jsondellaluce): exploring the set of all dependencies including their
# alternatives and all the possible combinations of different versions would
# result in a combinatorial explosion. As such, we take the simple route
@@ -181,48 +178,49 @@ jobs:
sudo mkdir -p /usr/share/falco/plugins

rules_files=$(ls ${{ steps.get-config.outputs.rules_dir }}/*)
for rules_file in $rules_files; do
deps=$(cat $rules_file | yq -r '.[].required_plugin_versions | select(. != null and . != "")[] | [.name + ":" + .version] | @csv')
deps=$(./.github/extract-plugins-deps-from-rulesfile.sh \
"${{ inputs.plugin }}" \
"$rules_files")
echo "Deps: ${deps}"
ver_diff=0
has_updates=1
while [ "$has_updates" -eq 1 ]; do
has_updates=0
for dep in $deps; do
echo "Plugin: ${dep}"
plugin_name=$(echo $dep | tr -d '"' | cut -d ':' -f 1)

ver_diff=0
has_updates=1
while [ "$has_updates" -eq 1 ]; do
has_updates=0
for dep in $deps; do
plugin_name=$(echo $dep | tr -d '"' | cut -d ':' -f 1)
# forcing zero patch version to forbid patch-like dependencies
# bumping minor version at every iteration
plugin_ver=$(echo $dep | tr -d '"' | cut -d ':' -f 2)
plugin_ver_major=$(echo $plugin_ver | cut -d '.' -f 1)
plugin_ver_minor=$(expr $(echo $plugin_ver | cut -d '.' -f 2) + $ver_diff)
plugin_ver_patch=0
plugin_ver="${plugin_ver_major}.${plugin_ver_minor}.${plugin_ver_patch}"

# forcing zero patch version to forbid patch-like dependencies
# bumping minor version at every iteration
plugin_ver=$(echo $dep | tr -d '"' | cut -d ':' -f 2)
plugin_ver_major=$(echo $plugin_ver | cut -d '.' -f 1)
plugin_ver_minor=$(expr $(echo $plugin_ver | cut -d '.' -f 2) + $ver_diff)
plugin_ver_patch=0
plugin_ver="${plugin_ver_major}.${plugin_ver_minor}.${plugin_ver_patch}"
set +e pipefail
sudo falcoctl artifact install ${plugin_name}:${plugin_ver}
if [ $? -eq 0 ]; then
echo Installed plugin "${plugin_name}" at version "${plugin_ver}"
has_updates=1
else
echo Can\'t pull plugin "${plugin_name}" at version "${plugin_ver}"
echo Attempt installing locally-built plugin "${plugin_name}"...
for archive in $(ls /tmp/plugins-${{ inputs.arch }}/${plugin_name}-*); do
echo Extracting archive "$archive"...
mkdir -p tmpdir && pushd tmpdir
tar -xvf $archive
sudo cp -r *.so /usr/share/falco/plugins || true
popd && rm -fr tmpdir
done
fi
set -e pipefail
done
ver_diff=$(expr $ver_diff + 1)

set +e pipefail
sudo falcoctl artifact install ${plugin_name}:${plugin_ver}
if [ $? -eq 0 ]; then
echo Installed plugin "${plugin_name}" at version "${plugin_ver}"
has_updates=1
else
echo Can\'t pull plugin "${plugin_name}" at version "${plugin_ver}"
echo Attempt installing locally-built plugin "${plugin_name}"...
for archive in $(ls /tmp/plugins-${{ inputs.arch }}/${plugin_name}-*); do
echo Extracting archive "$archive"...
mkdir -p tmpdir && pushd tmpdir
tar -xvf $archive
sudo cp -r *.so /usr/share/falco/plugins || true
popd && rm -fr tmpdir
done
fi
set -e pipefail
done
ver_diff=$(expr $ver_diff + 1)

./.github/validate-rules.sh \
"${{ inputs.falco-image }}" \
"${{ inputs.rules-checker }}" \
"${{ steps.get-config.outputs.config_file }}" \
"$rules_file"
done
./.github/validate-rules.sh \
"${{ inputs.falco-image }}" \
"${{ inputs.rules-checker }}" \
"${{ steps.get-config.outputs.config_file }}" \
"$rules_files"
done
@@ -0,0 +1,30 @@
repos:
- repo: https://github.com/pre-commit/mirrors-clang-format
rev: 'v18.1.3'
hooks:
- id: clang-format
name: clang-format-18
files: ^plugins/container/.*$
exclude: plugin_config_schema\.h
stages: [pre-commit]
- repo: https://github.com/pre-commit/mirrors-clang-format
rev: 'v14.0.6'
hooks:
- id: clang-format
name: clang-format-14
files: ^plugins/(k8smeta|dummy_c)/.*$
stages: [pre-commit]
- repo: local
hooks:
- id: rust-fmt
name: rust-fmt
description: Format files with rustfmt.
entry: rustfmt +nightly --color always
types: [rust]
language: system
stages: [pre-commit]
- id: dco
name: dco
entry: ./tools/local_hooks/dco-pre-commit-msg.sh
language: script
stages: [prepare-commit-msg]
18 Makefile
@@ -27,6 +27,7 @@ plugins-clean = $(addprefix clean/,$(plugins))
plugins-changelogs = $(addprefix changelog/,$(plugins))
plugins-packages = $(addprefix package/,$(plugins))
plugins-releases = $(addprefix release/,$(plugins))
plugins-tidy = $(addprefix tidy/,$(plugins))

.PHONY: all
all: check-registry $(plugins)
@@ -43,6 +44,9 @@ $(plugins): build/readme/readme
&& make readme READMETOOL=../../build/readme/bin/readme \
&& echo "$@ readme generated" || :

tidy/%:
+cd plugins/$@ && [-f go.mod] && $(GO) mod tidy

.PHONY: clean
clean: $(plugins-clean) clean/packages clean/build/utils/version clean/build/registry/registry clean/build/changelog/changelog clean/build/readme/readme
@@ -54,6 +58,17 @@ clean/packages:
$(plugins-clean):
+cd plugins/$(shell basename $@) && make clean

.PHONY: $(plugins-tidy)
$(plugins-tidy):
+cd plugins/$(shell basename $@) && [ -f go.mod ] && $(GO) mod tidy || true

.PHONY: tidy
tidy: $(plugins-tidy)
+cd build/utils && $(GO) mod tidy
+cd build/readme && $(GO) mod tidy
+cd build/registry && $(GO) mod tidy
+cd build/utils && $(GO) mod tidy

.PHONY: packages
packages: clean/packages $(plugins-packages)

@@ -70,7 +85,8 @@ package/%: clean/% % build/utils/version
@echo "$(PLUGIN_NAME) package built"
# build rules package, if any
mkdir -p $(OUTPUT_DIR)/$(PLUGIN_NAME)-rules
cp -r plugins/$(PLUGIN_NAME)/rules/* $(OUTPUT_DIR)/$(PLUGIN_NAME)-rules/ && \
# symlinks are ignored when creating the rules package. Only regular files are considered.
find plugins/$(PLUGIN_NAME)/rules/* -type f -exec cp -t $(OUTPUT_DIR)/$(PLUGIN_NAME)-rules/ {} + && \
tar -zcvf $(OUTPUT_DIR)/$(PLUGIN_NAME)-rules-$(PLUGIN_VERSION).tar.gz -C \
$(OUTPUT_DIR)/$(PLUGIN_NAME)-rules $$(ls -A ${OUTPUT_DIR}/$(PLUGIN_NAME)-rules) || :
@test $(OUTPUT_DIR)/$(PLUGIN_NAME)-rules-$(PLUGIN_VERSION).tar.gz && echo "$(PLUGIN_NAME) rules package built"
3 OWNERS
@@ -2,8 +2,9 @@ approvers:
- mstemm
- leogr
- jasondellaluce
- LucaGuerra
- ekoops
emeritus_approvers:
- ldegio
- leodido
- fntlnz
147 README.md
@@ -2,56 +2,20 @@

[](https://github.com/falcosecurity/evolution/blob/main/REPOSITORIES.md#core-scope) [](https://github.com/falcosecurity/evolution/blob/main/REPOSITORIES.md#stable) [](./LICENSE)

Note: *The plugin system is a new feature introduced since Falco 0.31.0. You can find more detail in the original [proposal document](https://github.com/falcosecurity/falco/blob/master/proposals/20210501-plugin-system.md).*
This repository is the central hub for the Falco Plugin ecosystem. It serves two main purposes:

This repository contains the [Plugin Registry](#plugin-registry) and the [plugins officially maintained](#falcusecurity-plugins) by the Falcosecurity organization. [Plugins](https://falco.org/docs/plugins) can be used to extend [Falco](https://github.com/falcosecurity/falco) and applications using [Falcosecurity libs](https://github.com/falcosecurity/libs). Please refer to the [official documentation](https://falco.org/docs/plugins) to better understand the plugin system's concepts and architecture.
- **Be a registry:** A comprehensive catalog of plugins recognized by The Falco Project, regardless of where their source code is hosted.
- **Monorepo for Falcosecurity plugins:** Official plugins hosted and maintained by The Falco Project, with robust release and distribution processes.

For more information about the plugin system's architecture and concepts, please see the [official documentation](https://falco.org/docs/plugins).

---

## Plugin Registry

The Registry contains metadata and information about every plugin known and recognized by the Falcosecurity organization. It lists plugins hosted either in this repository or in other repositories. These plugins are developed for Falco and made available to the community. Check out the sections below to know how to [register your plugins](#registering-a-new-plugin) and see plugins currently contained in the registry.
The registry contains metadata and information about every plugin known and recognized by the Falcosecurity organization. It lists plugins hosted either in this repository or in other repositories. These plugins are developed for Falco and made available to the community.

### Registering a new Plugin

Registering your plugin inside the registry helps ensure that some technical constraints are respected, such as that a [given ID is used by exactly one plugin with event source capability](https://falco.org/docs/plugins/#plugin-event-ids) and allows plugin authors to [coordinate about event source formats](https://falco.org/docs/plugins/#plugin-event-sources-and-interoperability). Moreover, this is a great way to share your plugin project with the community and engage with it, thus gaining new users and **increasing its visibility**. We encourage you to register your plugin in this registry before publishing it. You can add your plugin to this registry regardless of where its source code is hosted (there's a `url` field for this specifically).

The registration process involves adding an entry about your plugin inside the [registry.yaml](./registry.yaml) file by creating a Pull Request in this repository. Please be mindful of a few constraints that are automatically checked and required for your plugin to be accepted:

- The `name` field is mandatory and must be **unique** across all the plugins in the registry
- *(Sourcing Capability Only)* The `id` field is mandatory and must be **unique** in the registry across all the plugins with event source capability
- See [docs/plugin-ids.md](./docs/plugin-ids.md) for more information about plugin IDs
- The plugin `name` must match this [regular expression](https://en.wikipedia.org/wiki/Regular_expression): `^[a-z]+[a-z0-9-_\-]*$` (however, it's not recommended to use `_` in the name, unless you are trying to match the name of a source or for particular reasons)
- The `source` *(Sourcing Capability Only)* and `sources` *(Extraction Capability Only)* must match this [regular expression](https://en.wikipedia.org/wiki/Regular_expression): `^[a-z]+[a-z0-9_]*$`
- The `url` field should point to the plugin source code
- The `rules_url` field should point to the default ruleset, if any

For reference, here's an example of an entry for a plugin with both event sourcing and field extraction capabilities:
```yaml
- name: k8saudit
description: ...
authors: ...
contact: ...
maintainers:
- name: The Falco Authors
email: cncf-falco-dev@lists.cncf.io
keywords:
- audit
- audit-log
- audit-events
- kubernetes
url: https://github.com/falcosecurity/plugins/tree/master/plugins/k8saudit
rules_url: https://github.com/falcosecurity/plugins/tree/master/plugins/k8saudit/rules
url: ...
license: ...
capabilities:
sourcing:
supported: true
id: 2
source: k8s_audit
extraction:
supported: true
```

You can find the full registry specification here: *(coming soon...)*
> Check out the [Registering a Plugin](./docs/registering-a-plugin.md) to know how to add your plugin to this registry.

### Registered Plugins
@@ -63,37 +27,98 @@ These comments and the text between them should not be edited by hand -->
| Name | Capabilities | Description
| --- | --- | --- |
| plugin-id-zero-value | **Event Sourcing** <br/>ID: 0 <br/>`` | This ID is reserved for particular purposes and cannot be registered. A plugin author should not use this ID unless specified by the documentation. <br/><br/> Authors: N/A <br/> License: N/A |
| [k8saudit](https://github.com/falcosecurity/plugins/tree/master/plugins/k8saudit) | **Event Sourcing** <br/>ID: 1 <br/>`k8s_audit` <br/>**Field Extraction** <br/> `k8s_audit` | Read Kubernetes Audit Events and monitor Kubernetes Clusters <br/><br/> Authors: [The Falco Authors](https://falco.org/community) <br/> License: Apache-2.0 |
| [cloudtrail](https://github.com/falcosecurity/plugins/tree/master/plugins/cloudtrail) | **Event Sourcing** <br/>ID: 2 <br/>`aws_cloudtrail` <br/>**Field Extraction** <br/> `aws_cloudtrail` | Reads Cloudtrail JSON logs from files/S3 and injects as events <br/><br/> Authors: [The Falco Authors](https://falco.org/community) <br/> License: Apache-2.0 |
| [json](https://github.com/falcosecurity/plugins/tree/master/plugins/json) | **Field Extraction** <br/> *All Sources* | Extract values from any JSON payload <br/><br/> Authors: [The Falco Authors](https://falco.org/community) <br/> License: Apache-2.0 |
| [dummy](https://github.com/falcosecurity/plugins/tree/master/plugins/dummy) | **Event Sourcing** <br/>ID: 3 <br/>`dummy` <br/>**Field Extraction** <br/> `dummy` | Reference plugin used to document interface <br/><br/> Authors: [The Falco Authors](https://falco.org/community) <br/> License: Apache-2.0 |
| [dummy_c](https://github.com/falcosecurity/plugins/tree/master/plugins/dummy_c) | **Event Sourcing** <br/>ID: 4 <br/>`dummy_c` <br/>**Field Extraction** <br/> `dummy_c` | Like dummy, but written in C++ <br/><br/> Authors: [The Falco Authors](https://falco.org/community) <br/> License: Apache-2.0 |
| test | **Event Sourcing** <br/>ID: 999 <br/>`test` | This ID is reserved for source plugin development. Any plugin author can use this ID, but authors can expect events from other developers with this ID. After development is complete, the author should request an actual ID <br/><br/> Authors: N/A <br/> License: N/A |
| [k8saudit](https://github.com/falcosecurity/plugins/tree/main/plugins/k8saudit) | **Event Sourcing** <br/>ID: 1 <br/>`k8s_audit` <br/>**Field Extraction** <br/> `k8s_audit` | Read Kubernetes Audit Events and monitor Kubernetes Clusters <br/><br/> Authors: [The Falco Authors](https://falco.org/community) <br/> License: Apache-2.0 |
| [cloudtrail](https://github.com/falcosecurity/plugins/tree/main/plugins/cloudtrail) | **Event Sourcing** <br/>ID: 2 <br/>`aws_cloudtrail` <br/>**Field Extraction** <br/> `aws_cloudtrail` | Reads Cloudtrail JSON logs from files/S3 and injects as events <br/><br/> Authors: [The Falco Authors](https://falco.org/community) <br/> License: Apache-2.0 |
| [json](https://github.com/falcosecurity/plugins/tree/main/plugins/json) | **Field Extraction** <br/> *All Sources* | Extract values from any JSON payload <br/><br/> Authors: [The Falco Authors](https://falco.org/community) <br/> License: Apache-2.0 |
| [dummy](https://github.com/falcosecurity/plugins/tree/main/plugins/dummy) | **Event Sourcing** <br/>ID: 3 <br/>`dummy` <br/>**Field Extraction** <br/> `dummy` | Reference plugin used to document interface <br/><br/> Authors: [The Falco Authors](https://falco.org/community) <br/> License: Apache-2.0 |
| [dummy_c](https://github.com/falcosecurity/plugins/tree/main/plugins/dummy_c) | **Event Sourcing** <br/>ID: 4 <br/>`dummy_c` <br/>**Field Extraction** <br/> `dummy_c` | Like dummy, but written in C++ <br/><br/> Authors: [The Falco Authors](https://falco.org/community) <br/> License: Apache-2.0 |
| [docker](https://github.com/Issif/docker-plugin) | **Event Sourcing** <br/>ID: 5 <br/>`docker` <br/>**Field Extraction** <br/> `docker` | Docker Events <br/><br/> Authors: [Thomas Labarussias](https://github.com/Issif) <br/> License: Apache-2.0 |
| [seccompagent](https://github.com/kinvolk/seccompagent) | **Event Sourcing** <br/>ID: 6 <br/>`seccompagent` <br/>**Field Extraction** <br/> `seccompagent` | Seccomp Agent Events <br/><br/> Authors: [Alban Crequy](https://github.com/kinvolk/seccompagent) <br/> License: Apache-2.0 |
| [okta](https://github.com/falcosecurity/plugins/tree/master/plugins/okta) | **Event Sourcing** <br/>ID: 7 <br/>`okta` <br/>**Field Extraction** <br/> `okta` | Okta Log Events <br/><br/> Authors: [The Falco Authors](https://falco.org/community) <br/> License: Apache-2.0 |
| [github](https://github.com/falcosecurity/plugins/tree/master/plugins/github) | **Event Sourcing** <br/>ID: 8 <br/>`github` <br/>**Field Extraction** <br/> `github` | Github Webhook Events <br/><br/> Authors: [The Falco Authors](https://falco.org/community) <br/> License: Apache-2.0 |
| [k8saudit-eks](https://github.com/falcosecurity/plugins/tree/master/plugins/k8saudit-eks) | **Event Sourcing** <br/>ID: 9 <br/>`k8s_audit` <br/>**Field Extraction** <br/> `k8s_audit` | Read Kubernetes Audit Events from AWS EKS Clusters <br/><br/> Authors: [The Falco Authors](https://falco.org/community) <br/> License: Apache-2.0 |
| [okta](https://github.com/falcosecurity/plugins/tree/main/plugins/okta) | **Event Sourcing** <br/>ID: 7 <br/>`okta` <br/>**Field Extraction** <br/> `okta` | Okta Log Events <br/><br/> Authors: [The Falco Authors](https://falco.org/community) <br/> License: Apache-2.0 |
| [github](https://github.com/falcosecurity/plugins/tree/main/plugins/github) | **Event Sourcing** <br/>ID: 8 <br/>`github` <br/>**Field Extraction** <br/> `github` | Github Webhook Events <br/><br/> Authors: [The Falco Authors](https://falco.org/community) <br/> License: Apache-2.0 |
| [k8saudit-eks](https://github.com/falcosecurity/plugins/tree/main/plugins/k8saudit-eks) | **Event Sourcing** <br/>ID: 9 <br/>`k8s_audit` <br/>**Field Extraction** <br/> `k8s_audit` | Read Kubernetes Audit Events from AWS EKS Clusters <br/><br/> Authors: [The Falco Authors](https://falco.org/community) <br/> License: Apache-2.0 |
| [nomad](https://github.com/albertollamaso/nomad-plugin/tree/main) | **Event Sourcing** <br/>ID: 10 <br/>`nomad` <br/>**Field Extraction** <br/> `nomad` | Read Hashicorp Nomad Events Stream <br/><br/> Authors: [Alberto Llamas](https://github.com/albertollamaso/nomad-plugin/issues) <br/> License: Apache-2.0 |
| [dnscollector](https://github.com/SysdigDan/dnscollector-falco-plugin) | **Event Sourcing** <br/>ID: 11 <br/>`dnscollector` <br/>**Field Extraction** <br/> `dnscollector` | DNS Collector Events <br/><br/> Authors: [Daniel Moloney](https://github.com/SysdigDan/dnscollector-falco-plugin/issues) <br/> License: Apache-2.0 |
| [gcpaudit](https://github.com/falcosecurity/plugins/tree/master/plugins/gcpaudit) | **Event Sourcing** <br/>ID: 12 <br/>`gcp_auditlog` <br/>**Field Extraction** <br/> `gcp_auditlog` | Read GCP Audit Logs <br/><br/> Authors: [The Falco Authors](https://falco.org/community) <br/> License: Apache-2.0 |
| [gcpaudit](https://github.com/falcosecurity/plugins/tree/main/plugins/gcpaudit) | **Event Sourcing** <br/>ID: 12 <br/>`gcp_auditlog` <br/>**Field Extraction** <br/> `gcp_auditlog` | Read GCP Audit Logs <br/><br/> Authors: [The Falco Authors](https://falco.org/community) <br/> License: Apache-2.0 |
| [syslogsrv](https://github.com/nabokihms/syslogsrv-falco-plugin/tree/main/plugins/syslogsrv) | **Event Sourcing** <br/>ID: 13 <br/>`syslogsrv` <br/>**Field Extraction** <br/> `syslogsrv` | Syslog Server Events <br/><br/> Authors: [Maksim Nabokikh](https://github.com/nabokihms/syslogsrv-falco-plugin/issues) <br/> License: Apache-2.0 |
| [salesforce](https://github.com/an1245/falco-plugin-salesforce/) | **Event Sourcing** <br/>ID: 14 <br/>`salesforce` <br/>**Field Extraction** <br/> `salesforce` | Falco plugin providing basic runtime threat detection and auditing logging for Salesforce <br/><br/> Authors: [Andy](https://github.com/an1245/falco-plugin-salesforce/issues) <br/> License: Apache-2.0 |
| test | **Event Sourcing** <br/>ID: 999 <br/>`test` | This ID is reserved for source plugin development. Any plugin author can use this ID, but authors can expect events from other developers with this ID. After development is complete, the author should request an actual ID <br/><br/> Authors: N/A <br/> License: N/A |
| [k8smeta](https://github.com/falcosecurity/plugins/tree/master/plugins/k8smeta) | **Field Extraction** <br/> `syscall` | Enrich Falco syscall flow with Kubernetes Metadata <br/><br/> Authors: [The Falco Authors](https://falco.org/community) <br/> License: Apache-2.0 |
| [box](https://github.com/an1245/falco-plugin-box/) | **Event Sourcing** <br/>ID: 15 <br/>`box` <br/>**Field Extraction** <br/> `box` | Falco plugin providing basic runtime threat detection and auditing logging for Box <br/><br/> Authors: [Andy](https://github.com/an1245/falco-plugin-box/issues) <br/> License: Apache-2.0 |
| [k8smeta](https://github.com/falcosecurity/plugins/tree/main/plugins/k8smeta) | **Field Extraction** <br/> `syscall` | Enrich Falco syscall flow with Kubernetes Metadata <br/><br/> Authors: [The Falco Authors](https://falco.org/community) <br/> License: Apache-2.0 |
| [k8saudit-gke](https://github.com/falcosecurity/plugins/tree/main/plugins/k8saudit-gke) | **Event Sourcing** <br/>ID: 16 <br/>`k8s_audit` <br/>**Field Extraction** <br/> `k8s_audit` | Read Kubernetes Audit Events from GKE Clusters <br/><br/> Authors: [The Falco Authors](https://falco.org/community) <br/> License: Apache-2.0 |
| [journald](https://github.com/gnosek/falco-journald-plugin) | **Event Sourcing** <br/>ID: 17 <br/>`journal` <br/>**Field Extraction** <br/> `journal` | Read Journald events into Falco <br/><br/> Authors: [Grzegorz Nosek](https://github.com/gnosek/falco-journald-plugin) <br/> License: Apache-2.0 |
| [kafka](https://github.com/falcosecurity/plugins/tree/main/plugins/kafka) | **Event Sourcing** <br/>ID: 18 <br/>`kafka` | Read events from Kafka topics into Falco <br/><br/> Authors: [Hunter Madison](https://falco.org/community) <br/> License: Apache-2.0 |
| [gitlab](https://github.com/an1245/falco-plugin-gitlab) | **Event Sourcing** <br/>ID: 19 <br/>`gitlab` <br/>**Field Extraction** <br/> `gitlab` | Falco plugin providing basic runtime threat detection and auditing logging for GitLab <br/><br/> Authors: [Andy](https://github.com/an1245/falco-plugin-gitlab/issues) <br/> License: Apache-2.0 |
| [keycloak](https://github.com/mattiaforc/falco-keycloak-plugin) | **Event Sourcing** <br/>ID: 20 <br/>`keycloak` <br/>**Field Extraction** <br/> `keycloak` | Falco plugin for sourcing and extracting Keycloak user/admin events <br/><br/> Authors: [Mattia Forcellese](https://github.com/mattiaforc/falco-keycloak-plugin/issues) <br/> License: Apache-2.0 |
| [k8saudit-aks](https://github.com/falcosecurity/plugins/tree/main/plugins/k8saudit-aks) | **Event Sourcing** <br/>ID: 21 <br/>`k8s_audit` <br/>**Field Extraction** <br/> `k8s_audit` | Read Kubernetes Audit Events from Azure AKS Clusters <br/><br/> Authors: [The Falco Authors](https://falco.org/community) <br/> License: Apache-2.0 |
| [k8saudit-ovh](https://github.com/falcosecurity/plugins/tree/main/plugins/k8saudit-ovh) | **Event Sourcing** <br/>ID: 22 <br/>`k8s_audit` <br/>**Field Extraction** <br/> `k8s_audit` | Read Kubernetes Audit Events from OVHcloud MKS Clusters <br/><br/> Authors: [Aurélie Vache](https://falco.org/community) <br/> License: Apache-2.0 |
| [dummy_rs](https://github.com/falcosecurity/plugins/tree/main/plugins/dummy_rs) | **Event Sourcing** <br/>ID: 23 <br/>`dummy_rs` <br/>**Field Extraction** <br/> `dummy_rs` | Like dummy, but written in Rust <br/><br/> Authors: [The Falco Authors](https://falco.org/community) <br/> License: Apache-2.0 |
| [container](https://github.com/falcosecurity/plugins/tree/main/plugins/container) | **Field Extraction** <br/> `syscall` | Enrich Falco syscall flow with Container Metadata <br/><br/> Authors: [The Falco Authors](https://falco.org/community) <br/> License: Apache-2.0 |
| [krsi](https://github.com/falcosecurity/plugins/tree/main/plugins/krsi) | **Field Extraction** <br/> `syscall` | Security (KRSI) events support for Falco <br/><br/> Authors: [The Falco Authors](https://falco.org/community) <br/> License: Apache-2.0 |
| [collector](https://github.com/falcosecurity/plugins/tree/main/plugins/collector) | **Event Sourcing** <br/>ID: 24 <br/>`collector` | Generic collector to ingest raw payloads into Falco <br/><br/> Authors: [The Falco Authors](https://falco.org/community) <br/> License: Apache-2.0 |
| [awselb](https://github.com/yukinakanaka/falco-plugin-aws-elb) | **Event Sourcing** <br/>ID: 25 <br/>`awselb` <br/>**Field Extraction** <br/> `awselb` | AWS Elastic Load Balancer access logs events <br/><br/> Authors: [Yuki Nakamura](https://github.com/yukinakanaka/falco-plugin-aws-elb/issues) <br/> License: Apache-2.0 |

<!-- REGISTRY:TABLE -->

## Hosted Plugins
## Falcosecurity Plugins

Another purpose of this repository is to host and maintain the plugins owned by the Falcosecurity organization. Each plugin is a standalone project and has its own directory, and they are all inside the [plugins](https://github.com/falcosecurity/plugins/tree/master/plugins) folder.
Along with the registry, this repository hosts the official plugins maintained by the Falcosecurity organization. Each plugin is an independent project with its own directory in the [plugins folder](https://github.com/falcosecurity/plugins/tree/main/plugins).

The `master` branch contains the most up-to-date state of development, and each plugin is regularly released. Please check our [Release Process](./release.md) to know how plugins are released and how artifacts are distributed. Dev builds are published each time a Pull Request gets merged into `master`, whereas stable builds are released and published only when a new release gets tagged. You can find the published artifacts at https://download.falco.org/?prefix=plugins.
The `main` branch reflects the latest development state, and plugins are released on a regular basis. Development builds are published automatically when a Pull Request is merged into `main`, while stable builds are released only when a new tag is created. You can find all published artifacts at [download.falco.org](https://download.falco.org/?prefix=plugins). For details on the release process, please see our [Release Process](./release.md).

If you wish to contribute your plugin to the Falcosecurity organization, you just need to open a Pull Request to add it inside the `plugins` folder and to add it inside the registry. In order to be hosted in this repository, plugins must be licensed under the [Apache 2.0 License](./LICENSE).
The installation instructions below apply only to plugins hosted in this repository.

### Installing Plugins

Plugins hosted in this repository are built and distributed through Falco's official channels. You can easily install them using either [falcoctl](https://github.com/falcosecurity/falcoctl) or the [Falco Helm chart](https://github.com/falcosecurity/charts/tree/master/charts/falco).

#### Using falcoctl

1. **Install falcoctl:** If you haven't already, follow the [falcoctl installation guide](https://github.com/falcosecurity/falcoctl?tab=readme-ov-file#installation).
2. **Install a Plugin:** Execute the following command, replacing `<plugin-name>` with the name of the plugin you wish to install:
```bash
falcoctl index update falcosecurity
falcoctl artifact install <plugin-name>
```
> Depending on your environment, you may need to run the above commands with `sudo`.
3. Configure Falco to load the plugin as described in the [plugin's documentation](https://falco.org/docs/concepts/plugins/usage/#loading-plugins-in-falco).
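A minimal sketch of that last step, assuming the plugin library ended up under the default `/usr/share/falco/plugins` directory (the plugin name, library file, and `open_params` below are illustrative only; check the plugin's README for the real values):

```yaml
# falco.yaml fragment (sketch): register the plugin and tell Falco to load it.
plugins:
  - name: k8saudit
    library_path: /usr/share/falco/plugins/libk8saudit.so
    init_config: ""
    open_params: "http://:9765/k8s-audit"

# Only plugins listed here are actually loaded at startup.
load_plugins: [k8saudit]
```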
#### Using the Falco Helm Chart

When installing Falco using the Helm chart, you can instruct the chart to install a specific plugin by setting the `falcoctl.config.artifact.install.refs` value and then adding the relevant plugin configuration under `falco`.

The Helm chart provides a preset [values-k8saudit.yaml](https://github.com/falcosecurity/charts/blob/master/charts/falco/values-k8saudit.yaml) file that can be used to install the `k8saudit` plugin or as an example for installing other plugins.
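To make the shape of such an override concrete, here is a hedged sketch of a custom values file along the lines of `values-k8saudit.yaml` (the artifact refs, versions, and plugin settings are illustrative and should be adapted to the plugin you install):

```yaml
# custom-values.yaml (sketch): have falcoctl pull the plugin artifacts,
# then configure Falco itself to load the plugin.
falcoctl:
  config:
    artifact:
      install:
        refs: [k8saudit-rules:0, k8saudit:0]

falco:
  plugins:
    - name: k8saudit
      library_path: libk8saudit.so
      open_params: "http://:9765/k8s-audit"
  load_plugins: [k8saudit, json]
```

Such a file can then be passed to `helm install` or `helm upgrade` with `-f`.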
## Contributing

If you want to help and wish to contribute, please review our [contribution guidelines](https://github.com/falcosecurity/.github/blob/master/CONTRIBUTING.md). Code contributions are always encouraged and welcome!
If you want to help and wish to contribute, please review our [contribution guidelines](https://github.com/falcosecurity/.github/blob/main/CONTRIBUTING.md). Code contributions are always encouraged and welcome!

If you wish to contribute a plugin to The Falco Project, simply open a Pull Request to add your plugin to the `/plugins` folder and [update the registry accordingly](./docs/registering-a-plugin.md). Note that to be hosted in this repository, plugins must be licensed under the [Apache 2.0 License](./LICENSE).

### Enforcing coding style and repo policies locally

This repository supports enforcing coding style and policies locally through the `pre-commit` framework. `pre-commit` allows you to automatically install `git-hooks` that will be executed at every new commit. The following is the list of `git-hooks` defined in `.pre-commit-config.yaml` (notice that some of them only target files written in a specific language):
1. the `rust-fmt` hook - a `pre-commit` git hook running `rustfmt` on the staged changes
2. the `dco` hook - a `prepare-commit-msg` git hook that adds the `DCO` sign-off to the commit if not present

The following steps describe how to install these hooks.

#### Step 1

Install the `pre-commit` framework following the [official documentation](https://pre-commit.com/#installation).

> __Please note__: you have to follow only the "Installation" section.

#### Step 2

Install the `pre-commit` git hooks:
```bash
pre-commit install --hook-type pre-commit --hook-type prepare-commit-msg --overwrite
```

## License
@@ -25,6 +25,7 @@ import (
"regexp"
"strings"

"github.com/falcosecurity/plugins/build/registry/pkg/registry"
"github.com/spf13/pflag"
)

@@ -47,10 +48,12 @@ func git(args ...string) (output []string, err error) {
}

// an empty string matches the last tag with no match filtering
func gitGetLatestTagWithMatch(match string) (string, error) {
func gitGetLatestTagWithMatch(match []string) (string, error) {
args := []string{"describe", "--tags", "--abbrev=0"}
if len(match) > 0 {
args = append(args, "--match", match)
for _, m := range match {
args = append(args, "--match", m)
}
}
tags, err := git(args...)
if err != nil {

@@ -81,6 +84,21 @@ func gitListCommits(from, to string) ([]string, error) {
return logs, nil
}

func pluginSource(pname string) string {
reg, err := registry.LoadRegistryFromFile("registry.yaml")
if err != nil {
fail(fmt.Errorf("an error occurred while loading registry entries from file %q: %v", "registry.yaml", err))
}

for _, plugin := range reg.Plugins {
if plugin.Name == pname && plugin.Capabilities.Sourcing.Supported {
return plugin.Capabilities.Sourcing.Source
}
}

return ""
}

func fail(err error) {
fmt.Printf("error: %s\n", err)
os.Exit(1)

@@ -109,13 +127,14 @@ func main() {

// if from is not specified, we use the latest tag matching the plugin name
if len(from) == 0 {
match := ""
match := []string{}
if len(plugin) > 0 {
match = plugin + "-[0-9].[0-9].[0-9]*"
match = append(match, "plugins/"+plugin+"/v[0-9]*.[0-9]*.[0-9]*")
match = append(match, plugin+"-[0-9]*.[0-9]*.[0-9]*")
}
tag, err := gitGetLatestTagWithMatch(match)
if err != nil {
fmt.Fprintln(os.Stderr, "not tag with match '"+match+"' not found, using commits from whole history:", err.Error())
fmt.Fprintln(os.Stderr, "no matching tag found for plugin '"+plugin+"', using commits from whole history:", err.Error())
} else {
from = tag
}

@@ -127,15 +146,26 @@ func main() {
fail(err)
}

var rgx *regexp.Regexp
var rgx, rgxSource, rgxDeps *regexp.Regexp
if len(plugin) > 0 {
// craft a regex to filter all plugin-related commits that follow
// the conventional commit format
rgx, _ = regexp.Compile("^[a-f0-9]+ [a-zA-Z]+\\(([a-zA-Z\\/]+\\/)?" + plugin + "(\\/[a-zA-Z\\/]+)?\\):.*")

// use source name of the plugin as well, if it has sourcing capabilities
pluginSource := pluginSource(plugin)
if pluginSource != "" {
rgxSource, _ = regexp.Compile("^[a-f0-9]+ [a-zA-Z]+\\(([a-zA-Z\\/]+\\/)?" + pluginSource + "(\\/[a-zA-Z\\/]+)?\\):.*")
}

// craft a regex to filter all plugin-related dependabot commits
rgxDeps, _ = regexp.Compile("^[a-f0-9]+ build\\(deps\\):.*" + plugin + "$")
}

for _, c := range commits {
if len(c) > 0 && (rgx == nil || rgx.MatchString(c)) {
if len(c) > 0 && (rgx == nil || rgx.MatchString(c) ||
(rgxSource != nil && rgxSource.MatchString(c)) ||
rgxDeps.MatchString(c)) {
fmt.Println(formatCommitLine(c) + "\n")
}
}
@@ -1,5 +1,39 @@
module github.com/falcosecurity/plugins/build/changelog

go 1.17
go 1.23.0

require github.com/spf13/pflag v1.0.5
toolchain go1.24.1

require (
github.com/falcosecurity/plugins/build/registry v0.0.0-20240514080945-0e7ef7698747
github.com/spf13/pflag v1.0.6
)

require (
github.com/docker/cli v24.0.5+incompatible // indirect
github.com/docker/docker v25.0.6+incompatible // indirect
github.com/docker/docker-credential-helpers v0.8.0 // indirect
github.com/falcosecurity/falcoctl v0.6.1 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0-rc4 // indirect
github.com/pelletier/go-toml/v2 v2.0.9 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/spf13/afero v1.9.5 // indirect
github.com/spf13/cast v1.5.1 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/spf13/viper v1.16.0 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
golang.org/x/oauth2 v0.27.0 // indirect
golang.org/x/sync v0.12.0 // indirect
golang.org/x/sys v0.31.0 // indirect
golang.org/x/text v0.23.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
oras.land/oras-go/v2 v2.2.1 // indirect
)
@ -1,2 +1,516 @@
|
|||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
|
||||
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
||||
cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
||||
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
|
||||
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
|
||||
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
|
||||
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
|
||||
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
|
||||
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
|
||||
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
|
||||
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
|
||||
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
|
||||
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
|
||||
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
|
||||
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
|
||||
cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
|
||||
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
|
||||
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
|
||||
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
|
||||
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
|
||||
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
|
||||
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
|
||||
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
|
||||
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
||||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/docker/cli v24.0.5+incompatible h1:WeBimjvS0eKdH4Ygx+ihVq1Q++xg36M/rMi4aXAvodc=
|
||||
github.com/docker/cli v24.0.5+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/docker v25.0.6+incompatible h1:5cPwbwriIcsua2REJe8HqQV+6WlWc1byg2QSXzBxBGg=
|
||||
github.com/docker/docker v25.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8=
|
||||
github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/falcosecurity/falcoctl v0.6.1 h1:Klg3jHi/PL1Inw9DO9kGpzL6ka+TjI4oDl6kvm1I+VY=
|
||||
github.com/falcosecurity/falcoctl v0.6.1/go.mod h1:4Hx4h3KtcaQzPKxvYn5S9x4IHxwd6QRK9Gu04HHNbhE=
|
||||
github.com/falcosecurity/plugins/build/registry v0.0.0-20240514080945-0e7ef7698747 h1:d+YgxJXgcmu9LX5ixICSTaN3y5MmgCnxW8TfPu5i+Eg=
|
||||
github.com/falcosecurity/plugins/build/registry v0.0.0-20240514080945-0e7ef7698747/go.mod h1:I/unuAO/urquhDsyOE+YmcY0FNBInVtLfZ5VwD3FUMo=
|
||||
github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY=
|
||||
github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
|
||||
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
|
||||
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.1.0-rc4 h1:oOxKUJWnFC4YGHCCMNql1x4YaDfYBTS5Y4x/Cgeo1E0=
|
||||
github.com/opencontainers/image-spec v1.1.0-rc4/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
|
||||
github.com/pelletier/go-toml/v2 v2.0.9 h1:uH2qQXheeefCCkuBBSLi7jCiSmj3VRh2+Goq2N7Xxu0=
|
||||
github.com/pelletier/go-toml/v2 v2.0.9/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM=
|
||||
github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ=
|
||||
github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA=
|
||||
github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48=
|
||||
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
|
||||
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
|
||||
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
|
||||
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc=
|
||||
github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
|
||||
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
|
||||
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
|
||||
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
|
||||
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
|
||||
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
|
||||
golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
|
||||
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
|
||||
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
|
||||
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
|
||||
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
|
||||
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
|
||||
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
|
||||
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
|
||||
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
|
||||
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
||||
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
|
||||
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
||||
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
|
||||
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
||||
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
|
||||
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
|
||||
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
||||
gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY=
|
||||
gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
oras.land/oras-go/v2 v2.2.1 h1:3VJTYqy5KfelEF9c2jo1MLSpr+TM3mX8K42wzZcd6qE=
|
||||
oras.land/oras-go/v2 v2.2.1/go.mod h1:GeAwLuC4G/JpNwkd+bSZ6SkDMGaaYglt6YK2WvZP7uQ=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||
@@ -48,6 +48,11 @@ func fieldsRenderArgRow(a *sdk.FieldEntryArg) string {
return strings.Join(res, ", ")
}

// renderNewLines replaces '\n' character with "<br/>" for proper table formatting.
func renderNewLines(desc string) string {
return strings.ReplaceAll(desc, "\n", "<br/>")
}

func fieldsEditor(p *loader.Plugin, s string) (string, error) {
if !p.HasCapExtraction() {
return s, nil
@@ -74,7 +79,7 @@ func fieldsEditor(p *loader.Plugin, s string) (string, error) {
row = append(row, "`"+f.Type+"`")
}
row = append(row, fieldsRenderArgRow(&f.Arg))
row = append(row, f.Desc)
row = append(row, renderNewLines(f.Desc))
table.Append(row)
}
table.Render()
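
The helper added above is small enough to exercise on its own. A minimal, self-contained sketch (standard library only; the sample description string is invented for illustration) of what the README generator now does with multi-line field descriptions before appending them to the markdown table:

```go
package main

import (
	"fmt"
	"strings"
)

// renderNewLines mirrors the helper from the diff above: markdown table cells
// cannot hold raw newlines, so every '\n' is turned into an HTML line break.
func renderNewLines(desc string) string {
	return strings.ReplaceAll(desc, "\n", "<br/>")
}

func main() {
	desc := "Event name.\nOnly available for management events."
	fmt.Println(renderNewLines(desc))
	// Prints: Event name.<br/>Only available for management events.
}
```
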
@@ -3,7 +3,7 @@ module github.com/falcosecurity/plugins/build/readme
go 1.13

require (
github.com/falcosecurity/plugin-sdk-go v0.7.3
github.com/falcosecurity/plugin-sdk-go v0.7.5
github.com/olekukonko/tablewriter v0.0.5
github.com/spf13/pflag v1.0.5
github.com/spf13/pflag v1.0.6
)

@@ -1,16 +1,16 @@
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/falcosecurity/plugin-sdk-go v0.7.3 h1:nmlBUmeAgEhcEHhSDWeEYgD9WdiHR9uMWyog5Iv7GIA=
github.com/falcosecurity/plugin-sdk-go v0.7.3/go.mod h1:NP+y22DYOS+G3GDXIXNmzf0CBL3nfPPMoQuHvAzfitQ=
github.com/falcosecurity/plugin-sdk-go v0.7.5 h1:ke/+kTt0PwedM8+IGTKcW3LrUI/xiJNDCSzqTSW+CvI=
github.com/falcosecurity/plugin-sdk-go v0.7.5/go.mod h1:NP+y22DYOS+G3GDXIXNmzf0CBL3nfPPMoQuHvAzfitQ=
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=

@@ -23,6 +23,8 @@ import (
"fmt"
"os"

"github.com/falcosecurity/plugins/build/registry/cmd/validateRegistry"

"github.com/spf13/cobra"

"github.com/falcosecurity/plugins/build/registry/internal/options"
@@ -82,13 +84,19 @@ func main() {
},
}

var (
pluginsAMD64Path string
pluginsARM64Path string
rulesfilesPath string
devTag string
)
updateOCIRegistry := &cobra.Command{
Use: "update-oci-registry <registryFilename>",
Short: "Update the oci registry starting from the registry file and s3 bucket",
Args: cobra.ExactArgs(1),
DisableFlagsInUseLine: true,
RunE: func(c *cobra.Command, args []string) error {
status, err := oci.DoUpdateOCIRegistry(opts.Context, args[0])
status, err := oci.DoUpdateOCIRegistry(opts.Context, args[0], pluginsAMD64Path, pluginsARM64Path, rulesfilesPath, devTag)
if err != nil {
return err
}
@@ -97,6 +105,12 @@ func main() {
},
}

ociFlags := updateOCIRegistry.Flags()
ociFlags.StringVar(&pluginsAMD64Path, "plugins-amd64-path", "", "Path to plugins for the amd64 architecture")
ociFlags.StringVar(&pluginsARM64Path, "plugins-arm64-path", "", "Path to plugins for the arm64 architecture")
ociFlags.StringVar(&rulesfilesPath, "rulesfiles-path", "", "Path to rulesfiles")
ociFlags.StringVar(&devTag, "dev-tag", "", "Tag for devel versions")

rootCmd := &cobra.Command{
Use: "registry",
Version: "0.2.0",
@@ -105,6 +119,7 @@ func main() {
rootCmd.AddCommand(tableCmd)
rootCmd.AddCommand(updateIndexCmd)
rootCmd.AddCommand(updateOCIRegistry)
rootCmd.AddCommand(validateRegistry.NewValidateRegistry(context.Background()))

if err := rootCmd.Execute(); err != nil {
fmt.Printf("error: %s\n", err)
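
The new flags are plain cobra string flags bound to package-level variables and only consumed inside `RunE`. A minimal, self-contained sketch of that wiring (command name, flag names, and help strings taken from the diff above; everything else is illustrative, not the real `registry` tool):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	var (
		pluginsAMD64Path string
		pluginsARM64Path string
		rulesfilesPath   string
		devTag           string
	)

	cmd := &cobra.Command{
		Use:  "update-oci-registry <registryFilename>",
		Args: cobra.ExactArgs(1),
		RunE: func(c *cobra.Command, args []string) error {
			// The real command forwards these values to oci.DoUpdateOCIRegistry;
			// here we just show that the bound variables hold the flag values.
			fmt.Println(args[0], pluginsAMD64Path, pluginsARM64Path, rulesfilesPath, devTag)
			return nil
		},
	}

	flags := cmd.Flags()
	flags.StringVar(&pluginsAMD64Path, "plugins-amd64-path", "", "Path to plugins for the amd64 architecture")
	flags.StringVar(&pluginsARM64Path, "plugins-arm64-path", "", "Path to plugins for the arm64 architecture")
	flags.StringVar(&rulesfilesPath, "rulesfiles-path", "", "Path to rulesfiles")
	flags.StringVar(&devTag, "dev-tag", "", "Tag for devel versions")

	if err := cmd.Execute(); err != nil {
		fmt.Printf("error: %s\n", err)
	}
}
```
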
@@ -0,0 +1,57 @@
package validateRegistry

import (
"context"
"fmt"
"strings"

"github.com/falcosecurity/falcoctl/pkg/oci/authn"
ocipuller "github.com/falcosecurity/falcoctl/pkg/oci/puller"
"github.com/falcosecurity/plugins/build/registry/pkg/oci"
"github.com/falcosecurity/plugins/build/registry/pkg/registry"
"github.com/spf13/cobra"
"k8s.io/klog/v2"
)

func NewValidateRegistry(ctx context.Context) *cobra.Command {
updateOCIRegistry := &cobra.Command{
Use: "validate-registry <registryFilename>",
Short: "Check that an OCI repo exists for each plugin in the registry file",
Args: cobra.ExactArgs(1),
DisableFlagsInUseLine: true,
RunE: func(c *cobra.Command, args []string) error {
return validateRegistry(ctx, args[0])
},
}

return updateOCIRegistry
}

func validateRegistry(ctx context.Context, registryFile string) error {
reg, err := registry.LoadRegistryFromFile(registryFile)
if err != nil {
return fmt.Errorf("an error occurred while loading registry entries from file %q: %v", registryFile, err)
}

ociClient := authn.NewClient()

puller := ocipuller.NewPuller(ociClient, false, nil)
// For each plugin in the registry index, check that its OCI repository exists.
for _, plugin := range reg.Plugins {
// Filter out plugins that are not owned by falcosecurity.
if !strings.HasPrefix(plugin.URL, oci.PluginsRepo) {
klog.V(2).Infof("skipping plugin %q with authors %q: it is not maintained by %q",
plugin.Name, plugin.Authors, oci.FalcoAuthors)
continue
}
klog.Infof("Checking OCI repo for plugin %q", plugin.Name)
ref := fmt.Sprintf("ghcr.io/falcosecurity/plugins/plugin/%s:latest", plugin.Name)
// We just retrieve the descriptor from the remote repository;
// if that fails, the repository most likely does not exist.
if _, err := puller.Descriptor(ctx, ref); err != nil {
return fmt.Errorf("plugin %s seems to not have an OCI repository: %w", plugin.Name, err)
}
}

return nil
}
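
The core of the check is the `puller.Descriptor` call: resolving a tag to its OCI descriptor is cheap, so a failure is taken as a strong hint that the repository (or the `latest` tag) does not exist. A minimal sketch of that idea for a single reference, reusing exactly the falcoctl calls that appear in the file above (the hard-coded reference is illustrative):

```go
package main

import (
	"context"
	"fmt"

	"github.com/falcosecurity/falcoctl/pkg/oci/authn"
	ocipuller "github.com/falcosecurity/falcoctl/pkg/oci/puller"
)

func main() {
	ctx := context.Background()

	// Same construction as in validateRegistry above.
	client := authn.NewClient()
	puller := ocipuller.NewPuller(client, false, nil)

	ref := "ghcr.io/falcosecurity/plugins/plugin/cloudtrail:latest"
	if _, err := puller.Descriptor(ctx, ref); err != nil {
		fmt.Printf("no OCI repository found for %s: %v\n", ref, err)
		return
	}
	fmt.Printf("OCI repository exists: %s\n", ref)
}
```
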
@@ -1,87 +1,165 @@
module github.com/falcosecurity/plugins/build/registry

go 1.21
go 1.23.4

toolchain go1.24.1

require (
github.com/aws/aws-sdk-go-v2 v1.21.0
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.81
github.com/aws/aws-sdk-go-v2/service/s3 v1.38.5
github.com/blang/semver v3.5.1+incompatible
github.com/falcosecurity/falcoctl v0.6.1
github.com/falcosecurity/plugin-sdk-go v0.7.3
github.com/onsi/ginkgo/v2 v2.10.0
github.com/onsi/gomega v1.27.8
github.com/falcosecurity/falcoctl v0.11.0
github.com/falcosecurity/plugin-sdk-go v0.7.5
github.com/onsi/ginkgo/v2 v2.23.4
github.com/onsi/gomega v1.37.0
github.com/pkg/errors v0.9.1
github.com/spf13/cobra v1.7.0
github.com/spf13/cobra v1.9.1
github.com/stretchr/testify v1.10.0
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
k8s.io/klog/v2 v2.100.1
oras.land/oras-go/v2 v2.2.1
k8s.io/klog/v2 v2.130.1
oras.land/oras-go/v2 v2.6.0
)

require (
atomicgo.dev/cursor v0.2.0 // indirect
atomicgo.dev/keyboard v0.2.9 // indirect
atomicgo.dev/schedule v0.1.0 // indirect
cloud.google.com/go/compute v1.23.0 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.13 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.14 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.36 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.35 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.4 // indirect
github.com/aws/smithy-go v1.14.2 // indirect
github.com/containerd/console v1.0.3 // indirect
github.com/docker/cli v24.0.5+incompatible // indirect
github.com/docker/docker v24.0.7+incompatible // indirect
github.com/docker/docker-credential-helpers v0.8.0 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/go-logr/logr v1.2.4 // indirect
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/pprof v0.0.0-20230602150820-91b7bce49751 // indirect
cloud.google.com/go/compute/metadata v0.6.0 // indirect
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
github.com/MakeNowJust/heredoc v1.0.0 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/chai2010/gettext-go v1.0.3 // indirect
github.com/cilium/ebpf v0.17.3 // indirect
github.com/containerd/console v1.0.4 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/creasty/defaults v1.8.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/distribution/reference v0.6.0 // indirect
github.com/docker/cli v27.5.1+incompatible // indirect
github.com/docker/docker v27.5.1+incompatible // indirect
github.com/docker/docker-credential-helpers v0.8.2 // indirect
github.com/docker/go-connections v0.5.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/emicklei/go-restful/v3 v3.12.1 // indirect
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
github.com/falcosecurity/driverkit v0.20.5 // indirect
github.com/fatih/camelcase v1.0.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.8.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.8 // indirect
github.com/go-errors/errors v1.5.1 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.21.0 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.25.0 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/btree v1.1.3 // indirect
github.com/google/gnostic-models v0.6.9 // indirect
github.com/google/go-cmp v0.7.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gookit/color v1.5.4 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/gorilla/websocket v1.5.3 // indirect
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
github.com/hashicorp/hcl v1.0.1-vault-7 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.11 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
github.com/lithammer/fuzzysearch v1.1.8 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/magiconair/properties v1.8.9 // indirect
github.com/mailru/easyjson v0.9.0 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/patternmatcher v0.6.0 // indirect
github.com/moby/spdystream v0.5.0 // indirect
github.com/moby/sys/sequential v0.6.0 // indirect
github.com/moby/sys/user v0.3.0 // indirect
github.com/moby/sys/userns v0.1.0 // indirect
github.com/moby/term v0.5.2 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/ncruces/go-strftime v0.1.9 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0-rc4 // indirect
github.com/oras-project/oras-credentials-go v0.3.0 // indirect
github.com/pelletier/go-toml/v2 v2.0.9 // indirect
github.com/pterm/pterm v0.12.67 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/rogpeppe/go-internal v1.10.0 // indirect
github.com/opencontainers/image-spec v1.1.1 // indirect
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/pterm/pterm v0.12.80 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/sagikazarmark/locafero v0.7.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/spf13/afero v1.9.5 // indirect
github.com/spf13/cast v1.5.1 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/spf13/viper v1.16.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.12.0 // indirect
github.com/spf13/cast v1.7.1 // indirect
github.com/spf13/pflag v1.0.6 // indirect
github.com/spf13/viper v1.19.0 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/xlab/treeprint v1.2.0 // indirect
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 // indirect
golang.org/x/net v0.17.0 // indirect
golang.org/x/oauth2 v0.11.0 // indirect
golang.org/x/sync v0.3.0 // indirect
golang.org/x/sys v0.13.0 // indirect
golang.org/x/term v0.13.0 // indirect
golang.org/x/text v0.13.0 // indirect
golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect
go.opentelemetry.io/otel v1.34.0 // indirect
go.opentelemetry.io/otel/metric v1.34.0 // indirect
go.opentelemetry.io/otel/trace v1.34.0 // indirect
go.uber.org/automaxprocs v1.6.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.36.0 // indirect
golang.org/x/exp v0.0.0-20250215185904-eff6e970281f // indirect
golang.org/x/net v0.38.0 // indirect
golang.org/x/oauth2 v0.27.0 // indirect
golang.org/x/sync v0.14.0 // indirect
golang.org/x/sys v0.32.0 // indirect
golang.org/x/term v0.30.0 // indirect
golang.org/x/text v0.23.0 // indirect
golang.org/x/time v0.10.0 // indirect
golang.org/x/tools v0.31.0 // indirect
google.golang.org/protobuf v1.36.6 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gotest.tools/v3 v3.5.0 // indirect
k8s.io/api v0.32.3 // indirect
k8s.io/apimachinery v0.32.3 // indirect
k8s.io/cli-runtime v0.32.2 // indirect
k8s.io/client-go v0.32.2 // indirect
k8s.io/component-base v0.32.2 // indirect
k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 // indirect
k8s.io/kubectl v0.32.2 // indirect
k8s.io/utils v0.0.0-20241210054802-24370beab758 // indirect
modernc.org/libc v1.61.13 // indirect
modernc.org/mathutil v1.7.1 // indirect
modernc.org/memory v1.8.2 // indirect
modernc.org/sqlite v1.35.0 // indirect
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
sigs.k8s.io/kustomize/api v0.19.0 // indirect
sigs.k8s.io/kustomize/kyaml v0.19.0 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.5.0 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
)

File diff suppressed because it is too large
Load Diff
|
@@ -59,7 +59,7 @@ func ExtractTarGz(fileName, destDir string) ([]string, error) {
switch header.Typeflag {
case tar.TypeDir:
return nil, fmt.Errorf("unexepected dir inside the archive, expected to find only files without any tree structure")
case tar.TypeReg:
case tar.TypeReg, tar.TypeSymlink:
f := filepath.Join(destDir, filepath.Clean(header.Name))
if !strings.HasPrefix(f, filepath.Clean(destDir)+string(os.PathSeparator)) {
return nil, fmt.Errorf("illegal file path: %q", f)
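The hunk above extends ExtractTarGz to accept symlink entries while keeping the guard that rejects archive entries escaping the destination directory. Below is a minimal, standalone sketch of that guard pattern only — it is not the repository's actual helper, and the function and path names are illustrative:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// safeTarget mirrors the check in the hunk above: an archive entry must
// resolve to a path strictly inside destDir, otherwise it is rejected.
func safeTarget(destDir, entryName string) (string, error) {
	f := filepath.Join(destDir, filepath.Clean(entryName))
	if !strings.HasPrefix(f, filepath.Clean(destDir)+string(os.PathSeparator)) {
		return "", fmt.Errorf("illegal file path: %q", f)
	}
	return f, nil
}

func main() {
	if _, err := safeTarget("/tmp/out", "../etc/passwd"); err != nil {
		fmt.Println(err) // rejected: the entry escapes the destination directory
	}
	p, _ := safeTarget("/tmp/out", "plugin.so")
	fmt.Println(p) // /tmp/out/plugin.so
}
```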
@@ -17,7 +17,7 @@
registry: ghcr.io
repository: falcosecurity/plugins/plugin/cloudtrail
description: Reads Cloudtrail JSON logs from files/S3 and injects as events
home: https://github.com/falcosecurity/plugins/tree/master/plugins/cloudtrail
home: https://github.com/falcosecurity/plugins/tree/main/plugins/cloudtrail
keywords:
- audit
- user-activity

@@ -29,13 +29,13 @@
- email: cncf-falco-dev@lists.cncf.io
name: The Falco Authors
sources:
- https://github.com/falcosecurity/plugins/tree/master/plugins/cloudtrail
- https://github.com/falcosecurity/plugins/tree/main/plugins/cloudtrail
- name: cloudtrail-rules
type: rulesfile
registry: ghcr.io
repository: falcosecurity/plugins/ruleset/cloudtrail
description: Reads Cloudtrail JSON logs from files/S3 and injects as events
home: https://github.com/falcosecurity/plugins/tree/master/plugins/cloudtrail
home: https://github.com/falcosecurity/plugins/tree/main/plugins/cloudtrail
keywords:
- audit
- user-activity

@@ -47,13 +47,13 @@
- email: cncf-falco-dev@lists.cncf.io
name: The Falco Authors
sources:
- https://github.com/falcosecurity/plugins/tree/master/plugins/cloudtrail/rules
- https://github.com/falcosecurity/plugins/tree/main/plugins/cloudtrail/rules
- name: dummy
type: plugin
registry: ghcr.io
repository: falcosecurity/plugins/plugin/dummy
description: Reference plugin used to document interface
home: https://github.com/falcosecurity/plugins/tree/master/plugins/dummy
home: https://github.com/falcosecurity/plugins/tree/main/plugins/dummy
keywords:
- dummy
license: Apache-2.0

@@ -61,13 +61,13 @@
- email: cncf-falco-dev@lists.cncf.io
name: The Falco Authors
sources:
- https://github.com/falcosecurity/plugins/tree/master/plugins/dummy
- https://github.com/falcosecurity/plugins/tree/main/plugins/dummy
- name: dummy_c
type: plugin
registry: ghcr.io
repository: falcosecurity/plugins/plugin/dummy_c
description: Like dummy, but written in C++
home: https://github.com/falcosecurity/plugins/tree/master/plugins/dummy_c
home: https://github.com/falcosecurity/plugins/tree/main/plugins/dummy_c
keywords:
- dummy_c
license: Apache-2.0

@@ -75,7 +75,7 @@
- email: cncf-falco-dev@lists.cncf.io
name: The Falco Authors
sources:
- https://github.com/falcosecurity/plugins/tree/master/plugins/dummy_c
- https://github.com/falcosecurity/plugins/tree/main/plugins/dummy_c
- name: falco-rules
type: rulesfile
registry: ghcr.io

@@ -95,7 +95,7 @@
registry: ghcr.io
repository: falcosecurity/plugins/plugin/github
description: Github Webhook Events
home: https://github.com/falcosecurity/plugins/tree/master/plugins/github
home: https://github.com/falcosecurity/plugins/tree/main/plugins/github
keywords:
- audit
- log-events

@@ -107,13 +107,13 @@
- email: cncf-falco-dev@lists.cncf.io
name: The Falco Authors
sources:
- https://github.com/falcosecurity/plugins/tree/master/plugins/github
- https://github.com/falcosecurity/plugins/tree/main/plugins/github
- name: github-rules
type: rulesfile
registry: ghcr.io
repository: falcosecurity/plugins/ruleset/github
description: Github Webhook Events
home: https://github.com/falcosecurity/plugins/tree/master/plugins/github
home: https://github.com/falcosecurity/plugins/tree/main/plugins/github
keywords:
- audit
- log-events

@@ -126,13 +126,13 @@
- email: cncf-falco-dev@lists.cncf.io
name: The Falco Authors
sources:
- https://github.com/falcosecurity/plugins/tree/master/plugins/github/rules
- https://github.com/falcosecurity/plugins/tree/main/plugins/github/rules
- name: json
type: plugin
registry: ghcr.io
repository: falcosecurity/plugins/plugin/json
description: Extract values from any JSON payload
home: https://github.com/falcosecurity/plugins/tree/master/plugins/json
home: https://github.com/falcosecurity/plugins/tree/main/plugins/json
keywords:
- json-events
- json-payload

@@ -143,13 +143,13 @@
- email: cncf-falco-dev@lists.cncf.io
name: The Falco Authors
sources:
- https://github.com/falcosecurity/plugins/tree/master/plugins/json
- https://github.com/falcosecurity/plugins/tree/main/plugins/json
- name: k8saudit
type: plugin
registry: ghcr.io
repository: falcosecurity/plugins/plugin/k8saudit
description: Read Kubernetes Audit Events and monitor Kubernetes Clusters
home: https://github.com/falcosecurity/plugins/tree/master/plugins/k8saudit
home: https://github.com/falcosecurity/plugins/tree/main/plugins/k8saudit
keywords:
- audit
- audit-log

@@ -161,13 +161,13 @@
- email: cncf-falco-dev@lists.cncf.io
name: The Falco Authors
sources:
- https://github.com/falcosecurity/plugins/tree/master/plugins/k8saudit
- https://github.com/falcosecurity/plugins/tree/main/plugins/k8saudit
- name: k8saudit-eks
type: plugin
registry: ghcr.io
repository: falcosecurity/plugins/plugin/k8saudit-eks
description: Read Kubernetes Audit Events from AWS EKS Clusters
home: https://github.com/falcosecurity/plugins/tree/master/plugins/k8saudit-eks
home: https://github.com/falcosecurity/plugins/tree/main/plugins/k8saudit-eks
keywords:
- audit
- audit-log

@@ -179,13 +179,13 @@
license: Apache-2.0
maintainers: []
sources:
- https://github.com/falcosecurity/plugins/tree/master/plugins/k8saudit-eks
- https://github.com/falcosecurity/plugins/tree/main/plugins/k8saudit-eks
- name: k8saudit-rules
type: rulesfile
registry: ghcr.io
repository: falcosecurity/plugins/ruleset/k8saudit
description: Read Kubernetes Audit Events and monitor Kubernetes Clusters
home: https://github.com/falcosecurity/plugins/tree/master/plugins/k8saudit
home: https://github.com/falcosecurity/plugins/tree/main/plugins/k8saudit
keywords:
- audit
- audit-log

@@ -197,20 +197,20 @@
- email: cncf-falco-dev@lists.cncf.io
name: The Falco Authors
sources:
- https://github.com/falcosecurity/plugins/tree/master/plugins/k8saudit/rules
- https://github.com/falcosecurity/plugins/tree/main/plugins/k8saudit/rules
- name: okta
type: plugin
registry: ghcr.io
repository: falcosecurity/plugins/plugin/okta
signature:
cosign:
certificate-oidc-issuer: https://token.actions.githubusercontent.com
certificate-oidc-issuer-regexp: ""
certificate-identity: ""
certificate-identity-regexp: https://github.com/LucaGuerra/cool-falco-ruleset/
certificate-github-workflow: ""
certificate-oidc-issuer: https://token.actions.githubusercontent.com
certificate-oidc-issuer-regexp: ""
certificate-identity: ""
certificate-identity-regexp: https://github.com/LucaGuerra/cool-falco-ruleset/
certificate-github-workflow: ""
description: Okta Log Events
home: https://github.com/falcosecurity/plugins/tree/master/plugins/okta
home: https://github.com/falcosecurity/plugins/tree/main/plugins/okta
keywords:
- audit
- log-events

@@ -220,20 +220,20 @@
- email: cncf-falco-dev@lists.cncf.io
name: The Falco Authors
sources:
- https://github.com/falcosecurity/plugins/tree/master/plugins/okta
- https://github.com/falcosecurity/plugins/tree/main/plugins/okta
- name: okta-rules
type: rulesfile
registry: ghcr.io
repository: falcosecurity/plugins/ruleset/okta
signature:
cosign:
certificate-oidc-issuer: https://token.actions.githubusercontent.com
certificate-oidc-issuer-regexp: ""
certificate-identity: ""
certificate-identity-regexp: https://github.com/LucaGuerra/cool-falco-ruleset/
certificate-github-workflow: ""
certificate-oidc-issuer: https://token.actions.githubusercontent.com
certificate-oidc-issuer-regexp: ""
certificate-identity: ""
certificate-identity-regexp: https://github.com/LucaGuerra/cool-falco-ruleset/
certificate-github-workflow: ""
description: Okta Log Events
home: https://github.com/falcosecurity/plugins/tree/master/plugins/okta
home: https://github.com/falcosecurity/plugins/tree/main/plugins/okta
keywords:
- audit
- log-events

@@ -244,4 +244,4 @@
- email: cncf-falco-dev@lists.cncf.io
name: The Falco Authors
sources:
- https://github.com/falcosecurity/plugins/tree/master/plugins/okta/rules
- https://github.com/falcosecurity/plugins/tree/main/plugins/okta/rules
@@ -37,8 +37,8 @@ plugins:
- audit-log
- audit-events
- kubernetes
url: https://github.com/falcosecurity/plugins/tree/master/plugins/k8saudit
rules_url: https://github.com/falcosecurity/plugins/tree/master/plugins/k8saudit/rules
url: https://github.com/falcosecurity/plugins/tree/main/plugins/k8saudit
rules_url: https://github.com/falcosecurity/plugins/tree/main/plugins/k8saudit/rules
license: Apache-2.0
capabilities:
sourcing:

@@ -59,8 +59,8 @@ plugins:
- user-activity
- api-usage
- aws
url: https://github.com/falcosecurity/plugins/tree/master/plugins/cloudtrail
rules_url: https://github.com/falcosecurity/plugins/tree/master/plugins/cloudtrail/rules
url: https://github.com/falcosecurity/plugins/tree/main/plugins/cloudtrail
rules_url: https://github.com/falcosecurity/plugins/tree/main/plugins/cloudtrail/rules
license: Apache-2.0
capabilities:
sourcing:

@@ -80,7 +80,7 @@ plugins:
- json-events
- json-payload
- extractor
url: https://github.com/falcosecurity/plugins/tree/master/plugins/json
url: https://github.com/falcosecurity/plugins/tree/main/plugins/json
license: Apache-2.0
capabilities:
extraction:

@@ -92,7 +92,7 @@ plugins:
maintainers:
- name: The Falco Authors
email: cncf-falco-dev@lists.cncf.io
url: https://github.com/falcosecurity/plugins/tree/master/plugins/dummy
url: https://github.com/falcosecurity/plugins/tree/main/plugins/dummy
license: Apache-2.0
capabilities:
sourcing:

@@ -108,7 +108,7 @@ plugins:
maintainers:
- name: The Falco Authors
email: cncf-falco-dev@lists.cncf.io
url: https://github.com/falcosecurity/plugins/tree/master/plugins/dummy_c
url: https://github.com/falcosecurity/plugins/tree/main/plugins/dummy_c
license: Apache-2.0
capabilities:
sourcing:

@@ -163,8 +163,8 @@ plugins:
- audit
- log-events
- okta
url: https://github.com/falcosecurity/plugins/tree/master/plugins/okta
rules_url: https://github.com/falcosecurity/plugins/tree/master/plugins/okta/rules
url: https://github.com/falcosecurity/plugins/tree/main/plugins/okta
rules_url: https://github.com/falcosecurity/plugins/tree/main/plugins/okta/rules
license: Apache-2.0
capabilities:
sourcing:

@@ -190,8 +190,8 @@ plugins:
- webhook
- github-activity
- github
url: https://github.com/falcosecurity/plugins/tree/master/plugins/github
rules_url: https://github.com/falcosecurity/plugins/tree/master/plugins/github/rules
url: https://github.com/falcosecurity/plugins/tree/main/plugins/github
rules_url: https://github.com/falcosecurity/plugins/tree/main/plugins/github/rules
license: Apache-2.0
capabilities:
sourcing:

@@ -204,8 +204,8 @@ plugins:
description: Read Kubernetes Audit Events from AWS EKS Clusters
authors: The Falco Authors
contact: https://falco.org/community
url: https://github.com/falcosecurity/plugins/tree/master/plugins/k8saudit-eks
rules_url: https://github.com/falcosecurity/plugins/tree/master/plugins/k8saudit/rules
url: https://github.com/falcosecurity/plugins/tree/main/plugins/k8saudit-eks
rules_url: https://github.com/falcosecurity/plugins/tree/main/plugins/k8saudit/rules
license: Apache-2.0
keywords:
- audit

@@ -213,7 +213,7 @@ plugins:
- audit-events
- kubernetes
- eks
- aws
- aws
capabilities:
sourcing:
supported: true
@@ -23,6 +23,9 @@ import (
"os"
"strings"

"github.com/falcosecurity/plugin-sdk-go/pkg/loader"
"github.com/falcosecurity/plugin-sdk-go/pkg/sdk/plugins"

"github.com/falcosecurity/falcoctl/pkg/oci"
"github.com/falcosecurity/plugins/build/registry/pkg/common"
)

@@ -78,7 +81,25 @@ func rulesfileConfig(name, version, filePath string) (*oci.ArtifactConfig, error
return cfg, nil
}

func pluginConfig(name, version, filePath string) (*oci.ArtifactConfig, error) {
func pluginConfig(name, version string, pluginInfo *plugins.Info) (*oci.ArtifactConfig, error) {
// Check that the name we got from the registry.yaml is the same as the embedded one in the plugin at build time.
if name != pluginInfo.Name {
return nil, fmt.Errorf("mismatch between name in registry.yaml (%q) and name found in plugin shared object (%q)", name, pluginInfo.Name)
}

cfg := &oci.ArtifactConfig{
Name: name,
Version: version,
Dependencies: nil,
Requirements: nil,
}

_ = cfg.SetRequirement(common.PluginAPIVersion, pluginInfo.RequiredAPIVersion)

return cfg, nil
}

func pluginInfo(filePath string) (*plugins.Info, error) {
// Create temp dir.
tmpDir, err := os.MkdirTemp("", "registry-oci-")
if err != nil {

@@ -90,32 +111,18 @@ func pluginConfig(name, version, filePath string) (*oci.ArtifactConfig, error) {
return nil, err
}

cfg := &oci.ArtifactConfig{
Name: name,
Version: version,
Dependencies: nil,
Requirements: nil,
}

for _, file := range files {
// skip files that are not a shared library such as README files.
if !strings.HasSuffix(file, ".so") {
continue
}
// Get the requirement for the given file.
req, err := pluginRequirement(file)
if err != nil && !errors.Is(err, ErrReqNotFound) {
return nil, err
}
// If found add it to the requirements list.
if err == nil {
_ = cfg.SetRequirement(req.Name, req.Version)
// Get the plugin info.
plugin, err := loader.NewPlugin(file)
if err != nil {
return nil, fmt.Errorf("unable to open plugin %q: %w", file, err)
}
return plugin.Info(), nil
}

if cfg.Requirements == nil {
return nil, fmt.Errorf("no requirements found for plugin %q", filePath)
}

return cfg, nil
return nil, fmt.Errorf("no plugin found in archive %q", filePath)
}
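The rewritten pluginConfig above derives the artifact config from metadata embedded in the plugin shared object, instead of probing the archive path. A rough, hedged sketch of that flow using the plugin-sdk-go loader calls shown in the hunk (the .so path and the expected name below are placeholders, not values from this repository):

```go
package main

import (
	"fmt"
	"log"

	"github.com/falcosecurity/plugin-sdk-go/pkg/loader"
)

func main() {
	// Open the shared object and read the info the plugin embeds at build time
	// (name, version, required plugin API version), as pluginInfo() does above.
	p, err := loader.NewPlugin("./k8saudit.so") // placeholder path
	if err != nil {
		log.Fatalf("unable to open plugin: %v", err)
	}
	info := p.Info()

	// The name declared in registry.yaml must match the embedded name,
	// mirroring the check at the top of pluginConfig().
	if info.Name != "k8saudit" { // placeholder registry.yaml name
		log.Fatalf("mismatch between registry.yaml name and plugin name %q", info.Name)
	}
	fmt.Println("plugin:", info.Name, "requires plugin API", info.RequiredAPIVersion)
}
```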
@@ -40,15 +40,9 @@ const (
RegistryUser = "REGISTRY_USER"
RegistryOCI = "REGISTRY"
RepoGithub = "REPO_GITHUB"
region = "eu-west-1" // TODO: make it discoverable
pluginPrefix = "plugins/stable/"
maxKeys = 128
falcoAuthors = "The Falco Authors"
// Architectures as used in the names of the archives uploaded in the S3 bucket.
x86_arch_s3 = "x86_64"
arm_aarch64_s3 = "aarch64"
// Architectures as used in the OCI manifests. We translate the archs from S3 notation to the OCI one.
amd64OCI = "amd64"
arm64OCI = "arm64"
archive_suffix = ".tar.gz"
FalcoAuthors = "The Falco Authors"
PluginsRepo = "https://github.com/falcosecurity/plugins"
archiveSuffix = ".tar.gz"
amd64Platform = "linux/amd64"
arm64Platform = "linux/arm64"
)
@@ -21,31 +21,25 @@ import (
"context"
"fmt"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strings"

"github.com/falcosecurity/plugin-sdk-go/pkg/sdk/plugins"

"github.com/falcosecurity/plugins/build/registry/pkg/common"

"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/blang/semver"
"github.com/falcosecurity/falcoctl/pkg/oci"
"github.com/falcosecurity/falcoctl/pkg/oci/authn"
ocipusher "github.com/falcosecurity/falcoctl/pkg/oci/pusher"
"github.com/falcosecurity/falcoctl/pkg/oci/repository"
"github.com/falcosecurity/plugins/build/registry/pkg/registry"
"k8s.io/klog/v2"
"oras.land/oras-go/v2/registry/remote"
"oras.land/oras-go/v2/registry/remote/auth"
)

var (
bucketName = "falco-distribution"
)

type config struct {
// registryToken authentication token for the OCI registry.
registryToken string

@@ -95,184 +89,16 @@ func refFromPluginEntry(cfg *config, plugin *registry.Plugin, rulesFile bool) st
return filepath.Join(cfg.registryHost, cfg.registryUser, namespace, plugin.Name)
}

// s3ArtifactName returns the prefix name of the archive uploaded in the s3 bucket.
// It uses the same logic used by the makefile used to upload the artifacts in the s3 bucket.
func s3ArtifactNamePrefix(plugin *registry.Plugin, version string, rulesFile bool) string {
if rulesFile {
return fmt.Sprintf("%s-rules-%s%s", plugin.Name, version, archive_suffix)
}
return fmt.Sprintf("%s-%s-linux", plugin.Name, version)
}

func platformFromS3Key(key string) string {
if strings.Contains(key, x86_arch_s3) {
// Instead of "x86_64" we return "amd64" the one to be used in the oci artifact.
return fmt.Sprintf("linux/%s", amd64OCI)
}

if strings.Contains(key, arm_aarch64_s3) {
// Instead of "aarch64" we return "arm64" the one to be used in the oci artifact.
return fmt.Sprintf("linux/%s", arm64OCI)
}

// Return empty string if it does not contain one of the expected architectures.
return ""
}

func currentPlatform() string {
return fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)
}

// latestVersionArtifact returns the latest version of the artifact that exists in the remote repository pointed by the reference.
func latestVersionArtifact(ctx context.Context, ref string, ociClient remote.Client) (string, error) {
var versions []semver.Version
var repo *repository.Repository
var err error

// Create the repository object for the ref.
if repo, err = repository.NewRepository(ref, repository.WithClient(ociClient)); err != nil {
return "", fmt.Errorf("unable to create repository for ref %q: %w", ref, err)
}

// Get all the tags for the given artifact in the remote repository.
remoteTags, err := repo.Tags(ctx)
// Only way to know if the repo does not exist is to check the content of the error.
if err != nil && !strings.Contains(err.Error(), "unexpected status code 404") {
klog.Warningf("unable to get latest version from remote repository for %q: %v", ref, err)
return "", nil
}

// If no tags found it means that the artifact does not exist in the OCI registry or
// that it does not have tags.
if len(remoteTags) == 0 {
return "", nil
}

// We parse the tags in semVer and then sort and get the latest one.
for _, tag := range remoteTags {
parsedVersion, err := semver.ParseTolerant(tag)
if err != nil {
// Ignore any non-semver tags (like latest or signature tags)
klog.Infof("Tag %s is not semver, ignoring", tag)
continue
}

versions = append(versions, parsedVersion)
}

// Sort the versions.
semver.Sort(versions)

// Return the latest version.
// It should never happen that versions is empty. Since the artifacts are pushed by the CI if
// it has been pushed then it must have a tag assigned to it.
return versions[len(versions)-1].String(), nil
}

// TODO(alacuku): duplicated code, in common with the "version" tool
func git(args ...string) (output []string, err error) {
stdout, err := exec.Command("git", args...).Output()
if err != nil {
if exitErr, ok := err.(*exec.ExitError); ok {
return nil, fmt.Errorf("unable to list tags %q: %v", exitErr.Stderr, err)
}
return nil, err
}

lines := strings.Split(string(stdout), "\n")

return lines[0 : len(lines)-1], nil
}

// localLatestVersion returns the latest version of the artifact in the local git repository based on the tags.
func localLatestVersion(artifactName string) (*semver.Version, error) {
// List only the tags that have a prefix "artifactname-[0-9].*"
tagPrefix := fmt.Sprintf("%s-[0-9].*", artifactName)
tags, err := git("--no-pager", "tag", "-l", tagPrefix, "--sort", "-authordate")
if err != nil {
return nil, err
}

if len(tags) == 0 {
return nil, fmt.Errorf("no tags found for prefix %q", tagPrefix)
}

// Trim tag's prefix.
tag := strings.TrimPrefix(tags[0], artifactName+"-")
version, err := semver.Parse(tag)
if err != nil {
return nil, fmt.Errorf("unable to parse tag %q to semver: %v", tags[0], err)
}

return &version, err
}

// newReleases returns the new released versions since the latest version fetched from the remote repository.
// It could happen that the artifact does not exist in the remote repository, in that case we return the latest
// version found in the local git repository.
func newReleases(artifactName, remoteVersion string) ([]semver.Version, error) {
var versions []semver.Version

// List only the tags that have a prefix "artifactname-[0-9].*"
tagPrefix := fmt.Sprintf("%s-[0-9].*", artifactName)
remoteTag := fmt.Sprintf("%s-%s", artifactName, remoteVersion)

// If the artifact does not exist in the OCI repo, then we just get the latest version in the
// local git repo.
if remoteVersion == "" {
v, err := localLatestVersion(artifactName)
if err != nil {
return nil, err
}
return append(versions, *v), nil
}

tagList, err := git("--no-pager", "tag", "--list", tagPrefix, "--contains", remoteTag)
if err != nil {
return nil, err
}

tags := make(map[string]struct{})

for _, t := range tagList {
tags[t] = struct{}{}
}

// Since the remoteTag is always self-contained, we remove it.
delete(tags, remoteTag)

// If not new versions are found then return.
if len(tags) == 0 {
return nil, nil
}

for tag := range tags {

if tag == "" {
continue
}
// Trim tag's prefix.
t := strings.TrimPrefix(tag, artifactName+"-")

parsedVersion, err := semver.Parse(t)
if err != nil {
return nil, fmt.Errorf("cannot parse tag %q to semVer: %v", t, err)
}

versions = append(versions, parsedVersion)
}

// Sort and return the versions.
semver.Sort(versions)
return versions, nil
}

// DoUpdateOCIRegistry publishes new plugins with related rules to be released.
// For each plugin in the registry index, it looks for new versions, since the latest version fetched from the remote OCI
// repository, as tags on the local Git repository.
// For each new version, it downloads the related plugin and rule set from the Falco distribution and updates the OCI
// repository accordingly.
func DoUpdateOCIRegistry(ctx context.Context, registryFile string) ([]registry.ArtifactPushMetadata, error) {
func DoUpdateOCIRegistry(ctx context.Context, registryFile, pluginsAMD4, pluginsARM64, rulesfiles, devTag string) ([]registry.ArtifactPushMetadata, error) {
var (
cfg *config
err error

@@ -283,11 +109,6 @@ func DoUpdateOCIRegistry(ctx context.Context, registryFile string) ([]registry.A
return nil, err
}

s3Client := s3.NewFromConfig(aws.Config{
Region: region,
Credentials: aws.AnonymousCredentials{},
})

cred := &auth.Credential{
Username: cfg.registryUser,
Password: cfg.registryToken,

@@ -304,7 +125,7 @@ func DoUpdateOCIRegistry(ctx context.Context, registryFile string) ([]registry.A

// For each plugin in the registry index, look for new ones to be released, and publish them.
for _, plugin := range reg.Plugins {
pa, ra, err := handleArtifact(ctx, cfg, &plugin, s3Client, ociClient)
pa, ra, err := handleArtifact(ctx, cfg, &plugin, ociClient, pluginsAMD4, pluginsARM64, rulesfiles, devTag)
if err != nil {
return artifacts, err
}

@@ -321,66 +142,6 @@ func DoUpdateOCIRegistry(ctx context.Context, registryFile string) ([]registry.A
return artifacts, nil
}

func listObjects(ctx context.Context, client *s3.Client, prefix string) ([]string, error) {
prefix = filepath.Join(pluginPrefix, prefix)
params := &s3.ListObjectsV2Input{
Bucket: &bucketName,
Prefix: &prefix,
}

klog.Infof("listing objects for plugin from s3 bucket with prefix %q", prefix)

// Create the Paginator for the ListObjectsV2 operation.
p := s3.NewListObjectsV2Paginator(client, params, func(o *s3.ListObjectsV2PaginatorOptions) {
if v := int32(maxKeys); v != 0 {
o.Limit = v
}
})

var keys []string

// Iterate through the S3 object pages, printing each object returned.
var i int
for p.HasMorePages() {
i++

// Next Page takes a new context for each page retrieval. This is where
// you could add timeouts or deadlines.
page, err := p.NextPage(ctx)
if err != nil {
return nil, fmt.Errorf("an error occurred while getting next page from s3 bucket while handling prefix %q: %w", prefix, err)
}

// Add keys to the slice.
for _, obj := range page.Contents {
keys = append(keys, *obj.Key)
}
}

klog.V(5).Infof("objects found for prefix %q: %s", prefix, keys)
return keys, nil
}

func downloadToFile(downloader *manager.Downloader, targetDirectory, bucket, key string) error {
// Create the directories in the path
file := filepath.Join(targetDirectory, key)
if err := os.MkdirAll(filepath.Dir(file), 0775); err != nil {
return err
}

// Set up the local file
fd, err := os.Create(file)
if err != nil {
return err
}
defer fd.Close()

// Download the file using the AWS SDK for Go
_, err = downloader.Download(context.Background(), fd, &s3.GetObjectInput{Bucket: &bucket, Key: &key})

return err
}

func tagsFromVersion(version *semver.Version) []string {
var tags []string

@@ -398,23 +159,22 @@ func tagsFromVersion(version *semver.Version) []string {
return tags
}

// handleArtifact discovers new plugin and related rules releases to be published comparing the Git local latest version,
// with the remote latest version, on the OCI repository.
// For each new release version, it pushes the plugin and rule set, downloading the content from the official Falco
// distribution.
func handleArtifact(ctx context.Context, cfg *config, plugin *registry.Plugin,
s3Client *s3.Client, ociClient remote.Client) ([]registry.ArtifactPushMetadata, []registry.ArtifactPushMetadata, error) {
// handleArtifact it pushes artifacts related to a given plugin in the registry.yaml file.
// It could happen that for a given plugin no artifacts such as builds and rulesets are available.
// Consider the case when we release a single plugin.
func handleArtifact(ctx context.Context, cfg *config, plugin *registry.Plugin, ociClient remote.Client,
pluginsAMD64, pluginsARM64, rulesfiles, devTag string) ([]registry.ArtifactPushMetadata, []registry.ArtifactPushMetadata, error) {
// Filter out plugins that are not owned by falcosecurity.
if plugin.Authors != falcoAuthors {
if !strings.HasPrefix(plugin.URL, PluginsRepo) {
sepString := strings.Repeat("#", 15)
klog.Infof("%s %s %s", sepString, plugin.Name, sepString)
klog.Info("%s %s %s", sepString, plugin.Name, sepString)
klog.Infof("skipping plugin %q with authors %q: it is not maintained by %q",
plugin.Name, plugin.Authors, falcoAuthors)
plugin.Name, plugin.Authors, FalcoAuthors)
return nil, nil, nil
}

// Handle the plugin.
newPluginArtifacts, err := handlePlugin(ctx, cfg, plugin, s3Client, ociClient)
newPluginArtifacts, err := handlePlugin(ctx, cfg, plugin, ociClient, pluginsAMD64, pluginsARM64, devTag)
if err != nil {
return nil, nil, err
}

@@ -423,7 +183,7 @@ func handleArtifact(ctx context.Context, cfg *config, plugin *registry.Plugin,
newRuleArtifacts := []registry.ArtifactPushMetadata{}

if plugin.RulesURL != "" {
newRuleArtifacts, err = handleRule(ctx, cfg, plugin, s3Client, ociClient)
newRuleArtifacts, err = handleRule(ctx, cfg, plugin, ociClient, rulesfiles, devTag)
if err != nil {
return nil, nil, err
}

@@ -432,239 +192,170 @@ func handleArtifact(ctx context.Context, cfg *config, plugin *registry.Plugin,
return newPluginArtifacts, newRuleArtifacts, nil
}

// handlePlugin discovers new releases to be published comparing the local latest version, as a git tag on the local
// repository, with the remote latest version, as latest published tag on the remote OCI repository.
// For each new release version, it pushes the plugin with as tag the new release version, and as content the one
// downloaded from the official Falco distribution.
func handlePlugin(ctx context.Context, cfg *config, plugin *registry.Plugin,
s3Client *s3.Client, ociClient remote.Client) ([]registry.ArtifactPushMetadata, error) {
var s3Keys []string
// handlePlugin for a given plugin it checks if there exists build artifacts in the given folders, and
// if found packs them as an OCI artifact and pushes them to the registry.
func handlePlugin(ctx context.Context, cfg *config, plugin *registry.Plugin, ociClient remote.Client,
pluginsAMD64, pluginsARM64 string, devTag string) ([]registry.ArtifactPushMetadata, error) {
var configLayer *oci.ArtifactConfig
var err error
var filepaths, platforms, tags []string
var version string
var infoP *plugins.Info

sepString := strings.Repeat("#", 15)
klog.Infof("%s %s %s", sepString, plugin.Name, sepString)

// Build the reference for the artifact.
ref := refFromPluginEntry(cfg, plugin, false)
// Get all the tags for the given artifact in the remote repository.
remoteVersion, err := latestVersionArtifact(ctx, ref, ociClient)
if err != nil {
return nil, err
}

if remoteVersion != "" {
klog.Infof("latest version found in the OCI registry is: %q", remoteVersion)
} else {
klog.Info("no versions found in the OCI registry")
}

// New releases to be published.
releases, err := newReleases(plugin.Name, remoteVersion)
if err != nil {
return nil, err
}

// If there are no new releases then return.
if len(releases) == 0 {
klog.Info("no new releases found in the local git repo. Nothing to be done")
return nil, nil
} else {
klog.Infof("new releases found in local git repo: %q", releases)
}

// Create s3 downloader.
downloader := manager.NewDownloader(s3Client)

// Metadata of the plugins OCI artifacts push.
metadata := []registry.ArtifactPushMetadata{}

// For each new release we download the tarballs from s3 bucket.
for _, v := range releases {
prefixKey := s3ArtifactNamePrefix(plugin, v.String(), false)
// Get the s3 keys.
if s3Keys, err = listObjects(ctx, s3Client, prefixKey); err != nil {
return nil, fmt.Errorf("an error occurred while listing objects for prefix %q: %v", prefixKey, err)
// Get the name of the build object for the amd64 architecture.
amd64Build, err := buildName(plugin.Name, pluginsAMD64, false)
if err != nil {
return nil, err
}

if amd64Build != "" {
if infoP, err = pluginInfo(filepath.Join(pluginsAMD64, amd64Build)); err != nil {
return nil, err
}

// It could happen if we tagged a new version in the git repo but the CI has not processed it.
// It means that no binaries have been produced and uploaded in the s3 bucket.
if len(s3Keys) == 0 {
klog.Warningf("no archives found on s3 bucket for prefix %q", prefixKey)
continue
}

var filepaths, platforms []string

// Download the tarballs for each key.
for _, key := range s3Keys {
klog.Infof("downloading tarball with key %q", key)
if err := downloadToFile(downloader, plugin.Name, bucketName, key); err != nil {
return nil, fmt.Errorf("an error occurred while downloading tarball %q from bucket %q: %w",
key, bucketName, err)
}

filepaths = append(filepaths, filepath.Join(plugin.Name, key))
platforms = append(platforms, platformFromS3Key(key))
}

tags := tagsFromVersion(&v)

klog.Infof("generating plugin's config layer")

// current platform where the CI is running.
platform := currentPlatform()
for i, p := range platforms {
// We need to get the plugin that have been built for the same platform as the one where we are loading it.
if p == platform {
configLayer, err = pluginConfig(plugin.Name, v.String(), filepaths[i])
if err != nil {
klog.Errorf("unable to generate config file: %v", err)
return nil, err
}
break
}
continue
}

if configLayer == nil {
klog.Warningf("no config layer generated for plugin %q: the plugins has not been build for the current platform %q", plugin.Name, platform)
// Check that the plugin has the same name as the one we got from the registry.yaml.
// If not, we skip it. It could happen that plugins share the same prefix, example k8saudit, k8saudit-gke.
if infoP.Name != plugin.Name {
// buildName func returned a wrong path starting from the plugin name found in registry.yaml.
klog.Warningf("skipping plugin since there is a mismatch in plugin name (%q) and plugin info name(%q)", plugin.Name, infoP.Name)
return nil, nil
}

klog.Infof("pushing plugin to remote repo with ref %q and tags %q", ref, tags)
pusher := ocipusher.NewPusher(ociClient, false, nil)
res, err := pusher.Push(context.Background(), oci.Plugin, ref,
ocipusher.WithTags(tags...),
ocipusher.WithFilepathsAndPlatforms(filepaths, platforms),
ocipusher.WithArtifactConfig(*configLayer),
ocipusher.WithAnnotationSource(cfg.pluginsRepo))
if err != nil {
return nil, fmt.Errorf("an error occurred while pushing plugin %q: %w", plugin.Name, err)
}
if res != nil {
metadata = append(metadata, registry.ArtifactPushMetadata{
registry.RepositoryMetadata{
Ref: ref,
},
registry.ArtifactMetadata{
Digest: res.Digest,
Tags: tags,
},
})
}
filepaths = append(filepaths, filepath.Join(pluginsAMD64, amd64Build))
platforms = append(platforms, amd64Platform)
}

// Get the name of the build object for the arm64 architecture.
arm64Build, err := buildName(plugin.Name, pluginsARM64, false)
if err != nil {
return nil, err
}

if arm64Build != "" {
filepaths = append(filepaths, filepath.Join(pluginsARM64, arm64Build))
platforms = append(platforms, arm64Platform)
}

if arm64Build == "" && amd64Build == "" {
return nil, nil
}

sepString := strings.Repeat("#", 15)
klog.Infof("%s %s %s", sepString, plugin.Name, sepString)

// Extract version from build object.
klog.Infof("generating plugin's config layer")

version, tags, err = versionAndTags(plugin.Name, filepath.Base(filepaths[0]), devTag)
if err != nil {
return nil, err
}

if infoP == nil {
klog.Warningf("no config layer generated for plugin %q: the plugins has not been build for the current platform %q", plugin.Name, currentPlatform())
return nil, nil
}

configLayer, err = pluginConfig(plugin.Name, version, infoP)
if err != nil {
klog.Errorf("unable to generate config file: %v", err)
return nil, err
}

klog.Infof("pushing plugin to remote repo with ref %q and tags %q", ref, tags)
pusher := ocipusher.NewPusher(ociClient, false, nil)
res, err := pusher.Push(ctx, oci.Plugin, ref,
ocipusher.WithTags(tags...),
ocipusher.WithFilepathsAndPlatforms(filepaths, platforms),
ocipusher.WithArtifactConfig(*configLayer),
ocipusher.WithAnnotationSource(cfg.pluginsRepo))
if err != nil {
return nil, fmt.Errorf("an error occurred while pushing plugin %q: %w", plugin.Name, err)
}
if res != nil {
metadata = append(metadata, registry.ArtifactPushMetadata{
registry.RepositoryMetadata{
Ref: ref,
},
registry.ArtifactMetadata{
Digest: res.RootDigest,
Tags: tags,
},
})
}

return metadata, nil
}

// handleRule discovers new releases to be published comparing the local latest version, as a git tag on the local
// repository, with the remote latest version, as latest published tag on the remote OCI repository.
// For each new release version, it pushes the rule set with as tag the new release version, and as content the one
// downloaded from the official Falco distribution.
// handleRule for a given plugin it checks if there exists rulesfiles in the given folder, and
// if found packs them as an OCI artifact and pushes it to the registry.
func handleRule(ctx context.Context, cfg *config, plugin *registry.Plugin,
s3Client *s3.Client, ociClient remote.Client) ([]registry.ArtifactPushMetadata, error) {
var s3Keys []string
ociClient remote.Client, rulesfiles, devTag string) ([]registry.ArtifactPushMetadata, error) {
var err error
var filepaths, tags []string
var version string

// Build the reference for the artifact.
ref := refFromPluginEntry(cfg, plugin, true)

// Metadata of the plugins OCI artifacts push.
metadata := []registry.ArtifactPushMetadata{}

// Get the name of the build object for the amd64 architecture.
rulesfileBuild, err := buildName(plugin.Name, rulesfiles, true)
if err != nil {
return nil, err
}

if rulesfileBuild != "" {
filepaths = append(filepaths, filepath.Join(rulesfiles, rulesfileBuild))
} else {
return nil, nil
}

sepString := strings.Repeat("#", 15)
klog.Infof("%s %s %s", sepString, rulesfileNameFromPlugin(plugin.Name), sepString)

ref := refFromPluginEntry(cfg, plugin, true)
// Get all the tags for the given artifact in the remote repository.
remoteVersion, err := latestVersionArtifact(ctx, ref, ociClient)
klog.Infof("generating rulesfile's config layer")

version, tags, err = versionAndTags(plugin.Name, filepath.Base(filepaths[0]), devTag)
if err != nil {
return nil, err
}

if remoteVersion != "" {
klog.Infof("latest version found in the OCI registry is: %q", remoteVersion)
} else {
klog.Info("no versions found in the OCI registry")
}

// New releases to be published.
releases, err := newReleases(plugin.Name, remoteVersion)
configLayer, err := rulesfileConfig(rulesfileNameFromPlugin(plugin.Name), version, filepaths[0])
if err != nil {
klog.Errorf("unable to generate config file: %v", err)
return nil, err
}

// If there are no new releases then return.
if len(releases) == 0 {
klog.Info("no new releases found in the local git repo. Nothing to be done")
return nil, nil
} else {
klog.Infof("new releases found in local git repo: %q", releases)
klog.Infof("pushing rulesfile to remote repo with ref %q and tags %q", ref, tags)
pusher := ocipusher.NewPusher(ociClient, false, nil)
res, err := pusher.Push(ctx, oci.Rulesfile, ref,
ocipusher.WithTags(tags...),
ocipusher.WithFilepaths(filepaths),
ocipusher.WithArtifactConfig(*configLayer),
ocipusher.WithAnnotationSource(cfg.pluginsRepo))

if err != nil {
return nil, fmt.Errorf("an error occurred while pushing rulesfile %q: %w", plugin.Name, err)
}

// Create s3 downloader.
downloader := manager.NewDownloader(s3Client)

// Metadata of the rules OCI artifacts push.
metadata := []registry.ArtifactPushMetadata{}

// For each new version we download the archives from s3 bucket
for _, v := range releases {
prefixKey := s3ArtifactNamePrefix(plugin, v.String(), true)
// Get the s3 keys.
if s3Keys, err = listObjects(ctx, s3Client, prefixKey); err != nil {
return nil, fmt.Errorf("an error occurred while listing objects for prefix %q: %v", prefixKey, err)
}

// It could happen if we tagged a new version in the git repo but the CI has not processed it.
// It means that no binaries have been produced and uploaded in the s3 bucket.
if len(s3Keys) == 0 {
klog.Warningf("no archives found on s3 bucket for prefix %q", prefixKey)
continue
}

// For a given release of a rulesfile there should be only one archive in the s3 bucket.
if len(s3Keys) > 1 {
err := fmt.Errorf("multiple archives found for rulesfiles with prefix %q: %s", prefixKey, s3Keys)
klog.Error(err)
return nil, err
}

var filepaths []string

key := s3Keys[0]
klog.Infof("downloading tarball with key %q", key)
if err := downloadToFile(downloader, plugin.Name, bucketName, key); err != nil {
return nil, fmt.Errorf("an error occurred while downloading tarball %q from bucket %q: %w",
key, bucketName, err)
}
filepaths = append(filepaths, filepath.Join(plugin.Name, key))

tags := tagsFromVersion(&v)

klog.Infof("generating rulesfile's config layer")

configLayer, err := rulesfileConfig(rulesfileNameFromPlugin(plugin.Name), v.String(), filepaths[0])
if err != nil {
klog.Errorf("unable to generate config file: %v", err)
return nil, err
}
klog.Infof("pushing rulesfile to remote repo with ref %q and tags %q", ref, tags)
pusher := ocipusher.NewPusher(ociClient, false, nil)
res, err := pusher.Push(context.Background(), oci.Rulesfile, ref,
ocipusher.WithTags(tags...),
ocipusher.WithFilepaths(filepaths),
ocipusher.WithArtifactConfig(*configLayer),
ocipusher.WithAnnotationSource(cfg.pluginsRepo))

if err != nil {
return nil, fmt.Errorf("an error occurred while pushing rulesfile %q: %w", plugin.Name, err)
}
if res != nil {
metadata = append(metadata, registry.ArtifactPushMetadata{
registry.RepositoryMetadata{
Ref: ref,
},
registry.ArtifactMetadata{
Digest: res.Digest,
Tags: tags,
},
})
}
if res != nil {
metadata = append(metadata, registry.ArtifactPushMetadata{
registry.RepositoryMetadata{
Ref: ref,
},
registry.ArtifactMetadata{
Digest: res.RootDigest,
Tags: tags,
},
})
}

return metadata, nil

@@ -673,3 +364,60 @@ func handleRule(ctx context.Context, cfg *config, plugin *registry.Plugin,
func rulesfileNameFromPlugin(name string) string {
return fmt.Sprintf("%s%s", name, common.RulesArtifactSuffix)
}

// buildName returns the name of the build object for a given object name.
// It searches in the given folder if build artifact exists that has the same
// prefix as the object. If we are searching for a rulesfiles object then, the
// rulefiles variable needs to be set to true.
func buildName(objName, dirPath string, rulesfile bool) (string, error) {
if dirPath == "" {
return "", nil
}
// Get the entries
entries, err := os.ReadDir(dirPath)
if err != nil {
return "", fmt.Errorf("unable to get build object for %q: %w", objName, err)
}

for _, entry := range entries {
name := entry.Name()
if rulesfile {
if strings.HasPrefix(name, objName+"-rules") {
return name, nil
}
} else {
if strings.HasPrefix(name, objName) && !strings.Contains(name, "rules") {
return name, nil
}
}
}
return "", nil
}

func versionAndTags(pluginName, buildName, devTag string) (string, []string, error) {
var version string
var tags []string
var err error

if strings.Contains(buildName, "-rules") {
version = strings.TrimPrefix(buildName, pluginName+"-rules-")
version = strings.TrimSuffix(version, archiveSuffix)
} else {
regexPattern := `\b-linux\S*`
regex := regexp.MustCompile(regexPattern)
// Replace all substrings starting with "linux" with an empty string
version = regex.ReplaceAllString(buildName, "")
version = strings.TrimPrefix(version, pluginName+"-")
}

if devTag != "" {
return version, append(tags, devTag), nil
}

// If not a dev version, we expect to but be semver compatible.
semVer, err := semver.Parse(version)
if err != nil {
return "", nil, fmt.Errorf("unable to parse version for %q: %w", buildName, err)
}
return version, tagsFromVersion(&semVer), nil
}
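The versionAndTags helper added above derives the artifact version from the archive file name: rulesfile archives drop the `<plugin>-rules-` prefix and the `.tar.gz` suffix, plugin archives drop everything from `-linux` onward. A small standalone illustration of that stripping logic, mirroring the regex used in the hunk (the file names below are invented for the example, not taken from the repository):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// versionFromBuildName reproduces the version-extraction step of versionAndTags.
func versionFromBuildName(pluginName, buildName string) string {
	if strings.Contains(buildName, "-rules") {
		v := strings.TrimPrefix(buildName, pluginName+"-rules-")
		return strings.TrimSuffix(v, ".tar.gz")
	}
	// Drop the "-linux-<arch>.tar.gz" tail, then the "<plugin>-" prefix.
	v := regexp.MustCompile(`\b-linux\S*`).ReplaceAllString(buildName, "")
	return strings.TrimPrefix(v, pluginName+"-")
}

func main() {
	fmt.Println(versionFromBuildName("k8saudit", "k8saudit-0.7.0-linux-x86_64.tar.gz")) // 0.7.0
	fmt.Println(versionFromBuildName("k8saudit", "k8saudit-rules-0.7.0.tar.gz"))        // 0.7.0
}
```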
@@ -22,11 +22,11 @@ import (
"errors"
"fmt"
"os"
"strconv"
"strings"

"github.com/blang/semver"
"github.com/falcosecurity/falcoctl/pkg/oci"
"github.com/falcosecurity/plugin-sdk-go/pkg/loader"
"github.com/falcosecurity/plugins/build/registry/pkg/common"
)

@@ -67,15 +67,16 @@ func rulesfileRequirement(filePath string) (*oci.ArtifactRequirement, error) {
// In case the requirement was expressed as a numeric value,
// we convert it to semver and treat it as minor version.
tokens := strings.Split(fileScanner.Text(), ":")
reqVer, err := semver.Parse(tokens[1])
version := strings.TrimSpace(tokens[1])
reqVer, err := semver.Parse(version)
if err != nil {
reqVer, err = semver.ParseTolerant(tokens[1])
minor, err := strconv.ParseUint(version, 10, 64)
if err != nil {
return nil, fmt.Errorf("unable to parse requirement %q: expected a numeric value or a valid semver string", tokens[1])
return nil, fmt.Errorf("unable to parse requirement %q: expected a numeric value or a valid semver string", version)
}
reqVer = semver.Version{
Major: 0,
Minor: reqVer.Major,
Minor: minor,
Patch: 0,
}
}

@@ -85,17 +86,3 @@ func rulesfileRequirement(filePath string) (*oci.ArtifactRequirement, error) {
Version: reqVer.String(),
}, nil
}

// pluginRequirement given a plugin as a shared library it loads it and gets the api version
// required by the plugin.
func pluginRequirement(filePath string) (*oci.ArtifactRequirement, error) {
plugin, err := loader.NewPlugin(filePath)
if err != nil {
return nil, fmt.Errorf("unable to open plugin %q: %w", filePath, err)
}

return &oci.ArtifactRequirement{
Name: common.PluginAPIVersion,
Version: plugin.Info().RequiredAPIVersion,
}, nil
}

@@ -0,0 +1,39 @@
// SPDX-License-Identifier: Apache-2.0
/*
Copyright (C) 2024 The Falco Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package oci

import (
"testing"

"github.com/stretchr/testify/assert"
)

func TestRulesfileRequirement(t *testing.T) {
req, err := rulesfileRequirement("testdata/rules-failed-req.yaml")
assert.Error(t, err)

req, err = rulesfileRequirement("testdata/rules-numeric-req.yaml")
assert.NoError(t, err)
assert.Equal(t, "0.15.0", req.Version)
assert.Equal(t, "engine_version_semver", req.Name)

req, err = rulesfileRequirement("testdata/rules-semver-req.yaml")
assert.NoError(t, err)
assert.Equal(t, "0.31.0", req.Version)
assert.Equal(t, "engine_version_semver", req.Name)
}

@@ -0,0 +1 @@
- required_engine_version: test

@@ -0,0 +1 @@
- required_engine_version: 15

@@ -0,0 +1 @@
- required_engine_version: 0.31.0

@@ -2,4 +2,4 @@ module github.com/falcosecurity/plugins/build/utils

go 1.17

require github.com/spf13/pflag v1.0.5
require github.com/spf13/pflag v1.0.6

@@ -1,2 +1,2 @@
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
@@ -43,7 +43,7 @@ import (
#include <stdio.h>

static uintptr_t pluginOpen(const char* path, char** err) {
void* h = dlopen(path, RTLD_NOW|RTLD_GLOBAL);
void* h = dlopen(path, RTLD_NOW|RTLD_GLOBAL|RTLD_DEEPBIND);
if (h == NULL) {
*err = (char*)dlerror();
}

@@ -163,13 +163,18 @@ func main() {
var hash string

// get last tag
tags, err := git("describe", "--tags", "--abbrev=0", "--match", name+`-[0-9].[0-9].[0-9]*`)
// It matches the old tag in "pluginName-version" or the new one "plugins/pluginName/semver"
tags, err := git("describe", "--tags", "--abbrev=0", "--match", name+`-[0-9]*`, "--match", "plugins/"+name+"/v*")
if err == nil {
if len(tags) == 0 {
fail(errors.New("no git tag found for: " + name))
}
lastTag := tags[0]
lastVer = strings.Replace(lastTag, name+"-", "", 1)
if strings.HasPrefix(lastTag, name) {
lastVer = strings.Replace(lastTag, name+"-", "", 1)
} else {
lastVer = strings.Replace(lastTag, "plugins/"+name+"/v", "", 1)
}
if !rgxVersion.MatchString(lastVer) {
fail(errors.New("plugin latest released version not compatible with SemVer: " + lastTag))
}

@@ -200,7 +205,7 @@ func main() {

} else {
// stable versions MUST have a precise tag matching plugin name and version
expectedTag := name + "-" + version
expectedTag := "plugins/" + name + "/v" + version
tags, err := git("--no-pager", "tag", "--points-at", "HEAD")
if err != nil {
fail(err)

@@ -1,7 +1,7 @@
#!/bin/bash
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2023 The Falco Authors.
# Copyright (C) 2025 The Falco Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at

@@ -24,18 +24,34 @@ tool=./build/changelog/bin/changelog

to=""
from=""
tags="$(git tag -l | grep ${plugin}-[0-9].[0-9].[0-9] | grep -v ${plugin}-[0-9].[0-9].[0-9]-rc | sort -r)"
tags="$(git tag -l | grep -E -e ${plugin}-[0-9]+.[0-9]+.[0-9]+ -e ${plugin}/v[0-9]+.[0-9]+.[0-9]+ | grep -E -v ${plugin}-[0-9]+.[0-9]+.[0-9]+-rc | sort -V -r)"

# print title
echo "# Changelog"
echo ""

# generate entry for upcoming tag, if any
head="$(git rev-parse HEAD)"
dev_changelog="$(${tool} --from="" --to=${head} --plugin=${plugin})"
if [ ! -z "$dev_changelog" ]; then
echo "## dev build (unreleased)"
echo ""
echo "$dev_changelog"
echo ""
fi

# generate entry for each tag
for tag in $tags
do
from=$tag
if [ ! -z "$to" ]; then
ver="$(echo ${to} | sed -e s/^${plugin}-// -e s/^/v/)"
ver=""
# support both the old and new tag formats
if [[ $to == plugins/* ]]; then
ver="$(echo ${to} | sed -e s/^plugins\\/${plugin}\\///)"
else
ver="$(echo ${to} | sed -e s/^${plugin}-// -e s/^/v/)"
fi
echo "## ${ver}"
echo ""
${tool} --from=${from} --to=${to} --plugin=${plugin}

@@ -47,7 +63,12 @@ done
# generate last entry for first tag, starting from the first commit
if [ -n "$to" ]; then
from="$(git rev-list --max-parents=0 HEAD)"
ver="$(echo ${to} | sed -e s/^${plugin}-// -e s/^/v/)"
# support both the old and new tag formats
if [[ $to == plugins/* ]]; then
ver="$(echo ${to} | sed -e s/^plugins\\/${plugin}\\///)"
else
ver="$(echo ${to} | sed -e s/^${plugin}-// -e s/^/v/)"
fi
echo "## ${ver}"
echo ""
${tool} --from=${from} --to=${to} --plugin=${plugin}
@ -2,7 +2,7 @@
|
|||
|
||||
Using a unique `id` is mandatory to maintain interoperability across all plugins with _event sourcing_ capability. When a plugin is loaded by a compatible application (e.g., Falco), the `id` is used to route events to the correct plugin. Indeed, attempting to load two or more plugins using the same `id` will result in an error.
|
||||
|
||||
For this reason, The Falco Project maintains a [public registry of plugins](https://github.com/falcosecurity/plugins/blob/master/README.md#registering-a-new-plugin), which allows the assignment of a unique `id` for your plugin. However, some plugins may not be registered in the public registry. For example, if you are privately developing a plugin for your own use, you might use any `id` you want. To avoid conflicts in these situations, this document mandates general rules regarding `id` assignment and reservation.
|
||||
For this reason, The Falco Project maintains a [public registry of plugins](https://github.com/falcosecurity/plugins/blob/main/README.md#registering-a-new-plugin), which allows the assignment of a unique `id` for your plugin. However, some plugins may not be registered in the public registry. For example, if you are privately developing a plugin for your own use, you might use any `id` you want. To avoid conflicts in these situations, this document mandates general rules regarding `id` assignment and reservation.
|
||||
|
||||
## ID Blocks
|
||||
|
||||
|
|
|
@ -0,0 +1,40 @@
|
|||
### Registering a Plugin
|
||||
|
||||
Registering your plugin inside the registry helps ensure that some technical constraints are respected (for example, that a [given ID is used by exactly one plugin with event source capability](https://falco.org/docs/concepts/plugins/architecture/#plugin-event-ids)), and allows plugin authors to [coordinate about event source formats](https://falco.org/docs/concepts/plugins/architecture/#plugin-event-sources-and-interoperability). Moreover, this is a great way to share your plugin project with the community and engage with it, thus gaining new users and **increasing its visibility**. We encourage you to register your plugin in this registry before publishing it. You can add your plugin to this registry regardless of where its source code is hosted (there's a `url` field specifically for this).
|
||||
|
||||
The registration process involves adding an entry about your plugin inside the [registry.yaml](../registry.yaml) file by creating a Pull Request in this repository. Please be mindful of a few constraints that are automatically checked and required for your plugin to be accepted:
|
||||
|
||||
- The `name` field is mandatory and must be **unique** across all the plugins in the registry
|
||||
- *(Sourcing Capability Only)* The `id` field is mandatory and must be **unique** in the registry across all the plugins with event source capability
|
||||
- See [docs/plugin-ids.md](plugin-ids.md) for more information about plugin IDs
|
||||
- The plugin `name` must match this [regular expression](https://en.wikipedia.org/wiki/Regular_expression): `^[a-z]+[a-z0-9-_\-]*$` (however, it's not recommended to use `_` in the name unless you are trying to match the name of a source or have another specific reason to do so)
|
||||
- The `source` *(Sourcing Capability Only)* and `sources` *(Extraction Capability Only)* must match this [regular expression](https://en.wikipedia.org/wiki/Regular_expression): `^[a-z]+[a-z0-9_]*$`
|
||||
- The `url` field should point to the plugin source code
|
||||
- The `rules_url` field should point to the default ruleset, if any
|
||||
|
||||
For reference, here's an example of an entry for a plugin with both event sourcing and field extraction capabilities:
|
||||
```yaml
|
||||
- name: k8saudit
|
||||
description: ...
|
||||
authors: ...
|
||||
contact: ...
|
||||
maintainers:
|
||||
- name: The Falco Authors
|
||||
email: cncf-falco-dev@lists.cncf.io
|
||||
keywords:
|
||||
- audit
|
||||
- audit-log
|
||||
- audit-events
|
||||
- kubernetes
|
||||
url: https://github.com/falcosecurity/plugins/tree/main/plugins/k8saudit
|
||||
rules_url: https://github.com/falcosecurity/plugins/tree/main/plugins/k8saudit/rules
|
||||
|
||||
license: ...
|
||||
capabilities:
|
||||
sourcing:
|
||||
supported: true
|
||||
id: 2
|
||||
source: k8s_audit
|
||||
extraction:
|
||||
supported: true
|
||||
```
|
|
@ -0,0 +1,6 @@
|
|||
*.so
|
||||
*.a
|
||||
*.o
|
||||
.vscode
|
||||
build*
|
||||
libanomalydetection.so
|
|
@ -0,0 +1,38 @@
|
|||
# Changelog
|
||||
|
||||
## dev build (unreleased)
|
||||
|
||||
* [`406c517`](https://github.com/falcosecurity/plugins/commit/406c517) update(anomalydetection): tweak inits when count_min_sketch disabled + better...
|
||||
|
||||
* [`cb0fdb1`](https://github.com/falcosecurity/plugins/commit/cb0fdb1) update(anomalydetection): update documentation
|
||||
|
||||
* [`79c085e`](https://github.com/falcosecurity/plugins/commit/79c085e) update(anomalydetection): helper new filtercheck / output field anomaly.falco...
|
||||
|
||||
* [`489ef6d`](https://github.com/falcosecurity/plugins/commit/489ef6d) update(anomalydetection): ability to reset data structures w/ timers
|
||||
|
||||
* [`d4e72b8`](https://github.com/falcosecurity/plugins/commit/d4e72b8) update(anomalydetection): more usage safeguards and info log messages
|
||||
|
||||
* [`23bf05e`](https://github.com/falcosecurity/plugins/commit/23bf05e) update(anomalydetection): add some fallbacks / evt param extraction in cases ...
|
||||
|
||||
* [`1e8052c`](https://github.com/falcosecurity/plugins/commit/1e8052c) update(anomalydetection): add some custom behavior profile short-cut fields o...
|
||||
|
||||
* [`6a448c2`](https://github.com/falcosecurity/plugins/commit/6a448c2) update(anomalydetection): add MutexGuard (adopted from libs) to sketches data...
|
||||
|
||||
* [`f720e4a`](https://github.com/falcosecurity/plugins/commit/f720e4a) update(anomalydetection): finish currently supported behavior profile filterc...
|
||||
|
||||
* [`14a58d3`](https://github.com/falcosecurity/plugins/commit/14a58d3) update(anomalydetection): add lastevent_fd + enhance robustness / tests + sta...
|
||||
|
||||
* [`27747eb`](https://github.com/falcosecurity/plugins/commit/27747eb) update(anomalydetection): unit tests for proc lineage + add filterchecks 1/n
|
||||
|
||||
* [`fa2c05e`](https://github.com/falcosecurity/plugins/commit/fa2c05e) update(anomalydetection): populate info for proc args
|
||||
|
||||
* [`faf6636`](https://github.com/falcosecurity/plugins/commit/faf6636) update(anomalydetection): sync plugin to latest SDK changes
|
||||
|
||||
* [`6a834a3`](https://github.com/falcosecurity/plugins/commit/6a834a3) new(anomalydetection): init config + start behavior profile extraction
|
||||
|
||||
* [`73c11bd`](https://github.com/falcosecurity/plugins/commit/73c11bd) new(anomalydetection): start unit tests + bump libs and sdk
|
||||
|
||||
* [`f778922`](https://github.com/falcosecurity/plugins/commit/f778922) cleanup(anomalydetection): cms class updates
|
||||
|
||||
* [`6b89390`](https://github.com/falcosecurity/plugins/commit/6b89390) new(anomalydetection): init plugin / start dev
|
||||
|
|
@ -0,0 +1,57 @@
|
|||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
# Copyright (C) 2024 The Falco Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations under the License.
|
||||
#
|
||||
|
||||
cmake_minimum_required(VERSION 3.22)
|
||||
|
||||
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules")
|
||||
|
||||
option(BUILD_TESTS "Enable tests" ON)
|
||||
|
||||
# Project metadata
|
||||
project(
|
||||
anomalydetection
|
||||
VERSION 0.1.0
|
||||
DESCRIPTION "Falco Anomaly Detection Plugin"
|
||||
LANGUAGES CXX)
|
||||
|
||||
# Dependencies
|
||||
include(FetchContent)
|
||||
include(plugin-sdk-cpp)
|
||||
include(libs) # Temporarily include libs for initial dev
|
||||
include(xxhash)
|
||||
|
||||
# Project target
|
||||
file(GLOB_RECURSE anomalydetection_SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp")
|
||||
add_library(anomalydetection SHARED ${anomalydetection_SOURCES} )
|
||||
set_target_properties(anomalydetection PROPERTIES CXX_EXTENSIONS OFF)
|
||||
|
||||
# Project compilation options
|
||||
target_compile_options(anomalydetection PRIVATE "-fPIC")
|
||||
target_compile_options(anomalydetection PRIVATE "-Wl,-z,relro,-z,now")
|
||||
target_compile_options(anomalydetection PRIVATE "-fstack-protector-strong")
|
||||
# When compiling in Debug mode, this will define the DEBUG symbol for use in your code
|
||||
target_compile_options(anomalydetection PUBLIC "$<$<CONFIG:DEBUG>:-DDEBUG>")
|
||||
target_compile_features(anomalydetection PUBLIC cxx_std_17)
|
||||
|
||||
# Project includes
|
||||
target_include_directories(
|
||||
anomalydetection PRIVATE "${PLUGIN_SDK_INCLUDE}" "${XXHASH_INCLUDE}" "${LIBS_INCLUDE}")
|
||||
|
||||
# Project linked libraries
|
||||
target_link_libraries(anomalydetection ${_REFLECTION})
|
||||
|
||||
# Testing
|
||||
if(BUILD_TESTS)
|
||||
add_subdirectory(test)
|
||||
endif()
|
|
@ -0,0 +1,36 @@
|
|||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
# Copyright (C) 2024 The Falco Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations under the License.
|
||||
#
|
||||
|
||||
NAME := anomalydetection
|
||||
OUTPUT := lib$(NAME).so
|
||||
|
||||
all: $(OUTPUT)
|
||||
|
||||
clean:
|
||||
rm -rf build $(OUTPUT)
|
||||
# Temporarily include libs for initial dev
|
||||
$(OUTPUT):
|
||||
mkdir -p build \
|
||||
&& cd build \
|
||||
&& cmake \
|
||||
-DCMAKE_BUILD_TYPE=Release \
|
||||
-DMINIMAL_BUILD=ON \
|
||||
-DUSE_BUNDLED_LIBELF=OFF \
|
||||
-DCREATE_TEST_TARGETS=OFF \
|
||||
../ \
|
||||
&& make -j6 anomalydetection \
|
||||
&& cp ./$(OUTPUT) ../$(OUTPUT)
|
||||
|
||||
readme:
|
||||
@$(READMETOOL) -p ./$(OUTPUT) -f README.md
|
|
@ -0,0 +1,319 @@
|
|||
# Falcosecurity `anomalydetection` Plugin
|
||||
|
||||
**This plugin is experimental and under development**
|
||||
|
||||
The `anomalydetection` plugin was created based on this [Proposal](https://github.com/falcosecurity/falco/blob/master/proposals/20230620-anomaly-detection-framework.md).
|
||||
|
||||
## Introduction
|
||||
|
||||
The `anomalydetection` plugin enhances {syscall} event analysis by incorporating anomaly detection estimates for probabilistic filtering.
|
||||
|
||||
### Functionality
|
||||
|
||||
The initial scope focuses exclusively on "CountMinSketch Powered Probabilistic Counting and Filtering" for a subset of syscalls and a selection of options for defining behavior profiles. This limitation is due to current restrictions related to the plugin API and SDK layout.
|
||||
|
||||
The new framework primarily aims to improve the usability of standard Falco rules. It may reduce the need for precise rule tuning, leverage probabilistic count estimates to auto-tune noisy rules on the fly, and enable the creation of broader Falco rules. Read more in the [Proposal](https://github.com/falcosecurity/falco/blob/master/proposals/20230620-anomaly-detection-framework.md).
|
||||
|
||||
### TL;DR
|
||||
|
||||
The official documentation will eventually be available on the Falco [Plugins](https://falco.org/docs/plugins/) site. Until then, do not treat this README as complete documentation for using this plugin.
|
||||
|
||||
*Disclaimer*: Anomaly detection can mean different things to different people. It's best to keep your expectations low for this plugin's current capabilities. For now, it is focused solely on probabilistic counting.
|
||||
|
||||
What this plugin is:
|
||||
- **Initial step for real-time anomaly detection in Falco**: Introduces basic real-time anomaly detection methods on the host.
|
||||
- **Probabilistic counting**: Currently supports only probabilistic counting, with the guarantee that any overcounting remains within an acceptable error margin (see the note after these lists).
|
||||
- **Use-case dependent**: Requires careful derivation of custom use cases; no default use cases are provided at this time.
|
||||
- **Limited by current API**: Subject to several restrictions due to plugin API and other limitations.
|
||||
- **Built for future extensibility**: Designed to support more algorithms in the future, limited to those that can be implemented in a single data pass to ensure real-time performance.
|
||||
- **Documentation is insufficient**: Expect to need hands-on exploration to understand usage and restrictions.
|
||||
|
||||
What this plugin is not:
|
||||
- **Not a pre-trained AI/ML model**.
|
||||
- **Not ready out-of-the-box**: No default configuration or use cases are provided at this time.
|
||||
- **Not a universal solution**: Does not offer a one-size-fits-all approach to anomaly detection.
|
||||
- **No multi-pass algorithms**: Algorithms requiring multiple data passes are not planned; the plugin is intended to remain real-time and efficient for applicable use cases.
|
||||
- **Not yet battle-tested in production**.
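
For context on the "acceptable error margin" mentioned above: with the textbook Count-Min Sketch parameterization used by this plugin's `cms.h` (shown later in this changeset), a sketch configured with error probability `gamma` and relative error `eps` guarantees that, for any tracked item with true count `a`, the returned estimate satisfies

```math
a \;\le\; \hat{a} \;\le\; a + \varepsilon \cdot N \quad \text{with probability at least } 1 - \gamma
```

where `N` is the total number of updates inserted into the sketch. In other words, the sketch never undercounts, and overcounting is bounded relative to the overall event volume.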
|
||||
|
||||
### Outlook
|
||||
|
||||
In the near term, the plan is to expand the syscalls for which behavior profiles can be applied and to enhance the fields available for defining these profiles. The first version is quite restrictive in this regard due to current plugin API limitations. Additionally, from an algorithmic and capabilities point of view, we will explore the following:
|
||||
|
||||
- Support for HyperLogLog probabilistic distinct counting (ETA unknown).
|
||||
- Overcoming the cold start problem by loading sketch data structures and counts from previous agent runs or from test environments (ETA unknown).
|
||||
- Efficient and feasible options for real-time, single-pass time series analysis (ETA unknown).
|
||||
|
||||
### Plugin Official Name
|
||||
|
||||
`anomalydetection`
|
||||
|
||||
## Capabilities
|
||||
|
||||
The `anomalydetection` plugin implements 2 capabilities:
|
||||
|
||||
* `extraction`
|
||||
* `parsing`
|
||||
|
||||
## Supported Fields
|
||||
|
||||
Here is the current set of output / filter fields introduced by this plugin:
|
||||
|
||||
<!-- README-PLUGIN-FIELDS -->
|
||||
| NAME | TYPE | ARG | DESCRIPTION |
|
||||
|------------------------------------|----------|-------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| `anomaly.count_min_sketch` | `uint64` | Index | Count Min Sketch Estimate according to the specified behavior profile for a predefined set of {syscalls} events. Access different behavior profiles/sketches using indices. For instance, anomaly.count_min_sketch[0] retrieves the first behavior profile defined in the plugin's `init_config`. |
|
||||
| `anomaly.count_min_sketch.profile` | `string` | Index | Concatenated string according to the specified behavior profile (not preserving original order). Access different behavior profiles using indices. For instance, anomaly.count_min_sketch.profile[0] retrieves the first behavior profile defined in the plugin's `init_config`. |
|
||||
| `anomaly.falco.duration_ns` | `uint64` | None | Falco agent run duration in nanoseconds, which could be useful for ignoring some rare events at launch time while Falco is just starting to build up the counts in the sketch data structures (if applicable). |
|
||||
<!-- /README-PLUGIN-FIELDS -->
|
||||
|
||||
## Usage
|
||||
|
||||
**Configuration**
|
||||
|
||||
Here's an example configuration for `falco.yaml`:
|
||||
|
||||
```yaml
|
||||
plugins:
|
||||
- name: anomalydetection
|
||||
library_path: libanomalydetection.so
|
||||
init_config:
|
||||
count_min_sketch:
|
||||
enabled: true
|
||||
n_sketches: 3
|
||||
# `gamma_eps`: auto-calculate rows and cols; usage: [[gamma, eps], ...];
|
||||
# gamma -> error probability -> determine d / rows / number of hash functions
|
||||
# eps -> relative error -> determine w / cols / number of buckets
|
||||
gamma_eps: [
|
||||
[0.001, 0.0001],
|
||||
[0.001, 0.0001],
|
||||
[0.001, 0.0001]
|
||||
]
|
||||
# `rows_cols`: pass explicit dimensions, supersedes `gamma_eps`; usage: [[7, 27183], ...]; by default disabled when not used.
|
||||
# rows_cols: []
|
||||
behavior_profiles: [
|
||||
{
|
||||
"fields": "%container.id %custom.proc.aname.lineage.join[7] %custom.proc.aexepath.lineage.join[7] %proc.tty %proc.vpgid.name %proc.sname",
|
||||
# execve, execveat exit event codes
|
||||
"event_codes": [293, 331]
|
||||
},
|
||||
{
|
||||
"fields": "%container.id %custom.proc.aname.lineage.join[7] %custom.proc.aexepath.lineage.join[7] %proc.tty %proc.vpgid.name %proc.sname %fd.name %fd.nameraw",
|
||||
# open, openat, openat2 exit event codes
|
||||
"event_codes": [3, 307, 327]
|
||||
},
|
||||
{
|
||||
"fields": "%container.id %proc.cmdline",
|
||||
# execve, execveat exit event codes
|
||||
"event_codes": [293, 331],
|
||||
# optional config `reset_timer_ms`, resets the data structure every x milliseconds, here one hour as example
|
||||
# Remove JSON key if not wanted / needed.
|
||||
"reset_timer_ms": 3600000
|
||||
}
|
||||
]
|
||||
|
||||
load_plugins: [anomalydetection]
|
||||
```
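
For reference, each `gamma_eps` pair is turned into sketch dimensions using the same arithmetic as the `cms.h` header later in this changeset: `d = ceil(ln(1/gamma))` rows (hash functions) and `w = ceil(e/eps)` columns (buckets). The following standalone snippet (illustrative only, not part of the plugin) reproduces that calculation for the values used above:

```cpp
// Illustrative only: reproduce the dimension and memory math from num/cms.h
// for the gamma/eps pair used in the example config above.
#include <cmath>
#include <cstdint>
#include <cstdio>

int main()
{
    double gamma = 0.001; // error probability -> determines d (rows / hash functions)
    double eps = 0.0001;  // relative error    -> determines w (cols / buckets)

    uint64_t d = static_cast<uint64_t>(std::ceil(std::log(1.0 / gamma))); // 7
    uint64_t w = static_cast<uint64_t>(std::ceil(std::exp(1) / eps));     // 27183

    // Mirrors cms::get_size_bytes() for a uint64_t sketch: d * w * sizeof(uint64_t)
    uint64_t bytes = d * w * sizeof(uint64_t); // 1,522,248 bytes (~1.5 MB) per sketch

    std::printf("d=%llu w=%llu bytes=%llu\n",
                (unsigned long long)d, (unsigned long long)w, (unsigned long long)bytes);
    return 0;
}
```

This matches the `rows_cols: [[7, 27183], ...]` example in the comments above; passing explicit `rows_cols` simply skips this conversion.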
|
||||
|
||||
The first version is quite restrictive with respect to the behavior profile's `event_codes` and `fields`. In a nutshell, you can currently define them only for a handful of event codes that Falco supports and a subset of the [Supported Fields for Conditions and Outputs](https://falco.org/docs/reference/rules/supported-fields/).
|
||||
|
||||
When you disable the `count_min_sketch` algorithm as shown below, all `anomaly.count_min_sketch` fields will be null.
|
||||
|
||||
```
|
||||
count_min_sketch:
|
||||
enabled: false
|
||||
```
|
||||
|
||||
__NOTE__: Do not toggle the `enabled` key while hot reloading the config, as it currently does not get properly applied in such cases. Restart Falco with the `count_min_sketch` either enabled or disabled; subsequent reloads will work as expected.
|
||||
|
||||
**Behavior profiles for "execve/execveat/clone/clone3" events**
|
||||
|
||||
Example 1:
|
||||
```
|
||||
"event_codes": [293, 331],
|
||||
```
|
||||
|
||||
Example 2:
|
||||
```
|
||||
"event_codes": [223, 335],
|
||||
```
|
||||
|
||||
You can reference a behavior profile based on "execve/execveat/clone/clone3" events in any Falco rule that monitors any supported syscall. This works because every syscall is associated with a process.
|
||||
|
||||
**Behavior profiles for "fd-related" events**
|
||||
|
||||
Example 1:
|
||||
```
|
||||
rule: (evt.type in (open, openat, openat2) and evt.dir=<)
|
||||
...
|
||||
"event_codes": [3, 307, 327],
|
||||
```
|
||||
|
||||
Example 2:
|
||||
```
|
||||
rule: (evt.type=connect and evt.dir=<)
|
||||
...
|
||||
"event_codes": [23],
|
||||
```
|
||||
|
||||
You should avoid writing rules for arbitrary syscalls using "fd-related" behavior profiles because if a syscall doesn't involve a file descriptor (fd), referencing counts that rely on fd fields won't be meaningful.
|
||||
|
||||
Here's how it works:
|
||||
- If your behavior profile includes `%fd.*` fields, all event codes in that profile must be related to file descriptors.
|
||||
- If you use an "fd-related" behavior profile with a syscall that doesn't involve a file descriptor, the count will always be zero. While Falco won't crash, the anomaly detection estimate won't function as expected.
|
||||
|
||||
References:
|
||||
- See the [Supported PPME `event codes`](#ppme-event-codes) reference below.
|
||||
- See the [Supported Behavior Profiles `fields`](#behavior-profiles-fields) reference below.
|
||||
|
||||
**Open Parameters**:
|
||||
|
||||
This plugin does not have open params.
|
||||
|
||||
**Rules**
|
||||
|
||||
This plugin does not provide any default use cases or rules at the moment. More concrete use cases may be added at a later time.
|
||||
|
||||
Example of a dummy Falco rule using the `anomalydetection` fields for local testing:
|
||||
|
||||
```yaml
|
||||
- macro: spawned_process
|
||||
condition: (evt.type in (execve, execveat) and evt.dir=<)
|
||||
- rule: execve count_min_sketch test
|
||||
desc: "execve count_min_sketch test"
|
||||
condition: spawned_process and proc.name=cat and anomaly.count_min_sketch[0] > 10
|
||||
output: '%anomaly.count_min_sketch[0] %proc.pid %proc.ppid %proc.name %user.loginuid %user.name %user.uid %proc.cmdline %container.id %evt.type %evt.res %proc.cwd %proc.sid %proc.exepath %container.image.repository'
|
||||
priority: NOTICE
|
||||
tags: [maturity_sandbox, host, container, process, anomalydetection]
|
||||
```
|
||||
|
||||
__NOTE__: Ensure you regularly execute `cat` commands. Once you have done so frequently enough, logs will start to appear. Alternatively, perform an inverse test to observe how quickly a very noisy rule gets silenced.
|
||||
|
||||
**Adoption**
|
||||
|
||||
To adopt the plugin framework, you can start by identifying rules in the [default](https://github.com/falcosecurity/rules) Falco ruleset that could benefit from auto-tuning based on your heuristics regarding counts. For example, you might broaden the scope of a rule and add an `anomaly.count_min_sketch` filter condition as a safety upper bound.
|
||||
|
||||
For initial adoption, we recommend creating new, separate rules inspired by existing upstream rules, rather than modifying rules that are already performing well in production.
|
||||
|
||||
Another approach is to duplicate a rule -- one version with and another without the anomaly detection filtering.
|
||||
|
||||
Alternatively, you can add the count estimates as output fields to provide additional forensic evidence without using the counts for on-host filtering.
|
||||
|
||||
Lastly, keep in mind that there is a configuration to reset the counts per behavior profile every x milliseconds if this suits your use case better.
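
To illustrate what a per-profile reset conceptually looks like, here is a simplified sketch (not the plugin's actual implementation; the real plugin drives resets through its `ThreadManager` and guards the sketches with a mutex, see `plugin.h` later in this changeset):

```cpp
// Simplified illustration of a periodic sketch reset driven by a timer,
// analogous to the per-profile `reset_timer_ms` setting.
#include "num/cms.h" // assumes the plugin's include paths

#include <atomic>
#include <chrono>
#include <thread>

int main()
{
    using plugin::anomalydetection::num::cms;

    cms<uint64_t> sketch(0.001, 0.0001);
    std::atomic<bool> stop{false};
    const std::chrono::milliseconds reset_timer_ms{3600000}; // one hour, as in the example config

    // Background thread: zero all counters whenever the timer elapses,
    // polling the stop flag so the thread can be joined promptly.
    std::thread resetter([&]() {
        auto next_reset = std::chrono::steady_clock::now() + reset_timer_ms;
        while (!stop.load())
        {
            std::this_thread::sleep_for(std::chrono::milliseconds(100));
            if (std::chrono::steady_clock::now() >= next_reset)
            {
                sketch.reset(); // the real plugin serializes this against update()/estimate()
                next_reset = std::chrono::steady_clock::now() + reset_timer_ms;
            }
        }
    });

    // ... the event path would keep calling sketch.update(...) / sketch.estimate(...) here ...

    stop.store(true);
    resetter.join();
    return 0;
}
```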
|
||||
|
||||
### Running
|
||||
|
||||
This plugin requires Falco version **0.38.2** or newer.
|
||||
|
||||
1. Have Falco >= **0.38.2** installed and set up
|
||||
2. Download the plugin's shared object (or build it yourself; see instructions below) and place it under `/usr/share/falco/plugins/libanomalydetection.so`
|
||||
3. Modify the `falco.yaml` with the provided example [configuration](#configuration) above
|
||||
4. Add a rule that uses `anomaly.count_min_sketch` as an output field and/or filter to `falco_rules.yaml`, and you're ready to go!
|
||||
|
||||
|
||||
```shell
|
||||
# Read the steps above before running Falco with this plugin
|
||||
sudo falco -c falco.yaml -r falco_rules.yaml
|
||||
```
|
||||
|
||||
## Local Development
|
||||
|
||||
### Build
|
||||
|
||||
```bash
|
||||
git clone https://github.com/falcosecurity/plugins.git
|
||||
cd plugins/plugins/anomalydetection
|
||||
rm -f libanomalydetection.so;
|
||||
rm -f build/libanomalydetection.so;
|
||||
make;
|
||||
# Copy the shared library to the expected location for `falco.yaml`, which is `library_path: libanomalydetection.so`
|
||||
sudo mkdir -p /usr/share/falco/plugins/;
|
||||
sudo cp -f libanomalydetection.so /usr/share/falco/plugins/libanomalydetection.so;
|
||||
```
|
||||
|
||||
|
||||
## References
|
||||
|
||||
### PPME event codes
|
||||
|
||||
Read this [blog post](https://falco.org/blog/adaptive-syscalls-selection/) to learn more about Falco's internal PPME event codes compared to the syscall names you are used to using in Falco rules.
|
||||
|
||||
The list below is complete; no other Falco event codes can be used for the behavior profiles at the moment. The binary will error out if an unsupported event code is used. Thank you for your patience.
|
||||
|
||||
```CPP
|
||||
typedef enum {
|
||||
PPME_SYSCALL_OPEN_X = 3, // compare to "(evt.type=open and evt.dir=<)" in a Falco rule
|
||||
PPME_SOCKET_CONNECT_X = 23, // compare to "(evt.type=connect and evt.dir=<)" in a Falco rule
|
||||
PPME_SYSCALL_CREAT_X = 59, // compare to "(evt.type=creat and evt.dir=<)" in a Falco rule
|
||||
PPME_SYSCALL_CLONE_20_X = 223, // compare to "(evt.type=clone and evt.dir=<)" in a Falco rule
|
||||
PPME_SOCKET_ACCEPT_5_X = 247, // compare to "(evt.type=accept and evt.dir=<)" in a Falco rule
|
||||
PPME_SYSCALL_EXECVE_19_X = 293, // compare to "(evt.type=execve and evt.dir=<)" in a Falco rule
|
||||
PPME_SYSCALL_OPENAT_2_X = 307, // compare to "(evt.type=openat and evt.dir=<)" in a Falco rule
|
||||
PPME_SYSCALL_OPENAT2_X = 327, // compare to "(evt.type=openat2 and evt.dir=<)" in a Falco rule
|
||||
PPME_SYSCALL_EXECVEAT_X = 331, // compare to "(evt.type=execveat and evt.dir=<)" in a Falco rule
|
||||
PPME_SYSCALL_CLONE3_X = 335, // compare to "(evt.type=clone3 and evt.dir=<)" in a Falco rule
|
||||
PPME_SYSCALL_OPEN_BY_HANDLE_AT_X = 337, // compare to "(evt.type=open_by_handle_at and evt.dir=<)" in a Falco rule
|
||||
PPME_SOCKET_ACCEPT4_6_X = 389, // compare to "(evt.type=accept4 and evt.dir=<)" in a Falco rule
|
||||
} ppm_event_code;
|
||||
```
|
||||
|
||||
### Behavior Profiles fields
|
||||
|
||||
Compare to [Supported Fields for Conditions and Outputs](https://falco.org/docs/reference/rules/supported-fields/).
|
||||
|
||||
The list below is complete; no other Falco fields can be used for the behavior profiles at the moment. The binary will error out if an unsupported field is used. Thank you for your patience.
|
||||
|
||||
| Supported Behavior Profile Field | Description |
|
||||
| --- | --- |
|
||||
|proc.exe|The first command-line argument (i.e., argv[0]), typically the executable name or a custom string as specified by the user. It is primarily obtained from syscall arguments, truncated after 4096 bytes, or, as a fallback, by reading /proc/PID/cmdline, in which case it may be truncated after 1024 bytes. This field may differ from the last component of proc.exepath, reflecting how command invocation and execution paths can vary.|
|
||||
|proc.pexe|The proc.exe (first command line argument argv[0]) of the parent process.|
|
||||
|proc.aexe|The proc.exe (first command line argument argv[0]) for a specific process ancestor. You can access different levels of ancestors by using indices. For example, proc.aexe[1] retrieves the proc.exe of the parent process, proc.aexe[2] retrieves the proc.exe of the grandparent process, and so on. The current process's proc.exe line can be obtained using proc.aexe[0]. When used without any arguments, proc.aexe is applicable only in filters and matches any of the process ancestors. For instance, you can use `proc.aexe endswith java` to match any process ancestor whose proc.exe ends with the term `java`.|
|
||||
|proc.exepath|The full executable path of a process, resolving to the canonical path for symlinks. This is primarily obtained from the kernel, or as a fallback, by reading /proc/PID/exe (in the latter case, the path is truncated after 1024 bytes). For eBPF drivers, due to verifier limits, path components may be truncated to 24 for legacy eBPF on kernel <5.2, 48 for legacy eBPF on kernel >=5.2, or 96 for modern eBPF.|
|
||||
|proc.pexepath|The proc.exepath (full executable path) of the parent process.|
|
||||
|proc.aexepath|The proc.exepath (full executable path) for a specific process ancestor. You can access different levels of ancestors by using indices. For example, proc.aexepath[1] retrieves the proc.exepath of the parent process, proc.aexepath[2] retrieves the proc.exepath of the grandparent process, and so on. The current process's proc.exepath line can be obtained using proc.aexepath[0]. When used without any arguments, proc.aexepath is applicable only in filters and matches any of the process ancestors. For instance, you can use `proc.aexepath endswith java` to match any process ancestor whose path ends with the term `java`.|
|
||||
|proc.name|The process name (truncated after 16 characters) generating the event (task->comm). Truncation is determined by kernel settings and not by Falco. This field is collected from the syscalls args or, as a fallback, extracted from /proc/PID/status. The name of the process and the name of the executable file on disk (if applicable) can be different if a process is given a custom name which is often the case for example for java applications.|
|
||||
|proc.pname|The proc.name (truncated after 16 characters) of the parent of the process generating the event.|
|
||||
|proc.aname|The proc.name (truncated after 16 characters) for a specific process ancestor. You can access different levels of ancestors by using indices. For example, proc.aname[1] retrieves the proc.name of the parent process, proc.aname[2] retrieves the proc.name of the grandparent process, and so on. The current process's proc.name line can be obtained using proc.aname[0]. When used without any arguments, proc.aname is applicable only in filters and matches any of the process ancestors. For instance, you can use `proc.aname=bash` to match any process ancestor whose name is `bash`.|
|
||||
|proc.args|The arguments passed on the command line when starting the process generating the event excluding argv[0] (truncated after 4096 bytes). This field is collected from the syscalls args or, as a fallback, extracted from /proc/PID/cmdline.|
|
||||
|proc.cmdline|The concatenation of `proc.name + proc.args` (truncated after 4096 bytes) when starting the process generating the event.|
|
||||
|proc.pcmdline|The proc.cmdline (full command line (proc.name + proc.args)) of the parent of the process generating the event.|
|
||||
|proc.acmdline|The full command line (proc.name + proc.args) for a specific process ancestor. You can access different levels of ancestors by using indices. For example, proc.acmdline[1] retrieves the full command line of the parent process, proc.acmdline[2] retrieves the proc.cmdline of the grandparent process, and so on. The current process's full command line can be obtained using proc.acmdline[0]. When used without any arguments, proc.acmdline is applicable only in filters and matches any of the process ancestors. For instance, you can use `proc.acmdline contains base64` to match any process ancestor whose command line contains the term base64.|
|
||||
|proc.cmdnargs|The number of command line args (proc.args).|
|
||||
|proc.cmdlenargs|The total count of characters / length of the command line args (proc.args) combined excluding whitespaces between args.|
|
||||
|proc.exeline|The full command line, with exe as first argument (proc.exe + proc.args) when starting the process generating the event.|
|
||||
|proc.env|The environment variables of the process generating the event as concatenated string 'ENV_NAME=value ENV_NAME1=value1'. Can also be used to extract the value of a known env variable, e.g. proc.env[ENV_NAME].|
|
||||
|proc.cwd|The current working directory of the event.|
|
||||
|proc.tty|The controlling terminal of the process. 0 for processes without a terminal.|
|
||||
|proc.pid|The id of the process generating the event.|
|
||||
|proc.ppid|The pid of the parent of the process generating the event.|
|
||||
|proc.apid|The pid for a specific process ancestor. You can access different levels of ancestors by using indices. For example, proc.apid[1] retrieves the pid of the parent process, proc.apid[2] retrieves the pid of the grandparent process, and so on. The current process's pid can be obtained using proc.apid[0]. When used without any arguments, proc.apid is applicable only in filters and matches any of the process ancestors. For instance, you can use `proc.apid=1337` to match any process ancestor whose pid is equal to 1337.|
|
||||
|proc.vpid|The id of the process generating the event as seen from its current PID namespace.|
|
||||
|proc.pvpid|The id of the parent process generating the event as seen from its current PID namespace.|
|
||||
|proc.sid|The session id of the process generating the event.|
|
||||
|proc.sname|The name of the current process's session leader. This is either the process with pid=proc.sid or the eldest ancestor that has the same sid as the current process.|
|
||||
|proc.sid.exe|The first command line argument argv[0] (usually the executable name or a custom one) of the current process's session leader. This is either the process with pid=proc.sid or the eldest ancestor that has the same sid as the current process.|
|
||||
|proc.sid.exepath|The full executable path of the current process's session leader. This is either the process with pid=proc.sid or the eldest ancestor that has the same sid as the current process.|
|
||||
|proc.vpgid|The process group id of the process generating the event, as seen from its current PID namespace.|
|
||||
|proc.vpgid.name|The name of the current process's process group leader. This is either the process with proc.vpgid == proc.vpid or the eldest ancestor that has the same vpgid as the current process. The description of `proc.is_vpgid_leader` offers additional insights.|
|
||||
|proc.vpgid.exe|The first command line argument argv[0] (usually the executable name or a custom one) of the current process's process group leader. This is either the process with proc.vpgid == proc.vpid or the eldest ancestor that has the same vpgid as the current process. The description of `proc.is_vpgid_leader` offers additional insights.|
|
||||
|proc.vpgid.exepath|The full executable path of the current process's process group leader. This is either the process with proc.vpgid == proc.vpid or the eldest ancestor that has the same vpgid as the current process. The description of `proc.is_vpgid_leader` offers additional insights.|
|
||||
|proc.is_exe_writable|'true' if this process' executable file is writable by the same user that spawned the process.|
|
||||
|proc.is_exe_upper_layer|'true' if this process' executable file is in upper layer in overlayfs. This field value can only be trusted if the underlying kernel version is greater or equal than 3.18.0, since overlayfs was introduced at that time.|
|
||||
|proc.is_exe_from_memfd|'true' if the executable file of the current process is an anonymous file created using memfd_create() and is being executed by referencing its file descriptor (fd). This type of file exists only in memory and not on disk. Relevant to detect malicious in-memory code injection. Requires kernel version greater or equal to 3.17.0.|
|
||||
|proc.is_sid_leader|'true' if this process is the leader of the process session, proc.sid == proc.vpid. For host processes vpid reflects pid.|
|
||||
|proc.is_vpgid_leader|'true' if this process is the leader of the virtual process group, proc.vpgid == proc.vpid. For host processes vpgid and vpid reflect pgid and pid. Can help to distinguish if the process was 'directly' executed for instance in a tty (similar to bash history logging, `is_vpgid_leader` would be 'true') or executed as descendent process in the same process group which for example is the case when subprocesses are spawned from a script (`is_vpgid_leader` would be 'false').|
|
||||
|proc.exe_ino|The inode number of the executable file on disk. Can be correlated with fd.ino.|
|
||||
|proc.exe_ino.ctime|Last status change time of executable file (inode->ctime) as epoch timestamp in nanoseconds. Time is changed by writing or by setting inode information e.g. owner, group, link count, mode etc.|
|
||||
|proc.exe_ino.mtime|Last modification time of executable file (inode->mtime) as epoch timestamp in nanoseconds. Time is changed by file modifications, e.g. by mknod, truncate, utime, write of more than zero bytes etc. For tracking changes in owner, group, link count or mode, use proc.exe_ino.ctime instead.|
|
||||
|container.id|The truncated container ID (first 12 characters), e.g. 3ad7b26ded6d is extracted from the Linux cgroups by Falco within the kernel. Consequently, this field is reliably available and serves as the lookup key for Falco's synchronous or asynchronous requests against the container runtime socket to retrieve all other `'container.*'` information. One important aspect to be aware of is that if the process occurs on the host, meaning not in the container PID namespace, this field is set to a string called 'host'. In Kubernetes, pod sandbox container processes can exist where `container.id` matches `k8s.pod.sandbox_id`, lacking other 'container.*' details.|
|
||||
|fd.num|the unique number identifying the file descriptor.|
|
||||
|fd.name|FD full name. If the fd is a file, this field contains the full path. If the FD is a socket, this field contains the connection tuple.|
|
||||
|fd.directory|If the fd is a file, the directory that contains it.|
|
||||
|fd.filename|If the fd is a file, the filename without the path.|
|
||||
|fd.dev|device number (major/minor) containing the referenced file|
|
||||
|fd.ino|inode number of the referenced file|
|
||||
|fd.nameraw|FD full name raw. Just like fd.name, but only used if fd is a file path. File path is kept raw with limited sanitization and without deriving the absolute path.|
|
||||
|custom.proc.aname.lineage.join|[Incubating] Concatenated string of the proc.aname lineage, provided as a single field for better performance. It requires an argument to specify the maximum level of traversal, e.g. 'custom.proc.aname.lineage.join[7]'. This is a custom plugin-specific field for the anomaly behavior profiles only. It may be deprecated in the future.|
|
||||
|custom.proc.aexe.lineage.join|[Incubating] Concatenated string of the proc.aexe lineage, provided as a single field for better performance. It requires an argument to specify the maximum level of traversal, e.g. 'custom.proc.aexe.lineage.join[7]'. This is a custom plugin-specific field for the anomaly behavior profiles only. It may be deprecated in the future.|
|
||||
|custom.proc.aexepath.lineage.join|[Incubating] Concatenated string of the proc.aexepath lineage, provided as a single field for better performance. It requires an argument to specify the maximum level of traversal, e.g. 'custom.proc.aexepath.lineage.join[7]'. This is a custom plugin-specific field for the anomaly behavior profiles only. It may be deprecated in the future.|
|
||||
|custom.fd.name.part1|[Incubating] For fd-related network events only. The first part of the IP tuple as a string in the format 'ip:port', e.g. '172.40.111.222:54321' given fd.name '172.40.111.222:54321->142.251.111.147:443'. This is a custom plugin-specific field for the anomaly behavior profiles only. It may be deprecated in the future.|
|
||||
|custom.fd.name.part2|[Incubating] For fd-related network events only. The second part of the IP tuple as a string in the format 'ip:port', e.g. '142.251.111.147:443' given fd.name '172.40.111.222:54321->142.251.111.147:443'. This is a custom plugin-specific field for the anomaly behavior profiles only. It may be deprecated in the future.|
|
|
@ -0,0 +1,26 @@
|
|||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
# Copyright (C) 2024 The Falco Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations under the License.
|
||||
#
|
||||
|
||||
message(STATUS "Fetching libs at 'https://github.com/falcosecurity/libs.git'")
|
||||
|
||||
FetchContent_Declare(
|
||||
libs
|
||||
GIT_REPOSITORY https://github.com/falcosecurity/libs.git
|
||||
GIT_TAG 273299c5832ab7efa6a93547f7c3bd55706b135c
|
||||
CONFIGURE_COMMAND "" BUILD_COMMAND "")
|
||||
|
||||
FetchContent_MakeAvailable(libs)
|
||||
set(LIBS_INCLUDE "${libs_SOURCE_DIR}")
|
||||
set(LIBS_DIR "${libs_SOURCE_DIR}")
|
||||
message(STATUS "Using libs include at '${LIBS_INCLUDE}'")
|
|
@ -0,0 +1,27 @@
|
|||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
# Copyright (C) 2024 The Falco Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations under the License.
|
||||
#
|
||||
|
||||
message(
|
||||
STATUS
|
||||
"Fetching plugin-sdk-cpp at 'https://github.com/falcosecurity/plugin-sdk-cpp.git'"
|
||||
)
|
||||
|
||||
FetchContent_Declare(
|
||||
plugin-sdk-cpp
|
||||
GIT_REPOSITORY https://github.com/falcosecurity/plugin-sdk-cpp.git
|
||||
GIT_TAG 1c46ba02e8e9fe30a8362a54e99a6c3c804661f6)
|
||||
|
||||
FetchContent_MakeAvailable(plugin-sdk-cpp)
|
||||
set(PLUGIN_SDK_INCLUDE "${plugin-sdk-cpp_SOURCE_DIR}/include")
|
||||
message(STATUS "Using plugin-sdk-cpp include at '${PLUGIN_SDK_INCLUDE}'")
|
|
@ -0,0 +1,31 @@
|
|||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
# Copyright (C) 2024 The Falco Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations under the License.
|
||||
#
|
||||
|
||||
message(
|
||||
STATUS
|
||||
"Fetching xxhash at 'https://raw.githubusercontent.com/Cyan4973/xxHash/v0.8.2/xxhash.h'"
|
||||
)
|
||||
|
||||
FetchContent_Declare(
|
||||
# BSD 2-Clause License
|
||||
xxhash
|
||||
URL "https://raw.githubusercontent.com/Cyan4973/xxHash/v0.8.2/xxhash.h"
|
||||
URL_HASH SHA256=be275e9db21a503c37f24683cdb4908f2370a3e35ab96e02c4ea73dc8e399c43
|
||||
DOWNLOAD_NAME "xxhash.h"
|
||||
DOWNLOAD_NO_EXTRACT TRUE
|
||||
)
|
||||
|
||||
FetchContent_MakeAvailable(xxhash)
|
||||
set(XXHASH_INCLUDE "${xxhash_SOURCE_DIR}")
|
||||
message(STATUS "Using xxhash include at '${XXHASH_INCLUDE}'")
|
|
@ -0,0 +1,228 @@
|
|||
// SPDX-License-Identifier: Apache-2.0
|
||||
/*
|
||||
Copyright (C) 2024 The Falco Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "xxhash_ext.h"
|
||||
|
||||
#include <iostream>
|
||||
#include <cstdint>
|
||||
#include <cmath>
|
||||
#include <vector>
|
||||
#include <algorithm>
|
||||
#include <memory>
|
||||
|
||||
/*
|
||||
CountMinSketch Powered Probabilistic Counting and Filtering
|
||||
Falco Proposal: https://github.com/falcosecurity/falco/blob/master/proposals/20230620-anomaly-detection-framework.md
|
||||
*/
|
||||
|
||||
namespace plugin::anomalydetection::num
|
||||
{
|
||||
|
||||
template<typename T>
|
||||
class cms
|
||||
{
|
||||
private:
|
||||
std::unique_ptr<std::unique_ptr<T[]>[]> sketch;
|
||||
uint64_t d_; // d / Rows / number of hash functions
|
||||
uint64_t w_; // w / Cols / number of buckets
|
||||
double gamma_; // Error probability (e.g. 0.001)
|
||||
double eps_; // Relative error (e.g. 0.0001)
|
||||
|
||||
public:
|
||||
static uint64_t calculate_d_rows_from_gamma(double gamma)
|
||||
{
|
||||
// -> determine Rows / number of hash functions
|
||||
return static_cast<uint64_t>(std::ceil(std::log(1.0 / gamma)));
|
||||
}
|
||||
|
||||
static double calculate_gamma_rows_from_d(uint64_t d)
|
||||
{
|
||||
// -> reverse calculate error probability from Rows / number of hash functions
|
||||
return 1.0 / std::exp(d);
|
||||
}
|
||||
|
||||
static uint64_t calculate_w_cols_buckets_from_eps(double eps)
|
||||
{
|
||||
// -> determine Cols / number of buckets
|
||||
return static_cast<uint64_t>(std::ceil(std::exp(1) / eps));
|
||||
}
|
||||
|
||||
static double calculate_eps_cols_buckets_from_w(uint64_t w)
|
||||
{
|
||||
// -> reverse calculate relative error from Cols / number of buckets
|
||||
return std::exp(1) / w;
|
||||
}
|
||||
|
||||
cms(double gamma, double eps)
|
||||
{
|
||||
d_ = calculate_d_rows_from_gamma(gamma); // -> determine Rows / number of hash functions
|
||||
w_ = calculate_w_cols_buckets_from_eps(eps); // -> determine Cols / number of buckets
|
||||
gamma_ = gamma;
|
||||
eps_ = eps;
|
||||
sketch = std::make_unique<std::unique_ptr<T[]>[]>(d_);
|
||||
for (uint64_t i = 0; i < d_; ++i)
|
||||
{
|
||||
sketch[i] = std::make_unique<T[]>(w_);
|
||||
std::fill(sketch[i].get(), sketch[i].get() + w_, static_cast<T>(0)); // Init to 0
|
||||
}
|
||||
}
|
||||
|
||||
// Overloaded constructor
|
||||
cms(uint64_t d, uint64_t w)
|
||||
{
|
||||
d_ = d;
|
||||
w_ = w;
|
||||
gamma_ = calculate_gamma_rows_from_d(d); // -> reverse calculate error probability from Rows / number of hash functions
|
||||
eps_ = calculate_eps_cols_buckets_from_w(w); // -> reverse calculate relative error from Cols / number of buckets
|
||||
sketch = std::make_unique<std::unique_ptr<T[]>[]>(d_);
|
||||
for (uint64_t i = 0; i < d_; ++i)
|
||||
{
|
||||
sketch[i] = std::make_unique<T[]>(w_);
|
||||
std::fill(sketch[i].get(), sketch[i].get() + w_, static_cast<T>(0)); // Init to 0
|
||||
}
|
||||
}
|
||||
|
||||
void reset()
|
||||
{
|
||||
// Reset data structure
|
||||
for (uint64_t i = 0; i < d_; ++i)
|
||||
{
|
||||
std::fill(sketch[i].get(), sketch[i].get() + w_, static_cast<T>(0));
|
||||
}
|
||||
}
|
||||
|
||||
uint64_t hash_XXH3_seed(std::string value, uint64_t seed) const
|
||||
{
|
||||
// using https://raw.githubusercontent.com/Cyan4973/xxHash/v0.8.2/xxhash.h
|
||||
// Requirement: Need fast and reliable independent hash functions.
|
||||
uint64_t hash = XXH3_64bits_withSeed(value.c_str(), value.size(), seed);
|
||||
return hash;
|
||||
}
|
||||
|
||||
void update(std::string value, T count)
|
||||
{
|
||||
if (value.empty())
|
||||
{
|
||||
return;
|
||||
}
|
||||
// Update counts for each hash function.
|
||||
// Note: d is typically very small (e.g. < 10)
|
||||
for (uint64_t seed = 0; seed < d_; ++seed)
|
||||
{
|
||||
// Map the hash value to an index of the current sketch Row by taking the modulo of the hash value, where w is the number of buckets.
|
||||
// Simply loop over d, which is the number of hash functions, to obtain a seed in order to use independent hash functions for each Row.
|
||||
sketch[seed][hash_XXH3_seed(value, seed) % w_] += count;
|
||||
}
|
||||
}
|
||||
|
||||
T update_estimate(std::string value, T count) const
|
||||
{
|
||||
if (value.empty())
|
||||
{
|
||||
return T();
|
||||
}
|
||||
std::vector<T> estimates;
|
||||
// Same as the update function, but also returns the minimum count as an estimate.
|
||||
// Note: d is typically very small (e.g. < 10)
|
||||
for (uint64_t seed = 0; seed < d_; ++seed)
|
||||
{
|
||||
uint64_t index = hash_XXH3_seed(value, seed) % w_;
|
||||
sketch[seed][index] += count;
|
||||
estimates.push_back(sketch[seed][index]);
|
||||
}
|
||||
auto min_element = std::min_element(estimates.begin(), estimates.end());
|
||||
return min_element != estimates.end() ? *min_element : T();
|
||||
}
|
||||
|
||||
T estimate(std::string value) const
|
||||
{
|
||||
if (value.empty())
|
||||
{
|
||||
return T();
|
||||
}
|
||||
std::vector<T> estimates;
|
||||
// Return the minimum count across hash functions as an estimate.
|
||||
// Note: d is typically very small (e.g. < 10)
|
||||
for (uint64_t seed = 0; seed < d_; ++seed)
|
||||
{
|
||||
uint64_t index = hash_XXH3_seed(value, seed) % w_;
|
||||
estimates.push_back(sketch[seed][index]);
|
||||
}
|
||||
auto min_element = std::min_element(estimates.begin(), estimates.end());
|
||||
return min_element != estimates.end() ? *min_element : T();
|
||||
}
|
||||
|
||||
T get_item(uint64_t row, uint64_t col) const
|
||||
{
|
||||
if (row >= 0 && row < d_ && col >= 0 && col < w_)
|
||||
{
|
||||
return sketch[row][col];
|
||||
} else
|
||||
{
|
||||
return T();
|
||||
}
|
||||
}
|
||||
|
||||
size_t get_size_bytes() const
|
||||
{
|
||||
return d_ * w_ * sizeof(T);
|
||||
}
|
||||
|
||||
static size_t get_size_bytes(uint64_t d, uint64_t w)
|
||||
{
|
||||
return d * w * sizeof(T);
|
||||
}
|
||||
|
||||
std::pair<uint64_t, uint64_t> get_dimensions() const
|
||||
{
|
||||
return std::make_pair(d_, w_);
|
||||
}
|
||||
|
||||
// Return Rows / number of hash functions
|
||||
uint64_t get_d() const
|
||||
{
|
||||
return d_;
|
||||
}
|
||||
|
||||
// Return Cols / number of buckets
|
||||
uint64_t get_w() const
|
||||
{
|
||||
return w_;
|
||||
}
|
||||
|
||||
// Return error probability
|
||||
double get_gamma() const
|
||||
{
|
||||
return gamma_;
|
||||
}
|
||||
|
||||
// Return relative error
|
||||
double get_eps() const
|
||||
{
|
||||
return eps_;
|
||||
}
|
||||
|
||||
cms(cms&&) noexcept = default;
|
||||
cms(const cms&) = default;
|
||||
cms& operator=(cms&&) noexcept = default;
|
||||
cms& operator=(const cms&) = default;
|
||||
cms() = delete;
|
||||
};
|
||||
|
||||
} // namespace plugin::anomalydetection::num
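
A minimal usage sketch of the `cms` template above (illustrative only, not part of the changeset; it assumes the plugin's include paths so that `num/cms.h` and the fetched `xxhash.h` resolve). A behavior profile is just a concatenated string of extracted field values that is then counted via `update()` / `estimate()`:

```cpp
// Illustrative only: exercise the cms<T> template defined in num/cms.h.
#include "num/cms.h"

#include <cstdio>
#include <string>

int main()
{
    using plugin::anomalydetection::num::cms;

    // gamma = 0.001 (error probability), eps = 0.0001 (relative error) -> 7 x 27183 sketch
    cms<uint64_t> sketch(0.001, 0.0001);

    // In the plugin, this string is assembled from the behavior profile fields
    // (e.g. "%container.id %proc.name %proc.tty ...") resolved for one event.
    std::string profile = "host cat 34816";

    sketch.update(profile, 1);                       // increment the count for this profile
    uint64_t n = sketch.update_estimate(profile, 1); // increment and read back the estimate
    uint64_t m = sketch.estimate(profile);           // read-only estimate

    std::printf("d=%llu w=%llu estimate_after_update=%llu estimate=%llu\n",
                (unsigned long long)sketch.get_d(),
                (unsigned long long)sketch.get_w(),
                (unsigned long long)n,
                (unsigned long long)m);
    return 0;
}
```

In the plugin itself the profile string is assembled during event parsing (see `extract_filterchecks_concat_profile` declared in `plugin.h` later in this changeset), and the resulting estimate is what `anomaly.count_min_sketch[i]` exposes to Falco rules.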
|
|
@ -0,0 +1,19 @@
|
|||
// SPDX-License-Identifier: Apache-2.0
|
||||
/*
|
||||
Copyright (C) 2024 The Falco Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
#define XXH_INLINE_ALL
|
||||
#include <xxhash.h>
|
File diff suppressed because it is too large
|
@@ -0,0 +1,228 @@
// SPDX-License-Identifier: Apache-2.0
/*
Copyright (C) 2024 The Falco Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

#pragma once

#include "num/cms.h"
#include "plugin_consts.h"
#include "plugin_utils.h"
#include "plugin_mutex.h"
#include "plugin_thread_manager.h"
#include "plugin_sinsp_filterchecks.h"

#include <falcosecurity/sdk.h>
#include <driver/ppm_events_public.h> // Temporary workaround to avoid redefining syscalls PPME events and risking being out of sync

#include <thread>
#include <atomic>
#include <chrono>
#include <unordered_map>
#include <unordered_set>
#include <sstream>

#define UINT32_MAX (4294967295U)
#define PPM_AT_FDCWD -100
#define SECOND_TO_NS 1000000000ULL

struct sinsp_param
{
    uint16_t param_len;
    uint8_t* param_pointer;
};

class anomalydetection
{
public:
    anomalydetection() : m_thread_manager() {}

    // Keep this aligned with `get_fields`
    enum anomalydetection_fields
    {
        ANOMALYDETECTION_COUNT_MIN_SKETCH_COUNT = 0,
        ANOMALYDETECTION_COUNT_MIN_SKETCH_BEHAVIOR_PROFILE_CONCAT_STR,
        ANOMALYDETECTION_FALCO_DURATION_NS,
        ANOMALYDETECTION_FIELD_MAX
    };

    //////////////////////////
    // General plugin API
    //////////////////////////

    virtual ~anomalydetection() = default;

    std::string get_name() { return PLUGIN_NAME; }

    std::string get_version() { return PLUGIN_VERSION; }

    std::string get_description() { return PLUGIN_DESCRIPTION; }

    std::string get_contact() { return PLUGIN_CONTACT; }

    std::string get_required_api_version()
    {
        return PLUGIN_REQUIRED_API_VERSION;
    }

    falcosecurity::init_schema get_init_schema();

    void parse_init_config(nlohmann::json& config_json);

    bool init(falcosecurity::init_input& in);

    // todo
    // void destroy();

    std::string get_last_error() { return m_lasterr; }

    static void log_error(std::string err_mess);

    //////////////////////////
    // Extract capability
    //////////////////////////

    // required; standard plugin API
    std::vector<std::string> get_extract_event_sources()
    {
        return {"syscall"};
    }

    // required; standard plugin API
    std::vector<falcosecurity::field_info> get_fields();

    // required; standard plugin API
    bool extract(const falcosecurity::extract_fields_input& in);

    //////////////////////////
    // Parse capability
    //////////////////////////

    // required; standard plugin API
    std::vector<std::string> get_parse_event_sources()
    {
        return {"syscall"};
    }

    // required; standard plugin API
    std::vector<falcosecurity::event_type> get_parse_event_types()
    {
        std::vector<falcosecurity::event_type> event_types;
        // Temporary workaround
        for (int i = PPME_GENERIC_E; i <= PPM_EVENT_MAX; ++i)
        {
            event_types.push_back(static_cast<falcosecurity::event_type>(i));
        }
        return event_types;
    }

    // required; standard plugin API
    bool parse_event(const falcosecurity::parse_event_input& in);

    // Custom helper functions within event parsing
    bool extract_filterchecks_concat_profile(const falcosecurity::event_reader &evt, const falcosecurity::table_reader &tr, const std::vector<plugin_sinsp_filterchecks_field>& fields, std::string& behavior_profile_concat_str);
    std::string extract_filterchecks_evt_params_fallbacks(const falcosecurity::event_reader &evt, const plugin_sinsp_filterchecks_field& field, const std::string& cwd = "");

private:

    // Manages plugin side threads, such as resetting the count min sketch data structures
    ThreadManager m_thread_manager;

    // Epoch of Falco agent run start, re-creates libs agent_info->start_ts_epoch info
    uint64_t m_falco_start_ts_epoch_ns;

    /* Note: While we have set the stage for supporting multiple algorithms in this plugin,
    the class is currently designed with direct members specific to the count_min_sketch use case.
    This will be refactored and refined in the future.
    */
    bool m_count_min_sketch_enabled = false;
    uint32_t m_n_sketches = 0;
    std::vector<std::vector<double>> m_gamma_eps;
    std::vector<std::vector<uint64_t>> m_rows_cols; // If set supersedes m_gamma_eps
    std::vector<std::vector<plugin_sinsp_filterchecks_field>> m_behavior_profiles_fields;
    std::vector<std::unordered_set<ppm_event_code>> m_behavior_profiles_event_codes;
    std::vector<uint64_t> m_reset_timers;

    // Plugin managed state table specific to the count_min_sketch use case
    plugin_anomalydetection::Mutex<std::vector<std::shared_ptr<plugin::anomalydetection::num::cms<uint64_t>>>> m_count_min_sketches;

    // required; standard plugin API
    std::string m_lasterr;
    // required; standard plugin API; accessor to falcosecurity/libs' thread table
    falcosecurity::table m_thread_table;

    /* Subtables */
    falcosecurity::table_field m_args; ///< args subtable
    falcosecurity::table_field m_env; ///< env variables subtable
    falcosecurity::table_field m_fds; ///< fd subtable

    /* proc related */
    falcosecurity::table_field m_tid; ///< The id of this thread
    falcosecurity::table_field m_pid; ///< The id of the process containing this thread. In single thread threads, this is equal to tid.
    falcosecurity::table_field m_ptid; ///< The id of the process that started this thread.
    falcosecurity::table_field m_sid; ///< The session id of the process containing this thread.
    falcosecurity::table_field m_comm; ///< Command name (e.g. "top")
    falcosecurity::table_field m_exe; ///< argv[0] (e.g. "sshd: user@pts/4")
    falcosecurity::table_field m_exepath; ///< full executable path
    falcosecurity::table_field m_exe_writable;
    falcosecurity::table_field m_exe_upper_layer; ///< True if the executable file belongs to upper layer in overlayfs
    falcosecurity::table_field m_exe_from_memfd; ///< True if the executable is stored in fileless memory referenced by memfd
    falcosecurity::table_field m_exe_ino;
    falcosecurity::table_field m_exe_ino_ctime;
    falcosecurity::table_field m_exe_ino_mtime;
    // falcosecurity::table_field m_cap_permitted; // todo fix/expose via plugin API
    // falcosecurity::table_field m_cap_inheritable; // todo fix/expose via plugin API
    // falcosecurity::table_field m_cap_effective; // todo fix/expose via plugin API
    falcosecurity::table_field m_args_value; ///< Value entry to command line arguments (e.g. "-d1") from the args array
    falcosecurity::table_field m_env_value; ///< Value entry
    falcosecurity::table_field m_group; ///< group infos
    falcosecurity::table_field m_vtid; ///< The virtual id of this thread.
    falcosecurity::table_field m_vpid; ///< The virtual id of the process containing this thread. In single thread threads, this is equal to vtid.
    falcosecurity::table_field m_vpgid; // The virtual process group id, as seen from its pid namespace
    falcosecurity::table_field m_tty; ///< Number of controlling terminal
    falcosecurity::table_field m_cwd; ///< current working directory

    /* user related */
    // Not available until the next libs plugins API expansion
    // falcosecurity::table_field m_uid; ///< user uid
    // falcosecurity::table_field m_user; ///< user infos
    // falcosecurity::table_field m_loginuid; ///< auid
    // falcosecurity::table_field m_loginuser; ///< loginuser infos (auid)

    /* fd related */
    // falcosecurity::table_field m_fd_type_value; // todo fix/expose via plugin API
    falcosecurity::table_field m_fd_openflags_value;
    // falcosecurity::table_field m_fd_sockinfo_value; // todo fix/expose via plugin API
    falcosecurity::table_field m_fd_name_value;
    falcosecurity::table_field m_fd_nameraw_value;
    falcosecurity::table_field m_fd_oldname_value;
    falcosecurity::table_field m_fd_flags_value;
    falcosecurity::table_field m_fd_dev_value;
    falcosecurity::table_field m_fd_mount_id_value;
    falcosecurity::table_field m_fd_ino_value;
    falcosecurity::table_field m_fd_pid_value;
    // falcosecurity::table_field m_fd_fd_value; // todo fix/expose via plugin API

    /* container related */
    falcosecurity::table_field m_container_id; ///< heuristic-based container id

    /* Custom write/read fields*/
    falcosecurity::table_field m_lastevent_fd_field; // todo fix/expose via plugin API
};

// required; standard plugin API
FALCOSECURITY_PLUGIN(anomalydetection);
FALCOSECURITY_PLUGIN_FIELD_EXTRACTION(anomalydetection);
FALCOSECURITY_PLUGIN_EVENT_PARSING(anomalydetection);
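The class above sizes each count-min sketch either from `m_gamma_eps` or directly from `m_rows_cols`. As a reference point, here is a minimal, self-contained sketch of the textbook count-min sketch dimensioning that such (gamma, eps) parameters are conventionally translated into; the concrete formulas inside `num/cms.h` are not part of this diff and may differ:

```cpp
// Illustrative only: textbook count-min sketch dimensioning from (gamma, eps).
#include <cmath>
#include <cstdint>
#include <cstdio>

int main()
{
    double gamma = 0.001; // probability of exceeding the error bound
    double eps = 0.0001;  // error bound relative to the total stream count

    // d rows (one hash function per row), w counters per row
    uint64_t d = static_cast<uint64_t>(std::ceil(std::log(1.0 / gamma)));
    uint64_t w = static_cast<uint64_t>(std::ceil(std::exp(1.0) / eps));

    std::printf("rows (d) = %llu, cols (w) = %llu\n",
                static_cast<unsigned long long>(d),
                static_cast<unsigned long long>(w));
    return 0;
}
```

For gamma = 0.001 and eps = 0.0001 this gives d = 7 and w = 27183, the kind of pair that could alternatively be supplied straight as rows/cols to supersede gamma/eps.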
@@ -0,0 +1,35 @@
// SPDX-License-Identifier: Apache-2.0
/*
Copyright (C) 2024 The Falco Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

#pragma once

/////////////////////////
// Generic plugin consts
/////////////////////////

#define PLUGIN_NAME "anomalydetection"
#define PLUGIN_VERSION "0.1.0"
#define PLUGIN_DESCRIPTION "Enhance {syscall} event analysis by incorporating anomaly detection estimates for probabilistic filtering."
#define PLUGIN_CONTACT "github.com/falcosecurity/plugins"
#define PLUGIN_REQUIRED_API_VERSION "3.6.0"
#define PLUGIN_LOG_PREFIX "[anomalydetection]"

///////////////////////////
// Thread Table (libsinsp)
///////////////////////////

#define THREAD_TABLE_NAME "threads"
@@ -0,0 +1,183 @@
// SPDX-License-Identifier: Apache-2.0
/*
Copyright (C) 2024 The Falco Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

*/

// Below adopted 1:1 from falcosecurity/libs repo for this plugin

#pragma once

#include <mutex>
#include <thread>

namespace plugin_anomalydetection {
template<typename T>
class ConstMutexGuard;

/**
 * \brief A wrapper to allow synchronized access to a value owned by a Mutex<T>
 *
 * @tparam T type of the value protected by the mutex
 *
 * It works by simply holding a `std::unique_lock` object that keeps the mutex
 * locked while it exists and unlocks it upon destruction
 */
template<typename T>
class MutexGuard {
public:
    MutexGuard(std::unique_lock<std::mutex> lock, T *inner) : m_lock(std::move(lock)), m_inner(inner) {}

    // we cannot copy a MutexGuard, only move
    MutexGuard(MutexGuard &rhs) = delete;
    MutexGuard& operator=(MutexGuard &rhs) = delete;
    MutexGuard(MutexGuard &&rhs) noexcept : m_lock(std::move(rhs.m_lock)),
                                            m_inner(rhs.m_inner) {}

    T *operator->()
    {
        return m_inner;
    }

    T &operator*()
    {
        return *m_inner;
    }

    /**
     * Validate that the guarded object exists.
     */
    bool valid()
    {
        return m_inner != nullptr;
    }

private:
    std::unique_lock<std::mutex> m_lock;
    T *m_inner;

    friend class ConstMutexGuard<T>;
};

/**
 * \brief A wrapper to allow synchronized const access to a value owned by a Mutex<T>
 *
 * @tparam T type of the value protected by the mutex
 *
 * It works by simply holding a `std::unique_lock` object that keeps the mutex
 * locked while it exists and unlocks it upon destruction
 */
template<typename T>
class ConstMutexGuard {
public:
    ConstMutexGuard(std::unique_lock<std::mutex> lock, const T *inner) : m_lock(std::move(lock)),
                                                                         m_inner(inner) {
    }

    // we cannot copy a ConstMutexGuard, only move
    ConstMutexGuard(ConstMutexGuard &rhs) = delete;
    ConstMutexGuard& operator=(ConstMutexGuard &rhs) = delete;
    ConstMutexGuard(ConstMutexGuard &&rhs) noexcept : m_lock(std::move(rhs.m_lock)),
                                                      m_inner(rhs.m_inner) {}

    // a writable guard can be demoted to a read-only one, but *not* the other way around
    ConstMutexGuard(MutexGuard<T> &&rhs) noexcept : m_lock(std::move(rhs.m_lock)),
                                                    m_inner(rhs.m_inner) // NOLINT(google-explicit-constructor)
    {}

    const T *operator->() const
    {
        return m_inner;
    }

    const T &operator*() const
    {
        return *m_inner;
    }

    /**
     * Validate that the guarded object exists.
     */
    bool valid()
    {
        return m_inner != nullptr;
    }

private:
    std::unique_lock<std::mutex> m_lock;
    const T *m_inner;
};

/**
 * \brief Wrap a value of type T, enforcing synchronized access
 *
 * @tparam T type of the wrapped value
 *
 * The class owns a value of type T and a mutex. The only way to access the T inside
 * is via the lock() method, which returns a guard object that unlocks the mutex
 * once it falls out of scope
 *
 * To protect an object with a mutex, declare a variable of type `Mutex<T>`, e.g.
 *
 * Mutex<std::vector<int>> m_locked_vector;
 *
 * Then, to access the variable, call .lock() on the Mutex object:
 *
 * MutexGuard<std::vector<int>> locked = m_locked_vector.lock();
 *
 * Now you can call the inner object's methods directly on the guard object,
 * which behaves like a smart pointer to the inner object:
 *
 * size_t num_elts = locked->size();
 *
 */
template<typename T>
class Mutex {
public:
    Mutex() = default;

    Mutex(T inner) : m_inner(std::move(inner)) {}

    /**
     * \brief Lock the mutex, allowing access to the stored object
     *
     * The returned guard object allows access to the protected data
     * via operator * or -> and ensures the lock is held as long as
     * the guard object exists
     */
    MutexGuard<T> lock()
    {
        return MutexGuard<T>(std::unique_lock<std::mutex>(m_lock), &m_inner);
    }

    /**
     * \brief Lock the mutex, allowing access to the stored object
     *
     * The returned guard object allows access to the protected data
     * via operator * or -> and ensures the lock is held as long as
     * the guard object exists
     *
     * `const Mutex<T>` only allows read-only access to the protected object
     */
    ConstMutexGuard<T> lock() const
    {
        return ConstMutexGuard<T>(std::unique_lock<std::mutex>(m_lock), &m_inner);
    }

private:
    mutable std::mutex m_lock;
    T m_inner;
};
}
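A minimal usage sketch for the `Mutex<T>` / `MutexGuard<T>` / `ConstMutexGuard<T>` wrapper defined above, following the pattern described in its own doc comment (illustrative; it assumes the header is reachable on the include path as `plugin_mutex.h`):

```cpp
// Minimal usage sketch for plugin_anomalydetection::Mutex<T>.
#include "plugin_mutex.h"

#include <cstdio>
#include <vector>

int main()
{
    plugin_anomalydetection::Mutex<std::vector<int>> locked_vector(std::vector<int>{1, 2, 3});

    {
        auto guard = locked_vector.lock();   // MutexGuard keeps the mutex held
        guard->push_back(4);                 // operator-> forwards to the wrapped vector
        std::printf("size=%zu\n", guard->size());
    } // guard destroyed here, mutex released

    const auto& read_only = locked_vector;
    auto const_guard = read_only.lock();     // const Mutex<T> yields a ConstMutexGuard
    std::printf("front=%d\n", (*const_guard).front());
    return 0;
}
```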
@@ -0,0 +1,274 @@
// SPDX-License-Identifier: Apache-2.0
/*
Copyright (C) 2024 The Falco Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

#pragma once

#include <falcosecurity/sdk.h>
#include <driver/ppm_events_public.h> // Temporary workaround

namespace plugin_sinsp_filterchecks
{
enum check_type
{
    TYPE_EXE = 0,
    TYPE_PEXE,
    TYPE_AEXE,
    TYPE_EXEPATH,
    TYPE_PEXEPATH,
    TYPE_AEXEPATH,
    TYPE_NAME,
    TYPE_PNAME,
    TYPE_ANAME,
    TYPE_ARGS,
    TYPE_CMDLINE,
    TYPE_PCMDLINE,
    TYPE_ACMDLINE,
    TYPE_CMDNARGS,
    TYPE_CMDLENARGS,
    TYPE_EXELINE,
    TYPE_ENV,
    TYPE_AENV,
    TYPE_CWD,
    TYPE_LOGINSHELLID,
    TYPE_TTY,
    TYPE_PID,
    TYPE_PPID,
    TYPE_APID,
    TYPE_VPID,
    TYPE_PVPID,
    TYPE_SID,
    TYPE_SNAME,
    TYPE_SID_EXE,
    TYPE_SID_EXEPATH,
    TYPE_VPGID,
    TYPE_VPGID_NAME,
    TYPE_VPGID_EXE,
    TYPE_VPGID_EXEPATH,
    TYPE_DURATION,
    TYPE_PPID_DURATION,
    TYPE_PID_CLONE_TS,
    TYPE_PPID_CLONE_TS,
    TYPE_IS_EXE_WRITABLE,
    TYPE_IS_EXE_UPPER_LAYER,
    TYPE_IS_EXE_FROM_MEMFD,
    TYPE_IS_SID_LEADER,
    TYPE_IS_VPGID_LEADER,
    TYPE_EXE_INO,
    TYPE_EXE_INO_CTIME,
    TYPE_EXE_INO_MTIME,
    TYPE_EXE_INO_CTIME_DURATION_CLONE_TS,
    TYPE_EXE_INO_CTIME_DURATION_PIDNS_START,
    TYPE_PIDNS_INIT_START_TS,
    TYPE_CAP_PERMITTED,
    TYPE_CAP_INHERITABLE,
    TYPE_CAP_EFFECTIVE,
    TYPE_IS_CONTAINER_HEALTHCHECK,
    TYPE_IS_CONTAINER_LIVENESS_PROBE,
    TYPE_IS_CONTAINER_READINESS_PROBE,
    TYPE_FDOPENCOUNT,
    TYPE_FDLIMIT,
    TYPE_FDUSAGE,
    TYPE_VMSIZE,
    TYPE_VMRSS,
    TYPE_VMSWAP,
    TYPE_PFMAJOR,
    TYPE_PFMINOR,
    TYPE_TID,
    TYPE_ISMAINTHREAD,
    TYPE_VTID,
    TYPE_NAMETID,
    TYPE_EXECTIME,
    TYPE_TOTEXECTIME,
    TYPE_CGROUPS,
    TYPE_CGROUP,
    TYPE_NTHREADS,
    TYPE_NCHILDS,
    TYPE_THREAD_CPU,
    TYPE_THREAD_CPU_USER,
    TYPE_THREAD_CPU_SYSTEM,
    TYPE_THREAD_VMSIZE,
    TYPE_THREAD_VMRSS,
    TYPE_THREAD_VMSIZE_B,
    TYPE_THREAD_VMRSS_B,
    TYPE_CONTAINER_ID,
    TYPE_CONTAINER_FULL_CONTAINER_ID,
    TYPE_CONTAINER_NAME,
    TYPE_CONTAINER_IMAGE,
    TYPE_CONTAINER_IMAGE_ID,
    TYPE_CONTAINER_TYPE,
    TYPE_CONTAINER_PRIVILEGED,
    TYPE_CONTAINER_MOUNTS,
    TYPE_CONTAINER_MOUNT,
    TYPE_CONTAINER_MOUNT_SOURCE,
    TYPE_CONTAINER_MOUNT_DEST,
    TYPE_CONTAINER_MOUNT_MODE,
    TYPE_CONTAINER_MOUNT_RDWR,
    TYPE_CONTAINER_MOUNT_PROPAGATION,
    TYPE_CONTAINER_IMAGE_REPOSITORY,
    TYPE_CONTAINER_IMAGE_TAG,
    TYPE_CONTAINER_IMAGE_DIGEST,
    TYPE_CONTAINER_HEALTHCHECK,
    TYPE_CONTAINER_LIVENESS_PROBE,
    TYPE_CONTAINER_READINESS_PROBE,
    TYPE_CONTAINER_START_TS,
    TYPE_CONTAINER_DURATION,
    TYPE_CONTAINER_IP_ADDR,
    TYPE_CONTAINER_CNIRESULT,
    TYPE_FDNUM,
    TYPE_FDTYPE,
    TYPE_FDTYPECHAR,
    TYPE_FDNAME,
    TYPE_DIRECTORY,
    TYPE_FILENAME,
    TYPE_IP,
    TYPE_CLIENTIP,
    TYPE_SERVERIP,
    TYPE_LIP,
    TYPE_RIP,
    TYPE_PORT,
    TYPE_CLIENTPORT,
    TYPE_SERVERPORT,
    TYPE_LPORT,
    TYPE_RPORT,
    TYPE_L4PROTO,
    TYPE_SOCKFAMILY,
    TYPE_IS_SERVER,
    TYPE_UID,
    TYPE_CONTAINERNAME,
    TYPE_CONTAINERDIRECTORY,
    TYPE_PROTO,
    TYPE_CLIENTPROTO,
    TYPE_SERVERPROTO,
    TYPE_LPROTO,
    TYPE_RPROTO,
    TYPE_NET,
    TYPE_CNET,
    TYPE_SNET,
    TYPE_LNET,
    TYPE_RNET,
    TYPE_IS_CONNECTED,
    TYPE_NAME_CHANGED,
    TYPE_CLIENTIP_NAME,
    TYPE_SERVERIP_NAME,
    TYPE_LIP_NAME,
    TYPE_RIP_NAME,
    TYPE_DEV,
    TYPE_DEV_MAJOR,
    TYPE_DEV_MINOR,
    TYPE_INO,
    TYPE_FDNAMERAW,
    TYPE_FDTYPES,
    TYPE_FSPATH_NAME,
    TYPE_FSPATH_NAMERAW,
    TYPE_FSPATH_SOURCE,
    TYPE_FSPATH_SOURCERAW,
    TYPE_FSPATH_TARGET,
    TYPE_FSPATH_TARGETRAW,
    TYPE_CUSTOM_ANAME_LINEAGE_CONCAT,
    TYPE_CUSTOM_AEXE_LINEAGE_CONCAT,
    TYPE_CUSTOM_AEXEPATH_LINEAGE_CONCAT,
    TYPE_CUSTOM_FDNAME_PART1,
    TYPE_CUSTOM_FDNAME_PART2,
};
}

// Below copied from falcosecurity/libs userspace/libsinsp/event.h
///////////////////////////////////////////////////////////////////////////////
// Event arguments
///////////////////////////////////////////////////////////////////////////////
enum filtercheck_field_flags
{
    EPF_NONE = 0,
    EPF_FILTER_ONLY = 1 << 0, ///< this field can only be used as a filter.
    EPF_PRINT_ONLY = 1 << 1, ///< this field can only be printed.
    EPF_ARG_REQUIRED = 1 << 2, ///< this field includes an argument, under the form 'property.argument'.
    EPF_TABLE_ONLY = 1 << 3, ///< this field is designed to be used in a table and won't appear in the field listing.
    EPF_INFO = 1 << 4, ///< this field contains summary information about the event.
    EPF_CONVERSATION = 1 << 5, ///< this field can be used to identify conversations.
    EPF_IS_LIST = 1 << 6, ///< this field is a list of values.
    EPF_ARG_ALLOWED = 1 << 7, ///< this field optionally includes an argument.
    EPF_ARG_INDEX = 1 << 8, ///< this field accepts numeric arguments.
    EPF_ARG_KEY = 1 << 9, ///< this field accepts string arguments.
    EPF_DEPRECATED = 1 << 10, ///< this field is deprecated.
    EPF_NO_TRANSFORMER = 1 << 11, ///< this field cannot have a field transformer.
    EPF_NO_RHS = 1 << 12, ///< this field cannot have a right-hand side filter check, and cannot be used as a right-hand side filter check.
    // Custom below
    EPF_ANOMALY_PLUGIN = 1 << 13, ///< this field is supported by the anomalydetection plugin
};

// Below copied from falcosecurity/libs userspace/libsinsp/sinsp_filtercheck.h
/*!
  \brief Information about a filter/formatting field.
*/
struct filtercheck_field_info
{
    ppm_param_type m_type = PT_NONE; ///< Field type.
    uint32_t m_flags = 0; ///< Field flags.
    ppm_print_format m_print_format = PF_NA; ///< If this is a numeric field, this flag specifies if it should be rendered as octal, decimal or hex.
    char m_name[64]; ///< Field name.
    char m_display[64]; ///< Field display name (short description). May be empty.
    char m_description[1024]; ///< Field description.

    //
    // Return true if this field must have an argument
    //
    inline bool is_arg_required() const
    {
        return m_flags & EPF_ARG_REQUIRED;
    }

    //
    // Return true if this field can optionally have an argument
    //
    inline bool is_arg_allowed() const
    {
        return m_flags & EPF_ARG_ALLOWED;
    }

    //
    // Returns true if this field can have an argument, either
    // optionally or mandatorily
    //
    inline bool is_arg_supported() const
    {
        return (m_flags & EPF_ARG_REQUIRED) || (m_flags & EPF_ARG_ALLOWED);
    }

    //
    // Returns true if this field is a list of values
    //
    inline bool is_list() const
    {
        return m_flags & EPF_IS_LIST;
    }

    //
    // Returns true if this filter check can support a rhs filter check instead of a const value.
    //
    inline bool is_rhs_field_supported() const
    {
        return !(m_flags & EPF_NO_RHS);
    }

    //
    // Returns true if this filter check can support an extraction transformer on it.
    //
    inline bool is_transformer_supported() const
    {
        return !(m_flags & EPF_NO_TRANSFORMER);
    }
};
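Since the `EPF_*` values above are plain bit flags, the helper predicates on `filtercheck_field_info` reduce to simple mask tests. A small self-contained illustration of that pattern, using stand-in names rather than the real libs types:

```cpp
// Illustration of the EPF_* bit-flag pattern (stand-in names only).
#include <cstdint>
#include <cstdio>

enum demo_field_flags : uint32_t
{
    DEMO_EPF_ARG_REQUIRED = 1 << 2,
    DEMO_EPF_ARG_ALLOWED  = 1 << 7,
    DEMO_EPF_NO_RHS       = 1 << 12,
};

struct demo_field_info
{
    uint32_t m_flags = 0;

    bool is_arg_required() const { return m_flags & DEMO_EPF_ARG_REQUIRED; }
    bool is_arg_supported() const
    {
        return (m_flags & DEMO_EPF_ARG_REQUIRED) || (m_flags & DEMO_EPF_ARG_ALLOWED);
    }
    bool is_rhs_field_supported() const { return !(m_flags & DEMO_EPF_NO_RHS); }
};

int main()
{
    // Mirrors an entry such as proc.aname: argument allowed, no right-hand side.
    demo_field_info f{DEMO_EPF_ARG_ALLOWED | DEMO_EPF_NO_RHS};
    std::printf("arg required: %d, arg supported: %d, rhs supported: %d\n",
                f.is_arg_required(), f.is_arg_supported(), f.is_rhs_field_supported());
    return 0;
}
```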
@@ -0,0 +1,117 @@
// SPDX-License-Identifier: Apache-2.0
/*
Copyright (C) 2024 The Falco Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

*/

#pragma once

#include "num/cms.h"
#include "plugin_mutex.h"

#include <iostream>
#include <thread>
#include <chrono>
#include <atomic>
#include <vector>
#include <memory>

class ThreadManager {
public:
    ThreadManager() : m_stop_requested(false) {}

    ~ThreadManager()
    {
        stop_threads();
    }

    void stop_threads()
    {
        {
            std::lock_guard<std::mutex> lock(m_thread_mutex);
            m_stop_requested = true;
        }

        {
            std::lock_guard<std::mutex> lock(m_thread_mutex);
            for (auto& t : m_threads)
            {
                if (t.joinable())
                {
                    t.join();
                }
            }
            m_threads.clear();
        }
    }

    template<typename T>
    void start_periodic_count_min_sketch_reset_worker(int id, uint64_t interval_ms, plugin_anomalydetection::Mutex<std::vector<std::shared_ptr<plugin::anomalydetection::num::cms<T>>>>& count_min_sketches)
    {
        if (interval_ms > 100)
        {
            auto worker = [id, interval_ms, &count_min_sketches, this]() {
                periodic_count_min_sketch_reset_worker<T>(id, interval_ms, count_min_sketches);
            };

            std::thread worker_thread(worker);
            {
                std::lock_guard<std::mutex> lock(m_thread_mutex);
                m_threads.push_back(std::move(worker_thread));
            }
        }
    }
    std::atomic<bool> m_stop_requested;

private:
    std::vector<std::thread> m_threads;
    std::mutex m_thread_mutex;

    template<typename T>
    void reset_sketches_worker(int id, plugin_anomalydetection::Mutex<std::vector<std::shared_ptr<plugin::anomalydetection::num::cms<T>>>>& count_min_sketches)
    {
        auto sketches = count_min_sketches.lock();
        if (id >= 0 && id < sketches->size())
        {
            auto& sketch_ptr = sketches->at(id);
            if (sketch_ptr)
            {
                sketch_ptr->reset();
            }
        }
    }

    template<typename T>
    void periodic_count_min_sketch_reset_worker(int id, uint64_t interval_ms, plugin_anomalydetection::Mutex<std::vector<std::shared_ptr<plugin::anomalydetection::num::cms<T>>>>& count_min_sketches)
    {
        std::chrono::milliseconds interval(interval_ms);
        while (true)
        {
            std::this_thread::sleep_for(interval);
            // m_stop_requested is atomic, so the flag is checked without taking
            // m_thread_mutex; stop_threads() holds that mutex while joining workers.
            if (m_stop_requested)
            {
                break;
            }

            try
            {
                reset_sketches_worker<T>(id, count_min_sketches);
            } catch (const std::exception& e)
            {
            }
        }
    }
};
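A minimal usage sketch for `ThreadManager` (illustrative; it assumes the plugin headers above are reachable on the include path, and the sketch vector is left empty so nothing is assumed about the `cms<T>` constructor):

```cpp
// Minimal usage sketch for ThreadManager.
#include "plugin_thread_manager.h"

#include <chrono>
#include <cstdint>
#include <memory>
#include <thread>
#include <vector>

int main()
{
    plugin_anomalydetection::Mutex<std::vector<std::shared_ptr<plugin::anomalydetection::num::cms<uint64_t>>>> sketches;

    ThreadManager manager;
    // Starts a background thread that periodically resets sketches[0];
    // the worker is only spawned when interval_ms > 100.
    manager.start_periodic_count_min_sketch_reset_worker<uint64_t>(0, 200, sketches);

    std::this_thread::sleep_for(std::chrono::seconds(1));
    manager.stop_threads(); // requests stop and joins the background worker
    return 0;
}
```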
@@ -0,0 +1,475 @@
// SPDX-License-Identifier: Apache-2.0
/*
Copyright (C) 2024 The Falco Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

#include "plugin_utils.h"

#define SCAP_MAX_PATH_SIZE 1024

// Copied from falcosecurity/libs and adjusted w/ EPF_ANOMALY_PLUGIN flag and extended via adding custom fields
static const filtercheck_field_info sinsp_filter_check_fields[] =
{
{PT_CHARBUF, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_NA, "proc.exe", "First Argument", "The first command-line argument (i.e., argv[0]), typically the executable name or a custom string as specified by the user. It is primarily obtained from syscall arguments, truncated after 4096 bytes, or, as a fallback, by reading /proc/PID/cmdline, in which case it may be truncated after 1024 bytes. This field may differ from the last component of proc.exepath, reflecting how command invocation and execution paths can vary."},
{PT_CHARBUF, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_NA, "proc.pexe", "Parent First Argument", "The proc.exe (first command line argument argv[0]) of the parent process."},
{PT_CHARBUF, EPF_ANOMALY_PLUGIN | EPF_ARG_ALLOWED | EPF_NO_RHS | EPF_NO_TRANSFORMER, PF_NA, "proc.aexe", "Ancestor First Argument", "The proc.exe (first command line argument argv[0]) for a specific process ancestor. You can access different levels of ancestors by using indices. For example, proc.aexe[1] retrieves the proc.exe of the parent process, proc.aexe[2] retrieves the proc.exe of the grandparent process, and so on. The current process's proc.exe line can be obtained using proc.aexe[0]. When used without any arguments, proc.aexe is applicable only in filters and matches any of the process ancestors. For instance, you can use `proc.aexe endswith java` to match any process ancestor whose proc.exe ends with the term `java`."},
{PT_CHARBUF, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_NA, "proc.exepath", "Process Executable Path", "The full executable path of a process, resolving to the canonical path for symlinks. This is primarily obtained from the kernel, or as a fallback, by reading /proc/PID/exe (in the latter case, the path is truncated after 1024 bytes). For eBPF drivers, due to verifier limits, path components may be truncated to 24 for legacy eBPF on kernel <5.2, 48 for legacy eBPF on kernel >=5.2, or 96 for modern eBPF."},
{PT_CHARBUF, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_NA, "proc.pexepath", "Parent Process Executable Path", "The proc.exepath (full executable path) of the parent process."},
{PT_CHARBUF, EPF_ANOMALY_PLUGIN | EPF_ARG_ALLOWED | EPF_NO_RHS | EPF_NO_TRANSFORMER, PF_NA, "proc.aexepath", "Ancestor Executable Path", "The proc.exepath (full executable path) for a specific process ancestor. You can access different levels of ancestors by using indices. For example, proc.aexepath[1] retrieves the proc.exepath of the parent process, proc.aexepath[2] retrieves the proc.exepath of the grandparent process, and so on. The current process's proc.exepath line can be obtained using proc.aexepath[0]. When used without any arguments, proc.aexepath is applicable only in filters and matches any of the process ancestors. For instance, you can use `proc.aexepath endswith java` to match any process ancestor whose path ends with the term `java`."},
{PT_CHARBUF, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_NA, "proc.name", "Name", "The process name (truncated after 16 characters) generating the event (task->comm). Truncation is determined by kernel settings and not by Falco. This field is collected from the syscalls args or, as a fallback, extracted from /proc/PID/status. The name of the process and the name of the executable file on disk (if applicable) can be different if a process is given a custom name which is often the case for example for java applications."},
{PT_CHARBUF, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_NA, "proc.pname", "Parent Name", "The proc.name (truncated after 16 characters) of the parent of the process generating the event."},
{PT_CHARBUF, EPF_ANOMALY_PLUGIN | EPF_ARG_ALLOWED | EPF_NO_RHS | EPF_NO_TRANSFORMER, PF_NA, "proc.aname", "Ancestor Name", "The proc.name (truncated after 16 characters) for a specific process ancestor. You can access different levels of ancestors by using indices. For example, proc.aname[1] retrieves the proc.name of the parent process, proc.aname[2] retrieves the proc.name of the grandparent process, and so on. The current process's proc.name line can be obtained using proc.aname[0]. When used without any arguments, proc.aname is applicable only in filters and matches any of the process ancestors. For instance, you can use `proc.aname=bash` to match any process ancestor whose name is `bash`."},
{PT_CHARBUF, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_NA, "proc.args", "Arguments", "The arguments passed on the command line when starting the process generating the event excluding argv[0] (truncated after 4096 bytes). This field is collected from the syscalls args or, as a fallback, extracted from /proc/PID/cmdline."},
{PT_CHARBUF, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_NA, "proc.cmdline", "Command Line", "The concatenation of `proc.name + proc.args` (truncated after 4096 bytes) when starting the process generating the event."},
{PT_CHARBUF, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_NA, "proc.pcmdline", "Parent Command Line", "The proc.cmdline (full command line (proc.name + proc.args)) of the parent of the process generating the event."},
{PT_CHARBUF, EPF_ANOMALY_PLUGIN | EPF_ARG_ALLOWED | EPF_NO_RHS | EPF_NO_TRANSFORMER, PF_NA, "proc.acmdline", "Ancestor Command Line", "The full command line (proc.name + proc.args) for a specific process ancestor. You can access different levels of ancestors by using indices. For example, proc.acmdline[1] retrieves the full command line of the parent process, proc.acmdline[2] retrieves the proc.cmdline of the grandparent process, and so on. The current process's full command line can be obtained using proc.acmdline[0]. When used without any arguments, proc.acmdline is applicable only in filters and matches any of the process ancestors. For instance, you can use `proc.acmdline contains base64` to match any process ancestor whose command line contains the term base64."},
{PT_UINT64, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_DEC, "proc.cmdnargs", "Number of Command Line args", "The number of command line args (proc.args)."},
{PT_UINT64, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_DEC, "proc.cmdlenargs", "Total Count of Characters in Command Line args", "The total count of characters / length of the command line args (proc.args) combined excluding whitespaces between args."},
{PT_CHARBUF, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_NA, "proc.exeline", "Executable Command Line", "The full command line, with exe as first argument (proc.exe + proc.args) when starting the process generating the event."},
{PT_CHARBUF, EPF_ANOMALY_PLUGIN | EPF_ARG_ALLOWED, PF_NA, "proc.env", "Environment", "The environment variables of the process generating the event as concatenated string 'ENV_NAME=value ENV_NAME1=value1'. Can also be used to extract the value of a known env variable, e.g. proc.env[ENV_NAME]."},
{PT_CHARBUF, EPF_ARG_ALLOWED | EPF_NO_RHS | EPF_NO_TRANSFORMER, PF_NA, "proc.aenv", "Ancestor Environment", "[EXPERIMENTAL] This field can be used in three flavors: (1) as a filter checking all parents, e.g. 'proc.aenv contains xyz', which is similar to the familiar 'proc.aname contains xyz' approach, (2) checking the `proc.env` of a specified level of the parent, e.g. 'proc.aenv[2]', which is similar to the familiar 'proc.aname[2]' approach, or (3) checking the first matched value of a known ENV_NAME in the parent lineage, such as 'proc.aenv[ENV_NAME]' (across a max of 20 ancestor levels). This field may be deprecated or undergo breaking changes in future releases. Please use it with caution."},
{PT_CHARBUF, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_NA, "proc.cwd", "Current Working Directory", "The current working directory of the event."},
{PT_INT64, EPF_NONE, PF_ID, "proc.loginshellid", "Login Shell ID", "The pid of the oldest shell among the ancestors of the current process, if there is one. This field can be used to separate different user sessions."},
{PT_UINT32, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_ID, "proc.tty", "Process TTY", "The controlling terminal of the process. 0 for processes without a terminal."},
{PT_INT64, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_ID, "proc.pid", "Process ID", "The id of the process generating the event."},
{PT_INT64, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_ID, "proc.ppid", "Parent Process ID", "The pid of the parent of the process generating the event."},
{PT_INT64, EPF_ANOMALY_PLUGIN | EPF_ARG_ALLOWED | EPF_NO_RHS | EPF_NO_TRANSFORMER, PF_ID, "proc.apid", "Ancestor Process ID", "The pid for a specific process ancestor. You can access different levels of ancestors by using indices. For example, proc.apid[1] retrieves the pid of the parent process, proc.apid[2] retrieves the pid of the grandparent process, and so on. The current process's pid can be obtained using proc.apid[0]. When used without any arguments, proc.apid is applicable only in filters and matches any of the process ancestors. For instance, you can use `proc.apid=1337` to match any process ancestor whose pid is equal to 1337."},
{PT_INT64, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_ID, "proc.vpid", "Virtual Process ID", "The id of the process generating the event as seen from its current PID namespace."},
{PT_INT64, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_ID, "proc.pvpid", "Parent Virtual Process ID", "The id of the parent process generating the event as seen from its current PID namespace."},
{PT_INT64, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_ID, "proc.sid", "Process Session ID", "The session id of the process generating the event."},
{PT_CHARBUF, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_NA, "proc.sname", "Process Session Name", "The name of the current process's session leader. This is either the process with pid=proc.sid or the eldest ancestor that has the same sid as the current process."},
{PT_CHARBUF, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_NA, "proc.sid.exe", "Process Session First Argument", "The first command line argument argv[0] (usually the executable name or a custom one) of the current process's session leader. This is either the process with pid=proc.sid or the eldest ancestor that has the same sid as the current process."},
{PT_CHARBUF, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_NA, "proc.sid.exepath", "Process Session Executable Path", "The full executable path of the current process's session leader. This is either the process with pid=proc.sid or the eldest ancestor that has the same sid as the current process."},
{PT_INT64, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_ID, "proc.vpgid", "Process Virtual Group ID", "The process group id of the process generating the event, as seen from its current PID namespace."},
{PT_CHARBUF, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_NA, "proc.vpgid.name", "Process Group Name", "The name of the current process's process group leader. This is either the process with proc.vpgid == proc.vpid or the eldest ancestor that has the same vpgid as the current process. The description of `proc.is_vpgid_leader` offers additional insights."},
{PT_CHARBUF, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_NA, "proc.vpgid.exe", "Process Group First Argument", "The first command line argument argv[0] (usually the executable name or a custom one) of the current process's process group leader. This is either the process with proc.vpgid == proc.vpid or the eldest ancestor that has the same vpgid as the current process. The description of `proc.is_vpgid_leader` offers additional insights."},
{PT_CHARBUF, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_NA, "proc.vpgid.exepath", "Process Group Executable Path", "The full executable path of the current process's process group leader. This is either the process with proc.vpgid == proc.vpid or the eldest ancestor that has the same vpgid as the current process. The description of `proc.is_vpgid_leader` offers additional insights."},
{PT_RELTIME, EPF_NONE, PF_DEC, "proc.duration", "Process Duration", "Number of nanoseconds since the process started."},
{PT_RELTIME, EPF_NONE, PF_DEC, "proc.ppid.duration", "Parent Process Duration", "Number of nanoseconds since the parent process started."},
{PT_RELTIME, EPF_NONE, PF_DEC, "proc.pid.ts", "Process start ts", "Start of process as epoch timestamp in nanoseconds."},
{PT_RELTIME, EPF_NONE, PF_DEC, "proc.ppid.ts", "Parent Process start ts", "Start of parent process as epoch timestamp in nanoseconds."},
{PT_BOOL, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_NA, "proc.is_exe_writable", "Process Executable Is Writable", "'true' if this process' executable file is writable by the same user that spawned the process."},
{PT_BOOL, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_NA, "proc.is_exe_upper_layer", "Process Executable Is In Upper Layer", "'true' if this process' executable file is in upper layer in overlayfs. This field value can only be trusted if the underlying kernel version is greater or equal than 3.18.0, since overlayfs was introduced at that time."},
{PT_BOOL, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_NA, "proc.is_exe_from_memfd", "Process Executable Is Stored In Memfd", "'true' if the executable file of the current process is an anonymous file created using memfd_create() and is being executed by referencing its file descriptor (fd). This type of file exists only in memory and not on disk. Relevant to detect malicious in-memory code injection. Requires kernel version greater or equal to 3.17.0."},
{PT_BOOL, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_NA, "proc.is_sid_leader", "Process Is Process Session Leader", "'true' if this process is the leader of the process session, proc.sid == proc.vpid. For host processes vpid reflects pid."},
{PT_BOOL, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_NA, "proc.is_vpgid_leader", "Process Is Virtual Process Group Leader", "'true' if this process is the leader of the virtual process group, proc.vpgid == proc.vpid. For host processes vpgid and vpid reflect pgid and pid. Can help to distinguish if the process was 'directly' executed for instance in a tty (similar to bash history logging, `is_vpgid_leader` would be 'true') or executed as descendent process in the same process group which for example is the case when subprocesses are spawned from a script (`is_vpgid_leader` would be 'false')."},
{PT_INT64, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_DEC, "proc.exe_ino", "Inode number of executable file on disk", "The inode number of the executable file on disk. Can be correlated with fd.ino."},
{PT_ABSTIME, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_DEC, "proc.exe_ino.ctime", "Last status change time (ctime) of executable file", "Last status change time of executable file (inode->ctime) as epoch timestamp in nanoseconds. Time is changed by writing or by setting inode information e.g. owner, group, link count, mode etc."},
{PT_ABSTIME, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_DEC, "proc.exe_ino.mtime", "Last modification time (mtime) of executable file", "Last modification time of executable file (inode->mtime) as epoch timestamp in nanoseconds. Time is changed by file modifications, e.g. by mknod, truncate, utime, write of more than zero bytes etc. For tracking changes in owner, group, link count or mode, use proc.exe_ino.ctime instead."},
{PT_ABSTIME, EPF_NONE, PF_DEC, "proc.exe_ino.ctime_duration_proc_start", "Number of nanoseconds between ctime exe file and proc clone ts", "Number of nanoseconds between modifying status of executable image and spawning a new process using the changed executable image."},
{PT_ABSTIME, EPF_NONE, PF_DEC, "proc.exe_ino.ctime_duration_pidns_start", "Number of nanoseconds between pidns start ts and ctime exe file", "Number of nanoseconds between PID namespace start ts and ctime exe file if PID namespace start predates ctime."},
{PT_UINT64, EPF_NONE, PF_DEC, "proc.pidns_init_start_ts", "Start ts of pid namespace", "Start of PID namespace (container or non container pid namespace) as epoch timestamp in nanoseconds."},
{PT_CHARBUF, EPF_NONE, PF_NA, "thread.cap_permitted", "Permitted capabilities", "The permitted capabilities set"},
{PT_CHARBUF, EPF_NONE, PF_NA, "thread.cap_inheritable", "Inheritable capabilities", "The inheritable capabilities set"},
{PT_CHARBUF, EPF_NONE, PF_NA, "thread.cap_effective", "Effective capabilities", "The effective capabilities set"},
{PT_BOOL, EPF_NONE, PF_NA, "proc.is_container_healthcheck", "Process Is Container Healthcheck", "'true' if this process is running as a part of the container's health check."},
{PT_BOOL, EPF_NONE, PF_NA, "proc.is_container_liveness_probe", "Process Is Container Liveness", "'true' if this process is running as a part of the container's liveness probe."},
{PT_BOOL, EPF_NONE, PF_NA, "proc.is_container_readiness_probe", "Process Is Container Readiness", "'true' if this process is running as a part of the container's readiness probe."},
{PT_UINT64, EPF_NONE, PF_DEC, "proc.fdopencount", "FD Count", "Number of open FDs for the process"},
{PT_INT64, EPF_NONE, PF_DEC, "proc.fdlimit", "FD Limit", "Maximum number of FDs the process can open."},
{PT_DOUBLE, EPF_NONE, PF_NA, "proc.fdusage", "FD Usage", "The ratio between open FDs and maximum available FDs for the process."},
{PT_UINT64, EPF_NONE, PF_DEC, "proc.vmsize", "VM Size", "Total virtual memory for the process (as kb)."},
{PT_UINT64, EPF_NONE, PF_DEC, "proc.vmrss", "VM RSS", "Resident non-swapped memory for the process (as kb)."},
{PT_UINT64, EPF_NONE, PF_DEC, "proc.vmswap", "VM Swap", "Swapped memory for the process (as kb)."},
{PT_UINT64, EPF_NONE, PF_DEC, "thread.pfmajor", "Major Page Faults", "Number of major page faults since thread start."},
{PT_UINT64, EPF_NONE, PF_DEC, "thread.pfminor", "Minor Page Faults", "Number of minor page faults since thread start."},
{PT_INT64, EPF_NONE, PF_ID, "thread.tid", "Thread ID", "The id of the thread generating the event."},
{PT_BOOL, EPF_NONE, PF_NA, "thread.ismain", "Main Thread", "'true' if the thread generating the event is the main one in the process."},
{PT_INT64, EPF_NONE, PF_ID, "thread.vtid", "Virtual Thread ID", "The id of the thread generating the event as seen from its current PID namespace."},
{PT_CHARBUF, EPF_TABLE_ONLY, PF_NA, "thread.nametid", "Thread Name + ID", "This field chains the process name and tid of a thread and can be used as a specific identifier of a thread for a specific execve."},
{PT_RELTIME, EPF_NONE, PF_DEC, "thread.exectime", "Scheduled Thread CPU Time", "CPU time spent by the last scheduled thread, in nanoseconds. Exported by switch events only."},
{PT_RELTIME, EPF_NONE, PF_DEC, "thread.totexectime", "Current Thread CPU Time", "Total CPU time, in nanoseconds since the beginning of the capture, for the current thread. Exported by switch events only."},
{PT_CHARBUF, EPF_NONE, PF_NA, "thread.cgroups", "Thread Cgroups", "All cgroups the thread belongs to, aggregated into a single string."},
{PT_CHARBUF, EPF_ARG_REQUIRED, PF_NA, "thread.cgroup", "Thread Cgroup", "The cgroup the thread belongs to, for a specific subsystem. e.g. thread.cgroup.cpuacct."},
{PT_UINT64, EPF_NONE, PF_DEC, "proc.nthreads", "Threads", "The number of alive threads that the process generating the event currently has, including the leader thread. Please note that the leader thread may not be here, in that case 'proc.nthreads' and 'proc.nchilds' are equal"},
{PT_UINT64, EPF_NONE, PF_DEC, "proc.nchilds", "Children", "The number of alive not leader threads that the process generating the event currently has. This excludes the leader thread."},
{PT_DOUBLE, EPF_NONE, PF_NA, "thread.cpu", "Thread CPU", "The CPU consumed by the thread in the last second."},
{PT_DOUBLE, EPF_NONE, PF_NA, "thread.cpu.user", "Thread User CPU", "The user CPU consumed by the thread in the last second."},
{PT_DOUBLE, EPF_NONE, PF_NA, "thread.cpu.system", "Thread System CPU", "The system CPU consumed by the thread in the last second."},
{PT_UINT64, EPF_NONE, PF_DEC, "thread.vmsize", "Thread VM Size (kb)", "For the process main thread, this is the total virtual memory for the process (as kb). For the other threads, this field is zero."},
{PT_UINT64, EPF_NONE, PF_DEC, "thread.vmrss", "Thread VM RSS (kb)", "For the process main thread, this is the resident non-swapped memory for the process (as kb). For the other threads, this field is zero."},
{PT_UINT64, EPF_TABLE_ONLY, PF_DEC, "thread.vmsize.b", "Thread VM Size (b)", "For the process main thread, this is the total virtual memory for the process (in bytes). For the other threads, this field is zero."},
{PT_UINT64, EPF_TABLE_ONLY, PF_DEC, "thread.vmrss.b", "Thread VM RSS (b)", "For the process main thread, this is the resident non-swapped memory for the process (in bytes). For the other threads, this field is zero."},
{PT_CHARBUF, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_NA, "container.id", "Container ID", "The truncated container ID (first 12 characters), e.g. 3ad7b26ded6d is extracted from the Linux cgroups by Falco within the kernel. Consequently, this field is reliably available and serves as the lookup key for Falco's synchronous or asynchronous requests against the container runtime socket to retrieve all other 'container.*' information. One important aspect to be aware of is that if the process occurs on the host, meaning not in the container PID namespace, this field is set to a string called 'host'. In Kubernetes, pod sandbox container processes can exist where `container.id` matches `k8s.pod.sandbox_id`, lacking other 'container.*' details."},
{PT_CHARBUF, EPF_NONE, PF_NA, "container.full_id", "Container ID", "The full container ID, e.g. 3ad7b26ded6d8e7b23da7d48fe889434573036c27ae5a74837233de441c3601e. In contrast to `container.id`, we enrich this field as part of the container engine enrichment. In instances of userspace container engine lookup delays, this field may not be available yet."},
{PT_CHARBUF, EPF_NONE, PF_NA, "container.name", "Container Name", "The container name. In instances of userspace container engine lookup delays, this field may not be available yet. One important aspect to be aware of is that if the process occurs on the host, meaning not in the container PID namespace, this field is set to a string called 'host'."},
{PT_CHARBUF, EPF_NONE, PF_NA, "container.image", "Image Name", "The container image name (e.g. falcosecurity/falco:latest for docker). In instances of userspace container engine lookup delays, this field may not be available yet."},
{PT_CHARBUF, EPF_NONE, PF_NA, "container.image.id", "Image ID", "The container image id (e.g. 6f7e2741b66b). In instances of userspace container engine lookup delays, this field may not be available yet."},
{PT_CHARBUF, EPF_NONE, PF_NA, "container.type", "Type", "The container type, e.g. docker, cri-o, containerd etc."},
{PT_BOOL, EPF_NONE, PF_NA, "container.privileged", "Privileged", "'true' for containers running as privileged, 'false' otherwise. In instances of userspace container engine lookup delays, this field may not be available yet."},
{PT_CHARBUF, EPF_NONE, PF_NA, "container.mounts", "Mounts", "A space-separated list of mount information. Each item in the list has the format 'source:dest:mode:rdrw:propagation'. In instances of userspace container engine lookup delays, this field may not be available yet."},
{PT_CHARBUF, EPF_ARG_REQUIRED, PF_NA, "container.mount", "Mount", "Information about a single mount, specified by number (e.g. container.mount[0]) or mount source (container.mount[/usr/local]). The pathname can be a glob (container.mount[/usr/local/*]), in which case the first matching mount will be returned. The information has the format 'source:dest:mode:rdrw:propagation'. If there is no mount with the specified index or matching the provided source, returns the string \"none\" instead of a NULL value. In instances of userspace container engine lookup delays, this field may not be available yet."},
{PT_CHARBUF, EPF_ARG_REQUIRED, PF_NA, "container.mount.source", "Mount Source", "The mount source, specified by number (e.g. container.mount.source[0]) or mount destination (container.mount.source[/host/lib/modules]). The pathname can be a glob. In instances of userspace container engine lookup delays, this field may not be available yet."},
{PT_CHARBUF, EPF_ARG_REQUIRED, PF_NA, "container.mount.dest", "Mount Destination", "The mount destination, specified by number (e.g. container.mount.dest[0]) or mount source (container.mount.dest[/lib/modules]). The pathname can be a glob. In instances of userspace container engine lookup delays, this field may not be available yet."},
{PT_CHARBUF, EPF_ARG_REQUIRED, PF_NA, "container.mount.mode", "Mount Mode", "The mount mode, specified by number (e.g. container.mount.mode[0]) or mount source (container.mount.mode[/usr/local]). The pathname can be a glob. In instances of userspace container engine lookup delays, this field may not be available yet."},
{PT_CHARBUF, EPF_ARG_REQUIRED, PF_NA, "container.mount.rdwr", "Mount Read/Write", "The mount rdwr value, specified by number (e.g. container.mount.rdwr[0]) or mount source (container.mount.rdwr[/usr/local]). The pathname can be a glob. In instances of userspace container engine lookup delays, this field may not be available yet."},
{PT_CHARBUF, EPF_ARG_REQUIRED, PF_NA, "container.mount.propagation", "Mount Propagation", "The mount propagation value, specified by number (e.g. container.mount.propagation[0]) or mount source (container.mount.propagation[/usr/local]). The pathname can be a glob. In instances of userspace container engine lookup delays, this field may not be available yet."},
{PT_CHARBUF, EPF_NONE, PF_NA, "container.image.repository", "Repository", "The container image repository (e.g. falcosecurity/falco). In instances of userspace container engine lookup delays, this field may not be available yet."},
{PT_CHARBUF, EPF_NONE, PF_NA, "container.image.tag", "Image Tag", "The container image tag (e.g. stable, latest). In instances of userspace container engine lookup delays, this field may not be available yet."},
{PT_CHARBUF, EPF_NONE, PF_NA, "container.image.digest", "Registry Digest", "The container image registry digest (e.g. sha256:d977378f890d445c15e51795296e4e5062f109ce6da83e0a355fc4ad8699d27). In instances of userspace container engine lookup delays, this field may not be available yet."},
{PT_CHARBUF, EPF_NONE, PF_NA, "container.healthcheck", "Health Check", "The container's health check. Will be the null value (\"N/A\") if no healthcheck configured, \"NONE\" if configured but explicitly not created, and the healthcheck command line otherwise. In instances of userspace container engine lookup delays, this field may not be available yet."},
{PT_CHARBUF, EPF_NONE, PF_NA, "container.liveness_probe", "Liveness", "The container's liveness probe. Will be the null value (\"N/A\") if no liveness probe configured, the liveness probe command line otherwise. In instances of userspace container engine lookup delays, this field may not be available yet."},
{PT_CHARBUF, EPF_NONE, PF_NA, "container.readiness_probe", "Readiness", "The container's readiness probe. Will be the null value (\"N/A\") if no readiness probe configured, the readiness probe command line otherwise. In instances of userspace container engine lookup delays, this field may not be available yet."},
{PT_UINT64, EPF_NONE, PF_DEC, "container.start_ts", "Container start", "Container start as epoch timestamp in nanoseconds based on proc.pidns_init_start_ts and extracted in the kernel and not from the container runtime socket / container engine."},
{PT_RELTIME, EPF_NONE, PF_DEC, "container.duration", "Number of nanoseconds since container.start_ts", "Number of nanoseconds since container.start_ts."},
{PT_CHARBUF, EPF_NONE, PF_NA, "container.ip", "Container ip address", "The container's / pod's primary ip address as retrieved from the container engine. Only ipv4 addresses are tracked. Consider container.cni.json (CRI use case) for logging ip addresses for each network interface. In instances of userspace container engine lookup delays, this field may not be available yet."},
{PT_CHARBUF, EPF_NONE, PF_NA, "container.cni.json", "Container's / pod's CNI result json", "The container's / pod's CNI result field from the respective pod status info. It contains ip addresses for each network interface exposed as unparsed escaped JSON string. Supported for CRI container engine (containerd, cri-o runtimes), optimized for containerd (some non-critical JSON keys removed). Useful for tracking ips (ipv4 and ipv6, dual-stack support) for each network interface (multi-interface support). In instances of userspace container engine lookup delays, this field may not be available yet."},
{PT_INT64, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_ID, "fd.num", "FD Number", "the unique number identifying the file descriptor."},
{PT_CHARBUF, EPF_NONE, PF_DEC, "fd.type", "FD Type", "type of FD. Can be 'file', 'directory', 'ipv4', 'ipv6', 'unix', 'pipe', 'event', 'signalfd', 'eventpoll', 'inotify' 'signalfd' or 'memfd'."},
{PT_CHARBUF, EPF_NONE, PF_DEC, "fd.typechar", "FD Type Char", "type of FD as a single character. Can be 'f' for file, 4 for IPv4 socket, 6 for IPv6 socket, 'u' for unix socket, p for pipe, 'e' for eventfd, 's' for signalfd, 'l' for eventpoll, 'i' for inotify, 'b' for bpf, 'u' for userfaultd, 'r' for io_uring, 'm' for memfd ,'o' for unknown."},
|
||||
{PT_CHARBUF, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_NA, "fd.name", "FD Name", "FD full name. If the fd is a file, this field contains the full path. If the FD is a socket, this field contain the connection tuple."},
|
||||
{PT_CHARBUF, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_NA, "fd.directory", "FD Directory", "If the fd is a file, the directory that contains it."},
|
||||
{PT_CHARBUF, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_NA, "fd.filename", "FD Filename", "If the fd is a file, the filename without the path."},
|
||||
{PT_IPADDR, EPF_FILTER_ONLY | EPF_NO_RHS | EPF_NO_TRANSFORMER, PF_NA, "fd.ip", "FD IP Address", "matches the ip address (client or server) of the fd."},
|
||||
{PT_IPADDR, EPF_NONE, PF_NA, "fd.cip", "FD Client Address", "client IP address."},
|
||||
{PT_IPADDR, EPF_NONE, PF_NA, "fd.sip", "FD Server Address", "server IP address."},
|
||||
{PT_IPADDR, EPF_NONE, PF_NA, "fd.lip", "FD Local Address", "local IP address."},
|
||||
{PT_IPADDR, EPF_NONE, PF_NA, "fd.rip", "FD Remote Address", "remote IP address."},
|
||||
{PT_PORT, EPF_FILTER_ONLY | EPF_NO_RHS | EPF_NO_TRANSFORMER, PF_DEC, "fd.port", "FD Port", "matches the port (either client or server) of the fd."},
|
||||
{PT_PORT, EPF_NONE, PF_DEC, "fd.cport", "FD Client Port", "for TCP/UDP FDs, the client port."},
|
||||
{PT_PORT, EPF_NONE, PF_DEC, "fd.sport", "FD Server Port", "for TCP/UDP FDs, server port."},
|
||||
{PT_PORT, EPF_NONE, PF_DEC, "fd.lport", "FD Local Port", "for TCP/UDP FDs, the local port."},
|
||||
{PT_PORT, EPF_NONE, PF_DEC, "fd.rport", "FD Remote Port", "for TCP/UDP FDs, the remote port."},
|
||||
{PT_CHARBUF, EPF_NONE, PF_NA, "fd.l4proto", "FD IP Protocol", "the IP protocol of a socket. Can be 'tcp', 'udp', 'icmp' or 'raw'."},
|
||||
{PT_CHARBUF, EPF_NONE, PF_NA, "fd.sockfamily", "FD Socket Family", "the socket family for socket events. Can be 'ip' or 'unix'."},
|
||||
{PT_BOOL, EPF_NONE, PF_NA, "fd.is_server", "FD Server", "'true' if the process owning this FD is the server endpoint in the connection."},
|
||||
{PT_CHARBUF, EPF_NONE, PF_NA, "fd.uid", "FD ID", "a unique identifier for the FD, created by chaining the FD number and the thread ID."},
|
||||
{PT_CHARBUF, EPF_NONE, PF_NA, "fd.containername", "FD Container Name", "chaining of the container ID and the FD name. Useful when trying to identify which container an FD belongs to."},
|
||||
{PT_CHARBUF, EPF_NONE, PF_NA, "fd.containerdirectory", "FD Container Directory", "chaining of the container ID and the directory name. Useful when trying to identify which container a directory belongs to."},
|
||||
{PT_PORT, EPF_FILTER_ONLY | EPF_NO_RHS | EPF_NO_TRANSFORMER, PF_NA, "fd.proto", "FD Protocol", "matches the protocol (either client or server) of the fd."},
|
||||
{PT_CHARBUF, EPF_NONE, PF_NA, "fd.cproto", "FD Client Protocol", "for TCP/UDP FDs, the client protocol."},
|
||||
{PT_CHARBUF, EPF_NONE, PF_NA, "fd.sproto", "FD Server Protocol", "for TCP/UDP FDs, server protocol."},
|
||||
{PT_CHARBUF, EPF_NONE, PF_NA, "fd.lproto", "FD Local Protocol", "for TCP/UDP FDs, the local protocol."},
|
||||
{PT_CHARBUF, EPF_NONE, PF_NA, "fd.rproto", "FD Remote Protocol", "for TCP/UDP FDs, the remote protocol."},
|
||||
{PT_IPNET, EPF_FILTER_ONLY | EPF_NO_RHS | EPF_NO_TRANSFORMER, PF_NA, "fd.net", "FD IP Network", "matches the IP network (client or server) of the fd."},
|
||||
{PT_IPNET, EPF_FILTER_ONLY | EPF_NO_RHS | EPF_NO_TRANSFORMER, PF_NA, "fd.cnet", "FD Client Network", "matches the client IP network of the fd."},
|
||||
{PT_IPNET, EPF_FILTER_ONLY | EPF_NO_RHS | EPF_NO_TRANSFORMER, PF_NA, "fd.snet", "FD Server Network", "matches the server IP network of the fd."},
|
||||
{PT_IPNET, EPF_FILTER_ONLY | EPF_NO_RHS | EPF_NO_TRANSFORMER, PF_NA, "fd.lnet", "FD Local Network", "matches the local IP network of the fd."},
|
||||
{PT_IPNET, EPF_FILTER_ONLY | EPF_NO_RHS | EPF_NO_TRANSFORMER, PF_NA, "fd.rnet", "FD Remote Network", "matches the remote IP network of the fd."},
|
||||
{PT_BOOL, EPF_NONE, PF_NA, "fd.connected", "FD Connected", "for TCP/UDP FDs, 'true' if the socket is connected."},
|
||||
{PT_BOOL, EPF_NONE, PF_NA, "fd.name_changed", "FD Name Changed", "True when an event changes the name of an fd used by this event. This can occur in some cases such as udp connections where the connection tuple changes."},
|
||||
{PT_CHARBUF, EPF_NO_RHS | EPF_NO_TRANSFORMER, PF_NA, "fd.cip.name", "FD Client Domain Name", "Domain name associated with the client IP address."},
|
||||
{PT_CHARBUF, EPF_NO_RHS | EPF_NO_TRANSFORMER, PF_NA, "fd.sip.name", "FD Server Domain Name", "Domain name associated with the server IP address."},
|
||||
{PT_CHARBUF, EPF_NO_RHS | EPF_NO_TRANSFORMER, PF_NA, "fd.lip.name", "FD Local Domain Name", "Domain name associated with the local IP address."},
|
||||
{PT_CHARBUF, EPF_NO_RHS | EPF_NO_TRANSFORMER, PF_NA, "fd.rip.name", "FD Remote Domain Name", "Domain name associated with the remote IP address."},
|
||||
{PT_INT32, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_HEX, "fd.dev", "FD Device", "device number (major/minor) containing the referenced file"},
|
||||
{PT_INT32, EPF_NONE, PF_DEC, "fd.dev.major", "FD Major Device", "major device number containing the referenced file"},
|
||||
{PT_INT32, EPF_NONE, PF_DEC, "fd.dev.minor", "FD Minor Device", "minor device number containing the referenced file"},
|
||||
{PT_INT64, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_DEC, "fd.ino", "FD Inode Number", "inode number of the referenced file"},
|
||||
{PT_CHARBUF, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_NA, "fd.nameraw", "FD Name Raw", "FD full name raw. Just like fd.name, but only used if fd is a file path. File path is kept raw with limited sanitization and without deriving the absolute path."},
|
||||
{PT_CHARBUF, EPF_IS_LIST | EPF_ARG_ALLOWED | EPF_NO_RHS | EPF_NO_TRANSFORMER, PF_DEC, "fd.types", "FD Type", "List of FD types in use. Can be passed an fd number, e.g. fd.types[0], to get the type of stdin as a single-item list."},
|
||||
{PT_CHARBUF, EPF_NONE, PF_NA, "fs.path.name", "Path for Filesystem-related operation", "For any event type that deals with a filesystem path, the path the file syscall is operating on. This path is always fully resolved, prepending the thread cwd when needed."},
|
||||
{PT_CHARBUF, EPF_NONE, PF_NA, "fs.path.nameraw", "Raw path for Filesystem-related operation", "For any event type that deals with a filesystem path, the path the file syscall is operating on. This path is always the path provided to the syscall and may not be fully resolved."},
|
||||
{PT_CHARBUF, EPF_NONE, PF_NA, "fs.path.source", "Source path for Filesystem-related operation", "For any event type that deals with a filesystem path, and specifically for a source and target like mv, cp, etc, the source path the file syscall is operating on. This path is always fully resolved, prepending the thread cwd when needed."},
|
||||
{PT_CHARBUF, EPF_NONE, PF_NA, "fs.path.sourceraw", "Source path for Filesystem-related operation", "For any event type that deals with a filesystem path, and specifically for a source and target like mv, cp, etc, the source path the file syscall is operating on. This path is always the path provided to the syscall and may not be fully resolved."},
|
||||
{PT_CHARBUF, EPF_NONE, PF_NA, "fs.path.target", "Target path for Filesystem-related operation", "For any event type that deals with a filesystem path, and specifically for a target and target like mv, cp, etc, the target path the file syscall is operating on. This path is always fully resolved, prepending the thread cwd when needed."},
|
||||
{PT_CHARBUF, EPF_NONE, PF_NA, "fs.path.targetraw", "Target path for Filesystem-related operation", "For any event type that deals with a filesystem path, and specifically for a target and target like mv, cp, etc, the target path the file syscall is operating on. This path is always the path provided to the syscall and may not be fully resolved."},
|
||||
{PT_CHARBUF, EPF_ANOMALY_PLUGIN | EPF_ARG_REQUIRED, PF_NA, "custom.proc.aname.lineage.join", "Custom concat lineage", "[Incubating] String concatenate the process lineage to achieve better performance. It requires an argument to specify the maximum level of traversal, e.g. 'custom.proc.aname.lineage.join[7]'. This is a custom plugin specific field for the anomaly behavior profiles only. It may be dperecated in the future."},
|
||||
{PT_CHARBUF, EPF_ANOMALY_PLUGIN | EPF_ARG_REQUIRED, PF_NA, "custom.proc.aexe.lineage.join", "Custom concat lineage", "[Incubating] String concatenate the process lineage to achieve better performance. It requires an argument to specify the maximum level of traversal, e.g. 'custom.proc.aexe.lineage.join[7]'. This is a custom plugin specific field for the anomaly behavior profiles only. It may be dperecated in the future."},
|
||||
{PT_CHARBUF, EPF_ANOMALY_PLUGIN | EPF_ARG_REQUIRED, PF_NA, "custom.proc.aexepath.lineage.join", "Custom concat lineage", "[Incubating] String concatenate the process lineage to achieve better performance. It requires an argument to specify the maximum level of traversal, e.g. 'custom.proc.aexepath.lineage.join[7]'. This is a custom plugin specific field for the anomaly behavior profiles only. It may be dperecated in the future."},
|
||||
{PT_CHARBUF, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_NA, "custom.fd.name.part1", "Custom fd 'ip:port' part1", "[Incubating] For fd related network events only. Part 1 as string of the ip tuple in the format 'ip:port', e.g '172.40.111.222:54321' given fd.name '172.40.111.222:54321->142.251.111.147:443'. It may be dperecated in the future."},
|
||||
{PT_CHARBUF, EPF_ANOMALY_PLUGIN | EPF_NONE, PF_NA, "custom.fd.name.part2", "Custom fd 'ip:port' part1", "[Incubating] For fd related network events only. Part 2 as string of the ip tuple in the format 'ip:port', e.g.'142.251.111.147:443' given fd.name '172.40.111.222:54321->142.251.111.147:443'. This is a custom plugin specific field for the anomaly behavior profiles only. It may be dperecated in the future."},
|
||||
};
|
||||
|
||||
|
||||
namespace plugin_anomalydetection::utils
|
||||
{
|
||||
|
||||
// Adapted from falcosecurity/libs; hand-rolled here for performance reasons
|
||||
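// Rewinds the target write cursor (*tc) to the start of the previous path component
// and advances the source cursor (*pc) by `delta` chars (the length of the consumed '..' token).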
static inline void rewind_to_parent_path(const char* targetbase, char** tc, const char** pc, uint32_t delta)
|
||||
{
|
||||
if(*tc <= targetbase + 1)
|
||||
{
|
||||
(*pc) += delta;
|
||||
return;
|
||||
}
|
||||
|
||||
(*tc)--;
|
||||
|
||||
while((*tc) >= targetbase + 1 && *((*tc) - 1) != '/')
|
||||
{
|
||||
(*tc)--;
|
||||
}
|
||||
|
||||
(*pc) += delta;
|
||||
}
|
||||
|
||||
// Adapted from falcosecurity/libs
|
||||
struct g_invalidchar
|
||||
{
|
||||
bool operator()(char c) const
|
||||
{
|
||||
// Exclude all non-printable characters and control characters while
|
||||
// including a wide range of languages (emojis, cyrillic, chinese etc)
|
||||
return !(isprint((unsigned char)c));
|
||||
}
|
||||
};
|
||||
|
||||
// Adapted from falcosecurity/libs; hand-rolled here for performance reasons
|
||||
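// Copies `path` into `target`, replacing non-printable characters with '.',
// resolving '.' and '..' components relative to `targetbase`, and dropping
// duplicate as well as trailing separators.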
static inline void copy_and_sanitize_path(char* target, char* targetbase, const char *path, char separator)
|
||||
{
|
||||
char* tc = target;
|
||||
const char* pc = path;
|
||||
g_invalidchar ic;
|
||||
const bool empty_base = target == targetbase;
|
||||
|
||||
while(true)
|
||||
{
|
||||
if(*pc == 0)
|
||||
{
|
||||
*tc = 0;
|
||||
|
||||
//
|
||||
// If the path ends with a separator, remove it, as the OS does.
|
||||
// Properly manage case where path is just "/".
|
||||
//
|
||||
if((tc > (targetbase + 1)) && (*(tc - 1) == separator))
|
||||
{
|
||||
*(tc - 1) = 0;
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
if(ic(*pc))
|
||||
{
|
||||
//
|
||||
// Invalid char, substitute with a '.'
|
||||
//
|
||||
*tc = '.';
|
||||
tc++;
|
||||
pc++;
|
||||
}
|
||||
else
|
||||
{
|
||||
//
|
||||
// If path begins with '.' or '.' is the first char after a '/'
|
||||
//
|
||||
if(*pc == '.' && (tc == targetbase || *(tc - 1) == separator))
|
||||
{
|
||||
//
|
||||
// '../', rewind to the previous separator
|
||||
//
|
||||
if(*(pc + 1) == '.' && *(pc + 2) == separator)
|
||||
{
|
||||
rewind_to_parent_path(targetbase, &tc, &pc, 3);
|
||||
}
|
||||
//
|
||||
// '..', with no separator.
|
||||
// This is valid if we are at the end of the string, and in that case we rewind.
|
||||
//
|
||||
else if(*(pc + 1) == '.' && *(pc + 2) == 0)
|
||||
{
|
||||
rewind_to_parent_path(targetbase, &tc, &pc, 2);
|
||||
}
|
||||
//
|
||||
// './', just skip it
|
||||
//
|
||||
else if(*(pc + 1) == separator)
|
||||
{
|
||||
pc += 2;
|
||||
}
|
||||
//
|
||||
// '.', with no separator.
|
||||
// This is valid if we are at the end of the string, and in that case we just skip it.
|
||||
//
|
||||
else if(*(pc + 1) == 0)
|
||||
{
|
||||
pc++;
|
||||
}
|
||||
//
|
||||
// Otherwise, we leave the string intact.
|
||||
//
|
||||
else
|
||||
{
|
||||
*tc = *pc;
|
||||
pc++;
|
||||
tc++;
|
||||
}
|
||||
}
|
||||
else if(*pc == separator)
|
||||
{
|
||||
//
|
||||
// separator:
|
||||
// * if the last char is already a separator, skip it
|
||||
// * if we are back at targetbase but targetbase was not empty before, it means we
|
||||
// fully rewound back to targetbase and the string is now empty. Skip the separator.
|
||||
// Example: "/foo/../a" -> "/a" BUT "foo/../a" -> "a"
|
||||
// -> Otherwise: "foo/../a" -> "/a"
|
||||
//
|
||||
if((tc > targetbase && *(tc - 1) == separator) || (tc == targetbase && !empty_base))
|
||||
{
|
||||
pc++;
|
||||
}
|
||||
else
|
||||
{
|
||||
*tc = *pc;
|
||||
tc++;
|
||||
pc++;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
//
|
||||
// Normal char, copy it
|
||||
//
|
||||
*tc = *pc;
|
||||
tc++;
|
||||
pc++;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Adapted from falcosecurity/libs
|
||||
#ifndef HAVE_STRLCPY
|
||||
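// BSD-style strlcpy: copies at most size-1 bytes (nothing when size is 0) and
// NUL-terminates the destination when size > 0; returns strlen(src) so callers
// can detect truncation.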
static inline size_t strlcpy(char *dst, const char *src, size_t size) {
|
||||
size_t srcsize = strlen(src);
|
||||
if (size == 0) {
|
||||
return srcsize;
|
||||
}
|
||||
|
||||
size_t copysize = srcsize;
|
||||
|
||||
if (copysize > size - 1) {
|
||||
copysize = size - 1;
|
||||
}
|
||||
|
||||
memcpy(dst, src, copysize);
|
||||
dst[copysize] = '\0';
|
||||
|
||||
return srcsize;
|
||||
}
|
||||
#endif
|
||||
|
||||
// Adapted from falcosecurity/libs; hand-rolled here for performance reasons
|
||||
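// Joins path1 and path2 into `target` (capacity `targetlen`). Returns true when
// path2 is relative and was appended to path1; returns false when path2 is empty
// or absolute (path1 is ignored) or when the result would not fit.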
static inline bool concatenate_paths_(char* target, uint32_t targetlen, const char* path1, uint32_t len1,
|
||||
const char* path2, uint32_t len2)
|
||||
{
|
||||
if(targetlen < (len1 + len2 + 1))
|
||||
{
|
||||
strlcpy(target, "/PATH_TOO_LONG", targetlen);
|
||||
return false;
|
||||
}
|
||||
|
||||
if(len2 != 0 && path2[0] != '/')
|
||||
{
|
||||
memcpy(target, path1, len1);
|
||||
copy_and_sanitize_path(target + len1, target, path2, '/');
|
||||
return true;
|
||||
}
|
||||
else
|
||||
{
|
||||
target[0] = 0;
|
||||
copy_and_sanitize_path(target, target, path2, '/');
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// Adapted from falcosecurity/libs; hand-rolled here for performance reasons
|
||||
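// std::string convenience wrapper around concatenate_paths_ using a
// SCAP_MAX_PATH_SIZE stack buffer; the helper's boolean result is discarded.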
std::string concatenate_paths(std::string_view path1, std::string_view path2)
|
||||
{
|
||||
char fullpath[SCAP_MAX_PATH_SIZE];
|
||||
concatenate_paths_(fullpath, SCAP_MAX_PATH_SIZE, path1.data(), (uint32_t)path1.length(), path2.data(),
|
||||
(uint32_t)path2.size());
|
||||
return std::string(fullpath);
|
||||
}
|
||||
|
||||
const std::vector<plugin_sinsp_filterchecks_field> get_profile_fields(const std::string& behavior_profile)
|
||||
{
|
||||
std::vector<plugin_sinsp_filterchecks_field> fields;
|
||||
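// Behavior profiles reference fields as whitespace-separated '%field' or
// '%field[arg]' tokens, e.g. "%proc.name %proc.aname[2]".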
std::regex pattern(R"(%(\S+))");
|
||||
std::sregex_iterator iter(behavior_profile.begin(), behavior_profile.end(), pattern);
|
||||
std::sregex_iterator end;
|
||||
|
||||
plugin_sinsp_filterchecks::check_type id;
|
||||
std::int32_t argid = 0;
|
||||
std::string argname = "";
|
||||
|
||||
while (iter != end)
|
||||
{
|
||||
// todo revisit this helper
|
||||
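// Strip the leading '%' so that rawfield holds e.g. "proc.aname[3]" or "fd.name".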
auto rawfield = iter->str().substr(1);
|
||||
std::string fieldname = rawfield;
|
||||
bool found_match = false;
|
||||
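// Linear scan over the static field table; acceptable since behavior profiles
// only contain a handful of fields.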
for (size_t i = 0; i < sizeof(sinsp_filter_check_fields) / sizeof(sinsp_filter_check_fields[0]); ++i)
|
||||
{
|
||||
id = static_cast<plugin_sinsp_filterchecks::check_type>(i);
|
||||
if(id == plugin_sinsp_filterchecks::TYPE_ENV ||
|
||||
id == plugin_sinsp_filterchecks::TYPE_APID ||
|
||||
id == plugin_sinsp_filterchecks::TYPE_ANAME ||
|
||||
id == plugin_sinsp_filterchecks::TYPE_AEXE ||
|
||||
id == plugin_sinsp_filterchecks::TYPE_AEXEPATH ||
|
||||
id == plugin_sinsp_filterchecks::TYPE_ACMDLINE ||
|
||||
id == plugin_sinsp_filterchecks::TYPE_CUSTOM_ANAME_LINEAGE_CONCAT||
|
||||
id == plugin_sinsp_filterchecks::TYPE_CUSTOM_AEXE_LINEAGE_CONCAT ||
|
||||
id == plugin_sinsp_filterchecks::TYPE_CUSTOM_AEXEPATH_LINEAGE_CONCAT
|
||||
)
|
||||
{
|
||||
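// For fields that accept an argument, split 'name[arg]' into the bare field name
// and either a numeric argid or a string argname (e.g. proc.aname[3] vs proc.env[HOME]).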
size_t start_pos = rawfield.find('[');
|
||||
size_t end_pos = rawfield.find(']');
|
||||
if (start_pos != std::string::npos && end_pos != std::string::npos)
|
||||
{
|
||||
fieldname = rawfield.substr(0, start_pos);
|
||||
std::string arg_str = rawfield.substr(start_pos + 1, end_pos - start_pos - 1);
|
||||
if (!arg_str.empty())
|
||||
{
|
||||
argname = rawfield.substr(start_pos + 1, end_pos - start_pos - 1);
|
||||
if (std::all_of(argname.begin(), argname.end(), ::isdigit))
|
||||
{
|
||||
argid = std::stoi(rawfield.substr(start_pos + 1, end_pos - start_pos - 1));
|
||||
argname.clear();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if (std::string(sinsp_filter_check_fields[i].m_name) == fieldname)
|
||||
{
|
||||
found_match = true;
|
||||
if ((sinsp_filter_check_fields[i].m_flags & EPF_ANOMALY_PLUGIN))
|
||||
{
|
||||
if ((id == plugin_sinsp_filterchecks::TYPE_CUSTOM_ANAME_LINEAGE_CONCAT||
|
||||
id == plugin_sinsp_filterchecks::TYPE_CUSTOM_AEXE_LINEAGE_CONCAT ||
|
||||
id == plugin_sinsp_filterchecks::TYPE_CUSTOM_AEXEPATH_LINEAGE_CONCAT)
|
||||
&& argid == 0)
|
||||
{
|
||||
plugin_anomalydetection::utils::log_error("Usage of behavior profile field: '" + fieldname + "' requires an argument greater than 0 indicating the level of parent lineage traversal, e.g. '%custom.proc.aname.lineage.join[7]' or '%custom.proc.aexe.lineage.join[7]' or '%custom.proc.aexepath.lineage.join[7]' exiting...");
|
||||
exit(1);
|
||||
}
|
||||
fields.emplace_back(plugin_sinsp_filterchecks_field{
|
||||
id,
|
||||
argid,
|
||||
argname
|
||||
});
|
||||
} else
|
||||
{
|
||||
plugin_anomalydetection::utils::log_error("Remove the following unsupported behavior profile field: '" + fieldname + "' exiting...");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
argid = 0;
|
||||
argname.clear();
|
||||
}
|
||||
if (!found_match)
|
||||
{
|
||||
plugin_anomalydetection::utils::log_error("Remove the following invalid or mistyped behavior profile field: '" + fieldname + "' exiting...");
|
||||
exit(1);
|
||||
}
|
||||
++iter;
|
||||
}
|
||||
return fields;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,51 @@
|
|||
// SPDX-License-Identifier: Apache-2.0
|
||||
/*
|
||||
Copyright (C) 2024 The Falco Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "plugin_sinsp_filterchecks.h"
|
||||
#include "plugin_consts.h"
|
||||
|
||||
#include <falcosecurity/sdk.h>
|
||||
|
||||
#include <regex>
|
||||
#include <unordered_set>
|
||||
|
||||
typedef struct plugin_sinsp_filterchecks_field
|
||||
{
|
||||
plugin_sinsp_filterchecks::check_type id;
|
||||
std::int32_t argid;
|
||||
std::string argname;
|
||||
}plugin_sinsp_filterchecks_field;
|
||||
|
||||
namespace plugin_anomalydetection::utils
|
||||
{
|
||||
// Adapted from falcosecurity/libs; hand-rolled here for performance reasons
|
||||
std::string concatenate_paths(std::string_view path1, std::string_view path2);
|
||||
|
||||
// Temporary workaround; not as robust as libsinsp/eventformatter;
|
||||
// ideally the plugin API will expose more libsinsp functionality in the near term
|
||||
//
|
||||
// No need for performance optimization at the moment, as the typical use case involves fewer than 3-8 sketches
|
||||
const std::vector<plugin_sinsp_filterchecks_field> get_profile_fields(const std::string& behavior_profile);
|
||||
|
||||
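// Minimal logging helper: prints the message to stdout prefixed with PLUGIN_LOG_PREFIX.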
inline void log_error(std::string err_mess)
|
||||
{
|
||||
printf("%s %s\n", PLUGIN_LOG_PREFIX, err_mess.c_str());
|
||||
}
|
||||
|
||||
} // plugin_anomalydetection::utils
|
|
@ -0,0 +1,71 @@
|
|||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
# Copyright (C) 2024 The Falco Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations under the License.
|
||||
#
|
||||
|
||||
# Setup adopted from the `k8smeta` plugin
|
||||
|
||||
include(libs)
|
||||
|
||||
# Create a build directory out of the tree for libs tests
|
||||
set(SINSP_TEST_FOLDER "${CMAKE_BINARY_DIR}/libs_tests")
|
||||
file(MAKE_DIRECTORY "${SINSP_TEST_FOLDER}")
|
||||
|
||||
# Prepare some additional includes for plugin tests
|
||||
set(TEST_EXTRA_INCLUDES "${CMAKE_BINARY_DIR}/plugin_include")
|
||||
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/plugin_test_var.h.in"
|
||||
"${TEST_EXTRA_INCLUDES}/plugin_test_var.h")
|
||||
# Copy the entire plugin src directory for targeted additional unit tests
|
||||
file(COPY "${CMAKE_SOURCE_DIR}/src/" DESTINATION "${TEST_EXTRA_INCLUDES}/")
|
||||
|
||||
# Download nlohmann json single include used in tests
|
||||
file(
|
||||
DOWNLOAD
|
||||
"https://raw.githubusercontent.com/nlohmann/json/v3.10.2/single_include/nlohmann/json.hpp"
|
||||
"${TEST_EXTRA_INCLUDES}/json.hpp"
|
||||
EXPECTED_HASH
|
||||
SHA256=059743e48b37e41579ee3a92e82e984bfa0d2a9a2b20b175d04db8089f46f047)
|
||||
|
||||
# Download xxhash single include used in cms class unit tests
|
||||
file(
|
||||
DOWNLOAD
|
||||
"https://raw.githubusercontent.com/Cyan4973/xxHash/v0.8.2/xxhash.h"
|
||||
"${TEST_EXTRA_INCLUDES}/xxhash.h"
|
||||
EXPECTED_HASH SHA256=be275e9db21a503c37f24683cdb4908f2370a3e35ab96e02c4ea73dc8e399c43)
|
||||
|
||||
# Add some additional test source files
|
||||
file(GLOB_RECURSE ANOMALYDETECTION_TEST_SUITE ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp)
|
||||
string(REPLACE ";" "\\;" ESCAPED_ANOMALYDETECTION_TEST_SUITE "${ANOMALYDETECTION_TEST_SUITE}")
|
||||
|
||||
# Associate the needed includes
|
||||
list(APPEND ANOMALYDETECTION_TEST_INCLUDES "${CMAKE_CURRENT_SOURCE_DIR}/include"
|
||||
"${CMAKE_BINARY_DIR}/plugin_include")
|
||||
string(REPLACE ";" "\\;" ESCAPED_ANOMALYDETECTION_TEST_INCLUDES "${ANOMALYDETECTION_TEST_INCLUDES}")
|
||||
|
||||
add_custom_target(
|
||||
build-tests
|
||||
WORKING_DIRECTORY "${SINSP_TEST_FOLDER}"
|
||||
COMMAND
|
||||
cmake -S"${LIBS_DIR}"
|
||||
-DCMAKE_BUILD_TYPE=Release
|
||||
-DUSE_BUNDLED_DEPS=ON
|
||||
-DBUILD_LIBSCAP_GVISOR=OFF
|
||||
-DCREATE_TEST_TARGETS=ON
|
||||
-DMINIMAL_BUILD=ON
|
||||
-DSCAP_FILES_SUITE_ENABLE=OFF
|
||||
-DADDITIONAL_SINSP_TESTS_SUITE="${ESCAPED_ANOMALYDETECTION_TEST_SUITE}"
|
||||
-DADDITIONAL_SINSP_TESTS_INCLUDE_FOLDERS="${ESCAPED_ANOMALYDETECTION_TEST_INCLUDES}"
|
||||
COMMAND make -C "${SINSP_TEST_FOLDER}" unit-test-libsinsp -j4)
|
||||
|
||||
add_custom_target(
|
||||
run-tests COMMAND "${SINSP_TEST_FOLDER}/libsinsp/test/unit-test-libsinsp"
|
||||
--gtest_filter='*plugin_anomalydetection*')
|
|
@ -0,0 +1,13 @@
|
|||
# Tests Leveraging the `libsinsp` Unit Test Framework
|
||||
|
||||
We leverage the [falcosecurity/libs](https://github.com/falcosecurity/libs) `libsinsp` unit test framework for the `anomalydetection` plugin tests. This way, we can check the compatibility of the plugin with a specific framework version. This approach was adopted from the `k8smeta` plugin.
|
||||
|
||||
## Run Tests
|
||||
|
||||
```bash
|
||||
cd build
|
||||
# Build tests
|
||||
make build-tests
|
||||
# Run tests
|
||||
make run-tests
|
||||
```
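Note: the `run-tests` target only executes the Google Test cases matching `*plugin_anomalydetection*` inside the out-of-tree `unit-test-libsinsp` binary (see the `--gtest_filter` argument in the test `CMakeLists.txt`).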
|
|
@ -0,0 +1,49 @@
|
|||
// SPDX-License-Identifier: Apache-2.0
|
||||
/*
|
||||
Copyright (C) 2024 The Falco Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
#define INIT_CONFIG "{\"count_min_sketch\":{\"enabled\":true,\"n_sketches\":3,\"gamma_eps\":[[0.001,0.0001],[0.001,0.0001],[0.001,0.0001]],\"behavior_profiles\":[\
|
||||
{\"fields\":\"%container.id %proc.is_vpgid_leader %proc.is_sid_leader %proc.exe_ino %proc.exe_ino.ctime %proc.exe_ino.mtime %proc.is_exe_writable %proc.is_exe_upper_layer %proc.is_exe_from_memfd %proc.cmdnargs %proc.cmdlenargs %proc.env[HOME] %proc.exeline %proc.name %proc.pname %proc.exepath %proc.pexepath %proc.tty %proc.vpid %proc.pvpid\",\
|
||||
\"event_codes\":[293,331]},\
|
||||
{\"fields\":\"%proc.pid %custom.fd.name.part1 %custom.fd.name.part2 %fd.num %fd.name %fd.directory %fd.filename %fd.dev %fd.ino %fd.nameraw\",\
|
||||
\"event_codes\":[3,307,327,23]},\
|
||||
{\"fields\":\"%container.id %custom.proc.aname.lineage.join[7] %custom.proc.aexe.lineage.join[7] %custom.proc.aexepath.lineage.join[7] %proc.cmdline %proc.acmdline %proc.pcmdline %proc.acmdline[1] %proc.name %proc.aname[0] %proc.aname[1] %proc.aname[2] %proc.aname[3] %proc.aname[4] %proc.aname[5] %proc.aname[6] %proc.aname[7] %proc.pid %proc.apid[0] %proc.apid[1] %proc.apid[2] %proc.apid[3] %proc.apid[4] %proc.apid[5] %proc.apid[6] %proc.apid[7] %proc.exepath %proc.aexepath[0] %proc.aexepath[1] %proc.aexepath[2] %proc.aexepath[3] %proc.aexepath[4] %proc.aexepath[5] %proc.aexepath[6] %proc.aexepath[7] %proc.vpgid %proc.vpgid.name %proc.vpgid.exe %proc.vpgid.exepath %proc.sid %proc.sname %proc.sid.exe %proc.sid.exepath\",\
|
||||
\"event_codes\":[293,331]}]}}"
|
||||
|
||||
#define ASSERT_PLUGIN_INITIALIZATION(p_o, p_l) \
|
||||
{ \
|
||||
p_o = m_inspector.register_plugin(PLUGIN_PATH); \
|
||||
ASSERT_TRUE(p_o.get()); \
|
||||
std::string err; \
|
||||
ASSERT_TRUE(p_o->init(INIT_CONFIG, err)) << "err: " << err; \
|
||||
p_l.add_filter_check(m_inspector.new_generic_filtercheck()); \
|
||||
p_l.add_filter_check(sinsp_plugin::new_filtercheck(p_o)); \
|
||||
}
|
||||
|
||||
#define DEFAULT_IPV4_CLIENT_STRING "172.40.111.222"
|
||||
#define DEFAULT_IPV6_CLIENT_STRING "::1"
|
||||
#define DEFAULT_CLIENT_PORT_STRING "54321"
|
||||
#define DEFAULT_CLIENT_PORT 54321
|
||||
|
||||
#define DEFAULT_IPV4_SERVER_STRING "142.251.111.147"
|
||||
#define DEFAULT_IPV6_SERVER_STRING "2001:4860:4860::8888"
|
||||
#define DEFAULT_SERVER_PORT_STRING "443"
|
||||
#define DEFAULT_SERVER_PORT 443
|
||||
|
||||
#define DEFAULT_IPV4_FDNAME "172.40.111.222:54321->142.251.111.147:443"
|
||||
#define DEFAULT_IPV6_FDNAME "::1:54321->2001:4860:4860::8888:443"
|
||||
|
||||
#define DEFAULT_IP_STRING_SIZE 100
|
|
@ -0,0 +1,20 @@
|
|||
// SPDX-License-Identifier: Apache-2.0
|
||||
/*
|
||||
Copyright (C) 2024 The Falco Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#define PLUGIN_PATH "${CMAKE_BINARY_DIR}/libanomalydetection.so"
|
|
@ -0,0 +1,328 @@
|
|||
// SPDX-License-Identifier: Apache-2.0
|
||||
/*
|
||||
Copyright (C) 2024 The Falco Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
#include <sinsp_with_test_input.h>
|
||||
#include <helpers/threads_helpers.h>
|
||||
#include <num/cms.h>
|
||||
#include <plugin_test_var.h>
|
||||
#include <test_helpers.h>
|
||||
|
||||
TEST(plugin_anomalydetection, plugin_anomalydetection_cms_dim)
|
||||
{
|
||||
double gamma = 0.001;
|
||||
double epsilon = 0.0001;
|
||||
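// Expected count-min sketch dimensions for the given error bounds, using the
// standard sizing d = ceil(ln(1/gamma)) = 7 rows and w = ceil(e/epsilon) = 27183
// counters per row (assumed to match the plugin's cms implementation, as asserted below).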
uint64_t d = 7;
|
||||
uint64_t w = 27183;
|
||||
|
||||
plugin::anomalydetection::num::cms<uint64_t> cms_proba_init(gamma, epsilon);
|
||||
|
||||
EXPECT_EQ(cms_proba_init.get_d(), d);
|
||||
EXPECT_EQ(cms_proba_init.get_w(), w);
|
||||
EXPECT_DOUBLE_EQ(cms_proba_init.get_gamma(), gamma);
|
||||
EXPECT_DOUBLE_EQ(cms_proba_init.get_eps(), epsilon);
|
||||
|
||||
plugin::anomalydetection::num::cms<uint64_t> cms_dim_init(d, w);
|
||||
|
||||
EXPECT_EQ(cms_dim_init.get_d(), d);
|
||||
EXPECT_EQ(cms_dim_init.get_w(), w);
|
||||
auto gamma_rounded = round(cms_dim_init.get_gamma() * 1000.0) / 1000.0;
|
||||
auto eps_rounded = round(cms_dim_init.get_eps() * 10000.0) / 10000.0;
|
||||
EXPECT_DOUBLE_EQ(gamma_rounded, gamma);
|
||||
EXPECT_DOUBLE_EQ(eps_rounded, epsilon);
|
||||
}
|
||||
|
||||
TEST(plugin_anomalydetection, plugin_anomalydetection_cms_update_estimate)
|
||||
{
|
||||
double gamma = 0.001;
|
||||
double epsilon = 0.0001;
|
||||
|
||||
plugin::anomalydetection::num::cms<uint64_t> cms(gamma, epsilon);
|
||||
|
||||
std::string test_str = "falco";
|
||||
std::string test_str2 = "falco1";
|
||||
cms.update(test_str, 1);
|
||||
cms.update(test_str, 1);
|
||||
cms.update(test_str, 1);
|
||||
|
||||
EXPECT_EQ(cms.estimate(test_str), 3);
|
||||
EXPECT_EQ(cms.estimate(test_str2), 0);
|
||||
}
|
||||
|
||||
TEST_F(sinsp_with_test_input, plugin_anomalydetection_filterchecks_fields)
|
||||
{
|
||||
std::shared_ptr<sinsp_plugin> plugin_owner;
|
||||
filter_check_list pl_flist;
|
||||
ASSERT_PLUGIN_INITIALIZATION(plugin_owner, pl_flist)
|
||||
|
||||
add_default_init_thread();
|
||||
open_inspector();
|
||||
|
||||
/* Create a realistic spawn_process event, adapted from the falcosecurity/libs unit tests */
|
||||
sinsp_evt* evt = NULL;
|
||||
uint64_t parent_pid = 1, parent_tid = 1, child_pid = 20, child_tid = 20, null_pid = 0;
|
||||
uint64_t fdlimit = 1024, pgft_maj = 0, pgft_min = 1;
|
||||
uint64_t exe_ino = 242048, ctime = 1676262698000004588, mtime = 1676262698000004577;
|
||||
uint32_t loginuid = UINT32_MAX - 1, euid = 2000U;
|
||||
scap_const_sized_buffer empty_bytebuf = {.buf = nullptr, .size = 0};
|
||||
|
||||
add_event_advance_ts(increasing_ts(), parent_tid, PPME_SYSCALL_CLONE_20_E, 0);
|
||||
std::vector<std::string> cgroups = {"cgroups=cpuset=/docker/f9c7a020960a15738167a77594bff1f7ac5f5bfdb6646ecbc9b17c7ed7ec5066", "cpu=/docker/f9c7a020960a15738167a77594bff1f7ac5f5bfdb6646ecbc9b17c7ed7ec5066", "cpuacct=/docker/f9c7a020960a15738167a77594bff1f7ac5f5bfdb6646ecbc9b17c7ed7ec5066", "io=/docker/f9c7a020960a15738167a77594bff1f7ac5f5bfdb6646ecbc9b17c7ed7ec5066", "memory=/docker/f9c7a020960a15738167a77594bff1f7ac5f5bfdb6646ecbc9b17c7ed7ec5066", "devices=/docker/f9c7a020960a15738167a77594bff1f7ac5f5bfdb6646ecbc9b17c7ed7ec5066", "freezer=/docker/f9c7a020960a15738167a77594bff1f7ac5f5bfdb6646ecbc9b17c7ed7ec5066", "net_cls=/docker/f9c7a020960a15738167a77594bff1f7ac5f5bfdb6646ecbc9b17c7ed7ec5066", "perf_event=/docker/f9c7a020960a15738167a77594bff1f7ac5f5bfdb6646ecbc9b17c7ed7ec5066", "net_prio=/docker/f9c7a020960a15738167a77594bff1f7ac5f5bfdb6646ecbc9b17c7ed7ec5066", "hugetlb=/docker/f9c7a020960a15738167a77594bff1f7ac5f5bfdb6646ecbc9b17c7ed7ec5066", "pids=/docker/f9c7a020960a15738167a77594bff1f7ac5f5bfdb6646ecbc9b17c7ed7ec5066", "rdma=/docker/f9c7a020960a15738167a77594bff1f7ac5f5bfdb6646ecbc9b17c7ed7ec5066", "misc=/"};
|
||||
std::string cgroupsv = test_utils::to_null_delimited(cgroups);
|
||||
std::vector<std::string> env = {"SHELL=/bin/bash", "SHELL_NEW=/bin/sh", "PWD=/home/user", "HOME=/home/user"};
|
||||
std::string envv = test_utils::to_null_delimited(env);
|
||||
std::vector<std::string> args = {"-c", "'echo aGVsbG8K | base64 -d'"};
|
||||
std::string argsv = test_utils::to_null_delimited(args);
|
||||
|
||||
add_event_advance_ts(increasing_ts(), parent_tid, PPME_SYSCALL_CLONE_20_X, 20, parent_pid, "bash", empty_bytebuf, parent_pid, parent_tid, null_pid, "", fdlimit, pgft_maj, pgft_min, (uint32_t)12088, (uint32_t)7208, (uint32_t)0, "init", scap_const_sized_buffer{cgroupsv.data(), cgroupsv.size()}, (uint32_t)(PPM_CL_CLONE_CHILD_CLEARTID | PPM_CL_CLONE_CHILD_SETTID | PPM_CL_CLONE_NEWPID | PPM_CL_CHILD_IN_PIDNS), (uint32_t)1000, (uint32_t)1000, parent_pid, parent_tid);
|
||||
add_event_advance_ts(increasing_ts(), child_tid, PPME_SYSCALL_CLONE_20_X, 20, (uint64_t)0, "bash", empty_bytebuf, child_pid, child_tid, parent_tid, "", fdlimit, pgft_maj, pgft_min, (uint32_t)12088, (uint32_t)3764, (uint32_t)0, "init", scap_const_sized_buffer{cgroupsv.data(), cgroupsv.size()}, (uint32_t)(PPM_CL_CLONE_CHILD_CLEARTID | PPM_CL_CLONE_CHILD_SETTID | PPM_CL_CLONE_NEWPID | PPM_CL_CHILD_IN_PIDNS), (uint32_t)1000, (uint32_t)1000, child_pid, child_tid);
|
||||
add_event_advance_ts(increasing_ts(), child_tid, PPME_SYSCALL_EXECVE_19_E, 1, "/bin/test-exe");
|
||||
evt = add_event_advance_ts(increasing_ts(), child_tid, PPME_SYSCALL_EXECVE_19_X, 27, (int64_t)0, "/bin/test-exe", scap_const_sized_buffer{argsv.data(), argsv.size()}, child_tid, child_pid, parent_tid, "", fdlimit, pgft_maj, pgft_min, (uint32_t)29612, (uint32_t)4, (uint32_t)0, "test-exe", scap_const_sized_buffer{cgroupsv.data(), cgroupsv.size()}, scap_const_sized_buffer{envv.data(), envv.size()}, (int32_t)34818, child_tid, loginuid, (uint32_t) (PPM_EXE_WRITABLE | PPM_EXE_UPPER_LAYER), parent_pid, parent_pid, parent_pid, exe_ino, ctime, mtime, euid);
|
||||
|
||||
ASSERT_EQ(get_field_as_string(evt, "proc.name"), "test-exe");
|
||||
|
||||
/* Check anomalydetection plugin filter fields */
|
||||
ASSERT_TRUE(field_exists(evt, "anomaly.count_min_sketch", pl_flist));
|
||||
ASSERT_EQ(get_field_as_string(evt, "anomaly.count_min_sketch", pl_flist), "1");
|
||||
ASSERT_EQ(get_field_as_string(evt, "anomaly.count_min_sketch[0]", pl_flist), "1");
|
||||
ASSERT_EQ(get_field_as_string(evt, "anomaly.count_min_sketch[1]", pl_flist), "0");
|
||||
ASSERT_EQ(get_field_as_string(evt, "anomaly.count_min_sketch[2]", pl_flist), "1");
|
||||
|
||||
ASSERT_TRUE(field_exists(evt, "anomaly.count_min_sketch.profile", pl_flist));
|
||||
ASSERT_EQ(get_field_as_string(evt, "anomaly.count_min_sketch.profile", pl_flist), "1024204816762626980000045881676262698000004577110229/home/user/bin/test-exe -c 'echo aGVsbG8K | base64 -d'test-exeinit/bin/test-exe/sbin/init34818201");
|
||||
ASSERT_EQ(get_field_as_string(evt, "anomaly.count_min_sketch.profile[0]", pl_flist), "1024204816762626980000045881676262698000004577110229/home/user/bin/test-exe -c 'echo aGVsbG8K | base64 -d'test-exeinit/bin/test-exe/sbin/init34818201");
|
||||
ASSERT_EQ(get_field_as_string(evt, "anomaly.count_min_sketch.profile[1]", pl_flist), "");
|
||||
ASSERT_EQ(get_field_as_string(evt, "anomaly.count_min_sketch.profile[2]", pl_flist), "test-exeinit/bin/test-exe/sbin/init/bin/test-exe/sbin/inittest-exe -c 'echo aGVsbG8K | base64 -d'test-exe -c 'echo aGVsbG8K | base64 -d'initinittest-exetest-exeinit20201/bin/test-exe/bin/test-exe/sbin/init20test-exe/bin/test-exe/bin/test-exe0init/sbin/init/sbin/init");
|
||||
}
|
||||
|
||||
TEST_F(sinsp_with_test_input, plugin_anomalydetection_filterchecks_fields_proc_lineage)
|
||||
{
|
||||
std::shared_ptr<sinsp_plugin> plugin_owner;
|
||||
filter_check_list pl_flist;
|
||||
ASSERT_PLUGIN_INITIALIZATION(plugin_owner, pl_flist)
|
||||
uint64_t not_relevant_64 = 0;
|
||||
uint64_t pgid = 9999;
|
||||
uint32_t loginuid = UINT32_MAX - 1, euid = 2000U;
|
||||
scap_const_sized_buffer empty_bytebuf = {.buf = nullptr, .size = 0};
|
||||
std::vector<std::string> args = {"-c", "cat test"};
|
||||
std::string argsv = test_utils::to_null_delimited(args);
|
||||
/* Instantiate the default tree */
|
||||
DEFAULT_TREE
|
||||
generate_execve_enter_and_exit_event(0, p2_t1_tid, p2_t1_tid, p2_t1_pid, p2_t1_ptid, "/p2_t1_exepath", "p2_t1_comm", "/usr/bin/p2_t1_exepath");
|
||||
generate_execve_enter_and_exit_event(0, p3_t1_tid, p3_t1_tid, p3_t1_pid, p3_t1_ptid, "/p3_t1_exepath", "p3_t1_comm", "/usr/bin/p3_t1_exepath");
|
||||
generate_execve_enter_and_exit_event(0, p4_t1_tid, p4_t1_tid, p4_t1_pid, p4_t1_ptid, "/p4_t1_exepath", "p4_t1_comm", "/usr/bin/p4_t1_exepath");
|
||||
generate_execve_enter_and_exit_event(0, p4_t2_tid, p4_t1_tid, p4_t1_pid, p4_t1_ptid, "/p4_t1_exepath", "p4_t1_comm", "/usr/bin/p4_t1_exepath");
|
||||
|
||||
add_event_advance_ts(increasing_ts(), p5_t1_tid, PPME_SYSCALL_EXECVE_19_E, 1, "/usr/bin/p5_t1_exepath");
|
||||
add_event_advance_ts(increasing_ts(), p5_t1_tid, PPME_SYSCALL_EXECVE_19_X, 27, (int64_t)0, "/usr/bin/p5_t1_exepath", scap_const_sized_buffer{argsv.data(), argsv.size()}, p5_t1_tid, p5_t1_tid, p5_t1_ptid, "", not_relevant_64, not_relevant_64, not_relevant_64, (uint32_t)29612, (uint32_t)4, (uint32_t)0, "p5_t1_comm", empty_bytebuf, empty_bytebuf, (int32_t)34818, pgid, loginuid, (int32_t) PPM_EXE_WRITABLE, not_relevant_64, not_relevant_64, not_relevant_64, not_relevant_64, not_relevant_64, not_relevant_64, euid);
|
||||
args = {"-c", "'echo aGVsbG8K | base64 -d'"};
|
||||
add_event_advance_ts(increasing_ts(), p6_t1_tid, PPME_SYSCALL_EXECVE_19_E, 1, "/usr/bin/p6_t1_exepath");
|
||||
auto evt = add_event_advance_ts(increasing_ts(), p6_t1_tid, PPME_SYSCALL_EXECVE_19_X, 27, (int64_t)0, "/usr/bin/p6_t1_exepath", scap_const_sized_buffer{argsv.data(), argsv.size()}, p6_t1_tid, p6_t1_tid, p6_t1_ptid, "", not_relevant_64, not_relevant_64, not_relevant_64, (uint32_t)29612, (uint32_t)4, (uint32_t)0, "p6_t1_comm", empty_bytebuf, empty_bytebuf, (int32_t)34818, pgid, loginuid, (int32_t) PPM_EXE_WRITABLE, not_relevant_64, not_relevant_64, not_relevant_64, not_relevant_64, not_relevant_64, not_relevant_64, euid);
|
||||
|
||||
ASSERT_EQ(get_field_as_string(evt, "anomaly.count_min_sketch.profile[2]", pl_flist), "p6_t1_commp5_t1_commp4_t1_commp3_t1_commp2_t1_comminit/usr/bin/p6_t1_exepath/usr/bin/p5_t1_exepath/p4_t1_exepath/p3_t1_exepath/p2_t1_exepath/sbin/init/usr/bin/p6_t1_exepath/usr/bin/p5_t1_exepath/usr/bin/p4_t1_exepath/usr/bin/p3_t1_exepath/usr/bin/p2_t1_exepath/sbin/initp6_t1_comm -c cat testp6_t1_comm -c cat testp5_t1_comm -c cat testp5_t1_comm -c cat testp6_t1_commp6_t1_commp5_t1_commp4_t1_commp3_t1_commp2_t1_comminit8787827672251/usr/bin/p6_t1_exepath/usr/bin/p6_t1_exepath/usr/bin/p5_t1_exepath/usr/bin/p4_t1_exepath/usr/bin/p3_t1_exepath/usr/bin/p2_t1_exepath/sbin/init9999p5_t1_comm/usr/bin/p5_t1_exepath/usr/bin/p5_t1_exepath0init/sbin/init/sbin/init");
|
||||
}
|
||||
|
||||
TEST_F(sinsp_with_test_input, plugin_anomalydetection_filterchecks_fields_fd)
|
||||
{
|
||||
std::shared_ptr<sinsp_plugin> plugin_owner;
|
||||
filter_check_list pl_flist;
|
||||
ASSERT_PLUGIN_INITIALIZATION(plugin_owner, pl_flist)
|
||||
add_default_init_thread();
|
||||
|
||||
sinsp_evt *evt;
|
||||
open_inspector();
|
||||
|
||||
uint64_t ino = 777;
|
||||
int64_t fd = 4;
|
||||
add_event(increasing_ts(), 3, PPME_SYSCALL_OPEN_E, 3, "/tmp/subdir1/subdir2/subdir3/subdir4/../../the_file", 0, 0);
|
||||
add_event_advance_ts(increasing_ts(), 3, PPME_SYSCALL_OPEN_X, 6, fd, "/tmp/../../../some_other_file", 0, 0, 0, ino);
|
||||
fd = 5;
|
||||
add_event(increasing_ts(), 3, PPME_SYSCALL_OPEN_E, 3, "/tmp/subdir1/subdir2/subdir3/subdir4/../../the_file2", 0, 0);
|
||||
evt = add_event_advance_ts(increasing_ts(), 3, PPME_SYSCALL_OPEN_X, 6, fd, "/tmp/../../../some_other_file2", 0, 0, 0, ino);
|
||||
ASSERT_EQ(get_field_as_string(evt, "fd.num"), "5");
|
||||
ASSERT_EQ(get_field_as_string(evt, "fd.name"), "/tmp/subdir1/subdir2/the_file2");
|
||||
ASSERT_EQ(get_field_as_string(evt, "fd.directory"), "/tmp/subdir1/subdir2");
|
||||
ASSERT_EQ(get_field_as_string(evt, "anomaly.count_min_sketch.profile[1]", pl_flist), "-15/tmp/subdir1/subdir2/the_file2/tmp/subdir1/subdir2the_file20777/tmp/subdir1/subdir2/subdir3/subdir4/../../the_file2");
|
||||
|
||||
evt = NULL;
|
||||
uint64_t dirfd = 3, new_fd = 100;
|
||||
add_event_advance_ts(increasing_ts(), 1, PPME_SYSCALL_OPENAT2_E, 5, dirfd, "<NA>", 0, 0, 0);
|
||||
evt = add_event_advance_ts(increasing_ts(), 1, PPME_SYSCALL_OPENAT2_X, 8, new_fd, dirfd, "/tmp/dir1/../the_file", 0, 0, 0, 0, ino);
|
||||
ASSERT_EQ(get_field_as_string(evt, "proc.pid"), "1");
|
||||
ASSERT_EQ(get_field_as_string(evt, "fd.name"), "/tmp/the_file");
|
||||
ASSERT_EQ(get_field_as_string(evt, "fd.nameraw"), "/tmp/dir1/../the_file");
|
||||
ASSERT_EQ(get_field_as_string(evt, "anomaly.count_min_sketch.profile[1]", pl_flist), "1100/tmp/the_file/tmpthe_file0777/tmp/dir1/../the_file");
|
||||
|
||||
evt = NULL;
|
||||
fd = 4;
|
||||
int64_t mountfd = 5;
|
||||
add_event_advance_ts(increasing_ts(), 1, PPME_SYSCALL_OPEN_BY_HANDLE_AT_E, 0);
|
||||
evt = add_event_advance_ts(increasing_ts(), 1, PPME_SYSCALL_OPEN_BY_HANDLE_AT_X, 6, fd, mountfd, PPM_O_RDWR, "/tmp/open_handle.txt", 0, ino);
|
||||
ASSERT_EQ(get_field_as_string(evt, "anomaly.count_min_sketch.profile[1]", pl_flist), "14/tmp/open_handle.txt/tmpopen_handle.txt0777/tmp/open_handle.txt");
|
||||
}
|
||||
|
||||
TEST_F(sinsp_with_test_input, plugin_anomalydetection_filterchecks_fields_fd_null_fd_table)
|
||||
{
|
||||
std::shared_ptr<sinsp_plugin> plugin_owner;
|
||||
filter_check_list pl_flist;
|
||||
ASSERT_PLUGIN_INITIALIZATION(plugin_owner, pl_flist)
|
||||
add_default_init_thread();
|
||||
|
||||
sinsp_evt *evt;
|
||||
open_inspector();
|
||||
|
||||
uint64_t ino = 777;
|
||||
int64_t fd = 4;
|
||||
add_event(increasing_ts(), 1, PPME_SYSCALL_OPEN_E, 3, "subdir1//../the_file2", 0, 0);
|
||||
evt = add_event_advance_ts(increasing_ts(), 1, PPME_SYSCALL_OPEN_X, 6, fd, "subdir1//../the_file2", 0, 0, 0, ino);
|
||||
|
||||
sinsp_fdinfo* fdinfo = evt->get_thread_info()->get_fd(fd);
|
||||
ASSERT_EQ(get_field_as_string(evt, "fd.num"), "4");
|
||||
ASSERT_EQ(get_field_as_string(evt, "fd.name"), "/root/the_file2");
|
||||
ASSERT_EQ(get_field_as_string(evt, "proc.cwd"), "/root/");
|
||||
fdinfo->m_name.clear();
|
||||
fdinfo->m_name_raw.clear();
|
||||
ASSERT_EQ(get_field_as_string(evt, "anomaly.count_min_sketch.profile[1]", pl_flist), "14/root/the_file2/rootthe_file20777subdir1//../the_file2");
|
||||
|
||||
evt = NULL;
|
||||
uint64_t dirfd = 8, new_fd = 100;
|
||||
fd = 8;
|
||||
add_event(increasing_ts(), 1, PPME_SYSCALL_OPEN_E, 3, "/tmp/subdir1/subdir2/../the_file2", 0, 0);
|
||||
evt = add_event_advance_ts(increasing_ts(), 1, PPME_SYSCALL_OPEN_X, 6, fd, "/tmp/subdir1/subdir2/../the_file2", 0, 0, 0, ino);
|
||||
add_event_advance_ts(increasing_ts(), 1, PPME_SYSCALL_OPENAT2_E, 5, dirfd, "subdir1//../the_file", 0, 0, 0);
|
||||
evt = add_event_advance_ts(increasing_ts(), 1, PPME_SYSCALL_OPENAT2_X, 8, new_fd, dirfd, "subdir1//../the_file", 0, 0, 0, 0, ino);
|
||||
ASSERT_EQ(get_field_as_string(evt, "fd.num"), "100");
|
||||
ASSERT_EQ(get_field_as_string(evt, "fd.name"), "/tmp/subdir1/the_file2/the_file");
|
||||
ASSERT_EQ(get_field_as_string(evt, "fs.path.name"), "/root/the_file"); // todo fix in falcosecurity/libs as its wrong; ETA falco 0.39.0
|
||||
ASSERT_EQ(get_field_as_string(evt, "proc.cwd"), "/root/");
|
||||
fdinfo = evt->get_thread_info()->get_fd(new_fd);
|
||||
fdinfo->m_name.clear();
|
||||
fdinfo->m_name_raw.clear();
|
||||
ASSERT_EQ(get_field_as_string(evt, "anomaly.count_min_sketch.profile[1]", pl_flist), "1100/tmp/subdir1/the_file/tmp/subdir1the_file0777subdir1//../the_file");
|
||||
|
||||
evt = NULL;
|
||||
fd = 4;
|
||||
int64_t mountfd = 5;
|
||||
add_event_advance_ts(increasing_ts(), 1, PPME_SYSCALL_OPEN_BY_HANDLE_AT_E, 0);
|
||||
evt = add_event_advance_ts(increasing_ts(), 1, PPME_SYSCALL_OPEN_BY_HANDLE_AT_X, 6, fd, mountfd, PPM_O_RDWR, "/tmp/open_handle.txt", 0, ino);
|
||||
ASSERT_EQ(get_field_as_string(evt, "fd.num"), "4");
|
||||
ASSERT_EQ(get_field_as_string(evt, "fd.name"), "/tmp/open_handle.txt");
|
||||
ASSERT_EQ(get_field_as_string(evt, "proc.cwd"), "/root/");
|
||||
fdinfo = evt->get_thread_info()->get_fd(fd);
|
||||
fdinfo->m_name.clear();
|
||||
fdinfo->m_name_raw.clear();
|
||||
ASSERT_EQ(get_field_as_string(evt, "anomaly.count_min_sketch.profile[1]", pl_flist), "14/tmp/open_handle.txt/tmpopen_handle.txt0777/tmp/open_handle.txt");
|
||||
}
|
||||
|
||||
TEST_F(sinsp_with_test_input, plugin_anomalydetection_filterchecks_fields_fd_network)
|
||||
{
|
||||
std::shared_ptr<sinsp_plugin> plugin_owner;
|
||||
filter_check_list pl_flist;
|
||||
ASSERT_PLUGIN_INITIALIZATION(plugin_owner, pl_flist)
|
||||
add_default_init_thread();
|
||||
|
||||
open_inspector();
|
||||
sinsp_evt* evt = NULL;
|
||||
sinsp_fdinfo* fdinfo = NULL;
|
||||
int64_t client_fd = 8;
|
||||
int64_t return_value = 0;
|
||||
|
||||
add_event_advance_ts(increasing_ts(), 1, PPME_SOCKET_SOCKET_E, 3, (uint32_t) PPM_AF_INET, (uint32_t) SOCK_STREAM, (uint32_t) 0);
|
||||
add_event_advance_ts(increasing_ts(), 1, PPME_SOCKET_SOCKET_X, 1, client_fd);
|
||||
|
||||
sockaddr_in client = test_utils::fill_sockaddr_in(DEFAULT_CLIENT_PORT, DEFAULT_IPV4_CLIENT_STRING);
|
||||
sockaddr_in server = test_utils::fill_sockaddr_in(DEFAULT_SERVER_PORT, DEFAULT_IPV4_SERVER_STRING);
|
||||
|
||||
std::vector<uint8_t> server_sockaddr = test_utils::pack_sockaddr(reinterpret_cast<sockaddr*>(&server));
|
||||
evt = add_event_advance_ts(increasing_ts(), 1, PPME_SOCKET_CONNECT_E, 2, client_fd, scap_const_sized_buffer{server_sockaddr.data(), server_sockaddr.size()});
|
||||
std::vector<uint8_t> socktuple = test_utils::pack_socktuple(reinterpret_cast<sockaddr*>(&client), reinterpret_cast<sockaddr*>(&server));
|
||||
evt = add_event_advance_ts(increasing_ts(), 1, PPME_SOCKET_CONNECT_X, 3, return_value, scap_const_sized_buffer{socktuple.data(), socktuple.size()}, client_fd);
|
||||
|
||||
/* We are able to recover the fdinfo in the connect exit event even when interleaved */
|
||||
fdinfo = evt->get_fd_info();
|
||||
ASSERT_NE(fdinfo, nullptr);
|
||||
|
||||
ASSERT_EQ(get_field_as_string(evt, "fd.connected"), "true");
|
||||
ASSERT_EQ(get_field_as_string(evt, "fd.name"), "172.40.111.222:54321->142.251.111.147:443");
|
||||
ASSERT_EQ(get_field_as_string(evt, "fd.rip"), "172.40.111.222");
|
||||
ASSERT_EQ(get_field_as_string(evt, "fd.lip"), "142.251.111.147");
|
||||
ASSERT_EQ(get_field_as_string(evt, "fd.cip"), "172.40.111.222");
|
||||
ASSERT_EQ(get_field_as_string(evt, "fd.sip"), "142.251.111.147");
|
||||
ASSERT_EQ(get_field_as_string(evt, "fd.num"), "8");
|
||||
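// profile[1] concatenates %proc.pid, %custom.fd.name.part1, %custom.fd.name.part2,
// %fd.num and %fd.name; the remaining file-oriented fd.* fields contribute nothing for sockets.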
ASSERT_EQ(get_field_as_string(evt, "anomaly.count_min_sketch.profile[1]", pl_flist), "1172.40.111.222:54321142.251.111.147:4438172.40.111.222:54321->142.251.111.147:443");
|
||||
|
||||
client = test_utils::fill_sockaddr_in(DEFAULT_CLIENT_PORT, DEFAULT_IPV4_CLIENT_STRING);
|
||||
std::vector<uint8_t> st = test_utils::pack_socktuple(reinterpret_cast<sockaddr*>(&client), reinterpret_cast<sockaddr*>(&server));
|
||||
|
||||
int64_t new_connected_fd = 6;
|
||||
add_event_advance_ts(increasing_ts(), 1, PPME_SOCKET_ACCEPT_5_E, 0);
|
||||
add_event_advance_ts(increasing_ts(), 1, PPME_SOCKET_ACCEPT_5_X, 5, new_connected_fd, scap_const_sized_buffer{st.data(), st.size()}, (uint8_t) 0, (uint32_t) 0, (uint32_t) 5);
|
||||
ASSERT_EQ(get_field_as_string(evt, "fd.name"), "172.40.111.222:54321->142.251.111.147:443");
|
||||
ASSERT_EQ(get_field_as_string(evt, "fd.num"), "6");
|
||||
ASSERT_EQ(get_field_as_string(evt, "fd.rip"), "172.40.111.222");
|
||||
ASSERT_EQ(get_field_as_string(evt, "fd.lip"), "142.251.111.147");
|
||||
ASSERT_EQ(get_field_as_string(evt, "fd.cip"), "172.40.111.222");
|
||||
ASSERT_EQ(get_field_as_string(evt, "fd.sip"), "142.251.111.147");
|
||||
ASSERT_EQ(get_field_as_string(evt, "anomaly.count_min_sketch.profile[1]", pl_flist), "1172.40.111.222:54321142.251.111.147:4436172.40.111.222:54321->142.251.111.147:443");
|
||||
}
|
||||
|
||||
TEST_F(sinsp_with_test_input, plugin_anomalydetection_filterchecks_fields_fd_network_null_fd_table)
|
||||
{
|
||||
std::shared_ptr<sinsp_plugin> plugin_owner;
|
||||
filter_check_list pl_flist;
|
||||
ASSERT_PLUGIN_INITIALIZATION(plugin_owner, pl_flist)
|
||||
add_default_init_thread();
|
||||
|
||||
open_inspector();
|
||||
sinsp_evt* evt = NULL;
|
||||
sinsp_fdinfo* fdinfo = NULL;
|
||||
int64_t client_fd = 8;
|
||||
int64_t return_value = 0;
|
||||
|
||||
add_event_advance_ts(increasing_ts(), 1, PPME_SOCKET_SOCKET_E, 3, (uint32_t) PPM_AF_INET, (uint32_t) SOCK_STREAM, (uint32_t) 0);
|
||||
add_event_advance_ts(increasing_ts(), 1, PPME_SOCKET_SOCKET_X, 1, client_fd);
|
||||
|
||||
sockaddr_in client = test_utils::fill_sockaddr_in(DEFAULT_CLIENT_PORT, DEFAULT_IPV4_CLIENT_STRING);
|
||||
sockaddr_in server = test_utils::fill_sockaddr_in(DEFAULT_SERVER_PORT, DEFAULT_IPV4_SERVER_STRING);
|
||||
|
||||
std::vector<uint8_t> server_sockaddr = test_utils::pack_sockaddr(reinterpret_cast<sockaddr*>(&server));
|
||||
evt = add_event_advance_ts(increasing_ts(), 1, PPME_SOCKET_CONNECT_E, 2, client_fd, scap_const_sized_buffer{server_sockaddr.data(), server_sockaddr.size()});
|
||||
std::vector<uint8_t> socktuple = test_utils::pack_socktuple(reinterpret_cast<sockaddr*>(&client), reinterpret_cast<sockaddr*>(&server));
|
||||
evt = add_event_advance_ts(increasing_ts(), 1, PPME_SOCKET_CONNECT_X, 3, return_value, scap_const_sized_buffer{socktuple.data(), socktuple.size()}, client_fd);
|
||||
|
||||
/* We are able to recover the fdinfo in the connect exit event even when interleaved */
|
||||
fdinfo = evt->get_fd_info();
|
||||
ASSERT_NE(fdinfo, nullptr);
|
||||
fdinfo->m_name.clear();
|
||||
fdinfo->m_name_raw.clear();
|
||||
ASSERT_EQ(get_field_as_string(evt, "fd.num"), "8");
|
||||
// no fallbacks atm
|
||||
ASSERT_EQ(get_field_as_string(evt, "anomaly.count_min_sketch.profile[1]", pl_flist), "18");
|
||||
|
||||
client = test_utils::fill_sockaddr_in(DEFAULT_CLIENT_PORT, DEFAULT_IPV4_CLIENT_STRING);
|
||||
std::vector<uint8_t> st = test_utils::pack_socktuple(reinterpret_cast<sockaddr*>(&client), reinterpret_cast<sockaddr*>(&server));
|
||||
|
||||
int64_t new_connected_fd = 6;
|
||||
add_event_advance_ts(increasing_ts(), 1, PPME_SOCKET_ACCEPT_5_E, 0);
|
||||
add_event_advance_ts(increasing_ts(), 1, PPME_SOCKET_ACCEPT_5_X, 5, new_connected_fd, scap_const_sized_buffer{st.data(), st.size()}, (uint8_t) 0, (uint32_t) 0, (uint32_t) 5);
|
||||
fdinfo = evt->get_fd_info();
|
||||
fdinfo->m_name.clear();
|
||||
fdinfo->m_name_raw.clear();
|
||||
ASSERT_EQ(get_field_as_string(evt, "fd.num"), "6");
|
||||
// no fallbacks atm
|
||||
ASSERT_EQ(get_field_as_string(evt, "anomaly.count_min_sketch.profile[1]", pl_flist), "16");
|
||||
}
|
|
@ -1,5 +1,74 @@
|
|||
# Changelog
|
||||
|
||||
## dev build (unreleased)
|
||||
|
||||
* [`a214622`](https://github.com/falcosecurity/plugins/commit/a214622) build(deps): bump github.com/aws/aws-lambda-go in /plugins/cloudtrail
|
||||
|
||||
## v0.13.0
|
||||
|
||||
* [`ecff28f`](https://github.com/falcosecurity/plugins/commit/ecff28f) update(cloudtrail): bump to v0.13.0
|
||||
|
||||
* [`ce4e3fc`](https://github.com/falcosecurity/plugins/commit/ce4e3fc) build(deps): bump github.com/aws/aws-lambda-go in /plugins/cloudtrail
|
||||
|
||||
* [`65c9973`](https://github.com/falcosecurity/plugins/commit/65c9973) chore(cloudtrail): allow SQSOwnerAccount parameter
|
||||
|
||||
* [`ba252e3`](https://github.com/falcosecurity/plugins/commit/ba252e3) update(plugins/cloudtrail): upgrade deps
|
||||
|
||||
|
||||
## v0.12.5
|
||||
|
||||
* [`f2fe57d`](https://github.com/falcosecurity/plugins/commit/f2fe57d) update(plugins/cloudtrail): support pre-ControlTower organization trails
|
||||
|
||||
* [`2ea1083`](https://github.com/falcosecurity/plugins/commit/2ea1083) update(plugins/cloudtrail): upgrade direct deps
|
||||
|
||||
|
||||
## v0.12.4
|
||||
|
||||
* [`9663407`](https://github.com/falcosecurity/plugins/commit/9663407) build(deps): bump github.com/aws/aws-sdk-go-v2 in /plugins/cloudtrail
|
||||
|
||||
* [`f6e5098`](https://github.com/falcosecurity/plugins/commit/f6e5098) update(plugins/cloudtrail): bump to v0.12.3
|
||||
|
||||
* [`56c0599`](https://github.com/falcosecurity/plugins/commit/56c0599) build(deps): bump github.com/invopop/jsonschema in /plugins/cloudtrail
|
||||
|
||||
|
||||
## v0.12.3
|
||||
|
||||
* [`fbd9f48`](https://github.com/falcosecurity/plugins/commit/fbd9f48) update(cloudtrail): Update ct.resources handling
|
||||
|
||||
|
||||
## v0.12.2
|
||||
|
||||
* [`63b7093`](https://github.com/falcosecurity/plugins/commit/63b7093) chore(plugin/cloudtrail): bump cloudtrail version to 0.10.0
|
||||
|
||||
|
||||
## v0.12.1
|
||||
|
||||
* [`c4ed2ca`](https://github.com/falcosecurity/plugins/commit/c4ed2ca) chore(plugins/cloudtrail): update changelog
|
||||
|
||||
* [`d775f53`](https://github.com/falcosecurity/plugins/commit/d775f53) chore(cloudtrail): replace moved package
|
||||
|
||||
* [`f43ca43`](https://github.com/falcosecurity/plugins/commit/f43ca43) chore(cloudtrail): update Go and dependencies
|
||||
|
||||
|
||||
## v0.12.0
|
||||
|
||||
* [`b31948c`](https://github.com/falcosecurity/plugins/commit/b31948c) refactor(cloudtrail): Get S3 keys concurrently
|
||||
|
||||
* [`9920d35`](https://github.com/falcosecurity/plugins/commit/9920d35) feat(cloudtrail): support accounts for org trails
|
||||
|
||||
* [`746ea98`](https://github.com/falcosecurity/plugins/commit/746ea98) feat(cloudtrail): Support for organization trails
|
||||
|
||||
* [`9a1f86a`](https://github.com/falcosecurity/plugins/commit/9a1f86a) feat(cloudtrail): Add generic additionalEventData field
|
||||
|
||||
* [`0e4a687`](https://github.com/falcosecurity/plugins/commit/0e4a687) feat(cloudtrail): Add ct.response and ct.request field
|
||||
|
||||
|
||||
## v0.11.0
|
||||
|
||||
|
||||
## v0.10.0
|
||||
|
||||
|
||||
## v0.9.1
|
||||
|
||||
* [`16306f2`](https://github.com/falcosecurity/plugins/commit/16306f2) update(cloudtrail): bump version to 0.9.1
|
||||
|
|
|
@ -19,52 +19,74 @@ The event source for cloudtrail events is `aws_cloudtrail`.
|
|||
Here is the current set of supported fields:
|
||||
|
||||
<!-- README-PLUGIN-FIELDS -->
|
||||
| NAME | TYPE | ARG | DESCRIPTION |
|
||||
|-------------------------------|----------|------|----------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| `ct.id` | `string` | None | the unique ID of the cloudtrail event (eventID in the json). |
|
||||
| `ct.error` | `string` | None | The error code from the event. Will be "<NA>" (e.g. the NULL/empty/none value) if there was no error. |
|
||||
| `ct.time` | `string` | None | the timestamp of the cloudtrail event (eventTime in the json). |
|
||||
| `ct.src` | `string` | None | the source of the cloudtrail event (eventSource in the json). |
|
||||
| `ct.shortsrc` | `string` | None | the source of the cloudtrail event (eventSource in the json, without the '.amazonaws.com' trailer). |
|
||||
| `ct.name` | `string` | None | the name of the cloudtrail event (eventName in the json). |
|
||||
| `ct.user` | `string` | None | the user of the cloudtrail event (userIdentity.userName in the json). |
|
||||
| `ct.user.accountid` | `string` | None | the account id of the user of the cloudtrail event. |
|
||||
| `ct.user.identitytype` | `string` | None | the kind of user identity (e.g. Root, IAMUser,AWSService, etc.) |
|
||||
| `ct.user.principalid` | `string` | None | A unique identifier for the user that made the request. |
|
||||
| `ct.user.arn` | `string` | None | the Amazon Resource Name (ARN) of the user that made the request. |
|
||||
| `ct.region` | `string` | None | the region of the cloudtrail event (awsRegion in the json). |
|
||||
| `ct.response.subnetid` | `string` | None | the subnet ID included in the response. |
|
||||
| `ct.response.reservationid` | `string` | None | the reservation ID included in the response. |
|
||||
| `ct.request.availabilityzone` | `string` | None | the availability zone included in the request. |
|
||||
| `ct.request.cluster` | `string` | None | the cluster included in the request. |
|
||||
| `ct.request.functionname` | `string` | None | the function name included in the request. |
|
||||
| `ct.request.groupname` | `string` | None | the group name included in the request. |
|
||||
| `ct.request.host` | `string` | None | the host included in the request |
|
||||
| `ct.request.name` | `string` | None | the name of the entity being acted on in the request. |
|
||||
| `ct.request.policy` | `string` | None | the policy included in the request |
|
||||
| `ct.request.serialnumber` | `string` | None | the serial number provided in the request. |
|
||||
| `ct.request.servicename` | `string` | None | the service name provided in the request. |
|
||||
| `ct.request.subnetid` | `string` | None | the subnet ID provided in the request. |
|
||||
| `ct.request.taskdefinition`     | `string` | None | the task definition provided in the request.                                                                                                             |
|
||||
| `ct.request.username` | `string` | None | the username provided in the request. |
|
||||
| `ct.srcip` | `string` | None | the IP address generating the event (sourceIPAddress in the json). |
|
||||
| `ct.useragent` | `string` | None | the user agent generating the event (userAgent in the json). |
|
||||
| `ct.info` | `string` | None | summary information about the event. This varies depending on the event type and, for some events, it contains event-specific details. |
|
||||
| `ct.managementevent` | `string` | None | 'true' if the event is a management event (AwsApiCall, AwsConsoleAction, AwsConsoleSignIn, or AwsServiceEvent), 'false' otherwise. |
|
||||
| `ct.readonly` | `string` | None | 'true' if the event only reads information (e.g. DescribeInstances), 'false' if the event modifies the state (e.g. RunInstances, CreateLoadBalancer...). |
|
||||
| `s3.uri` | `string` | None | the s3 URI (s3://<bucket>/<key>). |
|
||||
| `s3.bucket` | `string` | None | the bucket name for s3 events. |
|
||||
| `s3.key` | `string` | None | the S3 key name. |
|
||||
| `s3.bytes` | `uint64` | None | the size of an s3 download or upload, in bytes. |
|
||||
| `s3.bytes.in` | `uint64` | None | the size of an s3 upload, in bytes. |
|
||||
| `s3.bytes.out` | `uint64` | None | the size of an s3 download, in bytes. |
|
||||
| `s3.cnt.get` | `uint64` | None | the number of get operations. This field is 1 for GetObject events, 0 otherwise. |
|
||||
| `s3.cnt.put` | `uint64` | None | the number of put operations. This field is 1 for PutObject events, 0 otherwise. |
|
||||
| `s3.cnt.other` | `uint64` | None | the number of non I/O operations. This field is 0 for GetObject and PutObject events, 1 for all the other events. |
|
||||
| `ec2.name` | `string` | None | the name of the ec2 instances, typically stored in the instance tags. |
|
||||
| `ec2.imageid` | `string` | None | the ID for the image used to run the ec2 instance in the response. |
|
||||
| `ecr.repository` | `string` | None | the name of the ecr Repository specified in the request. |
|
||||
| `ecr.imagetag` | `string` | None | the tag of the image specified in the request. |
|
||||
| NAME | TYPE | ARG | DESCRIPTION |
|
||||
|------------------------------------------|----------|------|----------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| `ct.id` | `string` | None | the unique ID of the cloudtrail event (eventID in the json). |
|
||||
| `ct.error` | `string` | None | The error code from the event. Will be "<NA>" (e.g. the NULL/empty/none value) if there was no error. |
|
||||
| `ct.errormessage` | `string` | None | The description of an error. Will be "<NA>" (e.g. the NULL/empty/none value) if there was no error. |
|
||||
| `ct.time` | `string` | None | the timestamp of the cloudtrail event (eventTime in the json). |
|
||||
| `ct.src` | `string` | None | the source of the cloudtrail event (eventSource in the json). |
|
||||
| `ct.shortsrc` | `string` | None | the source of the cloudtrail event (eventSource in the json, without the '.amazonaws.com' trailer). |
|
||||
| `ct.name` | `string` | None | the name of the cloudtrail event (eventName in the json). |
|
||||
| `ct.user` | `string` | None | the user of the cloudtrail event (userIdentity.userName in the json). |
|
||||
| `ct.user.accountid` | `string` | None | the account id of the user of the cloudtrail event. |
|
||||
| `ct.user.identitytype` | `string` | None | the kind of user identity (e.g. Root, IAMUser,AWSService, etc.) |
|
||||
| `ct.user.principalid` | `string` | None | A unique identifier for the user that made the request. |
|
||||
| `ct.user.arn` | `string` | None | the Amazon Resource Name (ARN) of the user that made the request. |
|
||||
| `ct.region` | `string` | None | the region of the cloudtrail event (awsRegion in the json). |
|
||||
| `ct.response.subnetid` | `string` | None | the subnet ID included in the response. |
|
||||
| `ct.response.reservationid` | `string` | None | the reservation ID included in the response. |
|
||||
| `ct.response` | `string` | None | All response elements. |
|
||||
| `ct.request.availabilityzone` | `string` | None | the availability zone included in the request. |
|
||||
| `ct.request.cluster` | `string` | None | the cluster included in the request. |
|
||||
| `ct.request.functionname` | `string` | None | the function name included in the request. |
|
||||
| `ct.request.groupname` | `string` | None | the group name included in the request. |
|
||||
| `ct.request.host` | `string` | None | the host included in the request |
|
||||
| `ct.request.name` | `string` | None | the name of the entity being acted on in the request. |
|
||||
| `ct.request.policy` | `string` | None | the policy included in the request |
|
||||
| `ct.request.serialnumber` | `string` | None | the serial number provided in the request. |
|
||||
| `ct.request.servicename` | `string` | None | the service name provided in the request. |
|
||||
| `ct.request.subnetid` | `string` | None | the subnet ID provided in the request. |
|
||||
| `ct.request.taskdefinition`              | `string` | None | the task definition provided in the request.                                                                                                             |
|
||||
| `ct.request.username` | `string` | None | the username provided in the request. |
|
||||
| `ct.request` | `string` | None | All request parameters. |
|
||||
| `ct.srcip` | `string` | None | the IP address generating the event (sourceIPAddress in the json). |
|
||||
| `ct.useragent` | `string` | None | the user agent generating the event (userAgent in the json). |
|
||||
| `ct.info` | `string` | None | summary information about the event. This varies depending on the event type and, for some events, it contains event-specific details. |
|
||||
| `ct.managementevent` | `string` | None | 'true' if the event is a management event (AwsApiCall, AwsConsoleAction, AwsConsoleSignIn, or AwsServiceEvent), 'false' otherwise. |
|
||||
| `ct.readonly` | `string` | None | 'true' if the event only reads information (e.g. DescribeInstances), 'false' if the event modifies the state (e.g. RunInstances, CreateLoadBalancer...). |
|
||||
| `ct.requestid` | `string` | None | The value that identifies the request. |
|
||||
| `ct.eventtype` | `string` | None | Identifies the type of event that generated the event record. |
|
||||
| `ct.apiversion` | `string` | None | The API version associated with the AwsApiCall eventType value. |
|
||||
| `ct.resources` | `string` | None | A list of resources accessed in the event. |
|
||||
| `ct.recipientaccountid` | `string` | None | The account ID that received this event. |
|
||||
| `ct.serviceeventdetails` | `string` | None | Identifies the service event, including what triggered the event and the result. |
|
||||
| `ct.sharedeventid` | `string` | None | GUID generated by CloudTrail to uniquely identify CloudTrail events. |
|
||||
| `ct.vpcendpointid` | `string` | None | Identifies the VPC endpoint in which requests were made. |
|
||||
| `ct.eventcategory` | `string` | None | Shows the event category that is used in LookupEvents calls. |
|
||||
| `ct.addendum.reason` | `string` | None | The reason that the event or some of its contents were missing. |
|
||||
| `ct.addendum.updatedfields` | `string` | None | The event record fields that are updated by the addendum. |
|
||||
| `ct.addendum.originalrequestid` | `string` | None | The original unique ID of the request. |
|
||||
| `ct.addendum.originaleventid` | `string` | None | The original event ID. |
|
||||
| `ct.sessioncredentialfromconsole` | `string` | None | Shows whether or not an event originated from an AWS Management Console session. |
|
||||
| `ct.edgedevicedetails` | `string` | None | Information about edge devices that are targets of a request. |
|
||||
| `ct.tlsdetails.tlsversion` | `string` | None | The TLS version of a request. |
|
||||
| `ct.tlsdetails.ciphersuite` | `string` | None | The cipher suite (combination of security algorithms used) of a request. |
|
||||
| `ct.tlsdetails.clientprovidedhostheader` | `string` | None | The client-provided host name used in the service API call. |
|
||||
| `ct.additionaleventdata` | `string` | None | All additional event data attributes. |
|
||||
| `s3.uri` | `string` | None | the s3 URI (s3://<bucket>/<key>). |
|
||||
| `s3.bucket` | `string` | None | the bucket name for s3 events. |
|
||||
| `s3.key` | `string` | None | the S3 key name. |
|
||||
| `s3.bytes` | `uint64` | None | the size of an s3 download or upload, in bytes. |
|
||||
| `s3.bytes.in` | `uint64` | None | the size of an s3 upload, in bytes. |
|
||||
| `s3.bytes.out` | `uint64` | None | the size of an s3 download, in bytes. |
|
||||
| `s3.cnt.get` | `uint64` | None | the number of get operations. This field is 1 for GetObject events, 0 otherwise. |
|
||||
| `s3.cnt.put` | `uint64` | None | the number of put operations. This field is 1 for PutObject events, 0 otherwise. |
|
||||
| `s3.cnt.other` | `uint64` | None | the number of non I/O operations. This field is 0 for GetObject and PutObject events, 1 for all the other events. |
|
||||
| `ec2.name` | `string` | None | the name of the ec2 instances, typically stored in the instance tags. |
|
||||
| `ec2.imageid` | `string` | None | the ID for the image used to run the ec2 instance in the response. |
|
||||
| `ecr.repository` | `string` | None | the name of the ecr Repository specified in the request. |
|
||||
| `ecr.imagetag` | `string` | None | the tag of the image specified in the request. |
|
||||
<!-- /README-PLUGIN-FIELDS -->
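
As a quick illustration of how these fields combine in a rule, the sketch below flags state-changing API calls made with the root identity. It is illustrative only: the rule name, condition, and output string are made up for this example and are not shipped with the plugin.

```yaml
- rule: Example CloudTrail Root Activity
  desc: Illustrative only, flags non read-only API calls performed with the AWS root identity
  condition: ct.user.identitytype = "Root" and ct.readonly = "false"
  output: Root identity modified AWS state (event=%ct.name source=%ct.shortsrc region=%ct.region ip=%ct.srcip error=%ct.error)
  priority: WARNING
  source: aws_cloudtrail
```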

## Handling AWS Authentication

@ -108,6 +130,8 @@ The json object has the following properties:

* `s3DownloadConcurrency`: value is numeric. Controls the number of background goroutines used to download S3 files. (Default: 1)
* `S3Interval`: value is string. Download log files matching the specified time interval. Note that this matches log file *names*, not event timestamps. CloudTrail logs usually cover [the previous 5 minutes of activity](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/get-and-view-cloudtrail-log-files.html). See *Time Intervals* below for possible formats.
* `useS3SNS`: value is boolean. If true, then the plugin will expect SNS messages to originate from S3 instead of directly from Cloudtrail. (Default: false)
* `S3AccountList`: value is string. Download log files matching the specified account IDs (in a comma-separated list) in an organization trail. See *Read From S3 Bucket Directly* below for more details.
* `SQSOwnerAccount`: value is string. The AWS account ID that owns the SQS queue, in case the queue is owned by a different account. Not required by default.
The init string can be the empty string, which is treated identically to `{}`. A minimal configuration sketch combining the options above is shown below.
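
For orientation, a falco.yaml sketch that sets these options might look like the following. This is illustrative only: the library path and all values are placeholders, and the key casing follows the JSON tags of the plugin's config struct (for example `s3AccountList` and `sqsOwnerAccount`) rather than the capitalized names used in the list above.

```yaml
plugins:
  - name: cloudtrail
    library_path: libcloudtrail.so    # placeholder path
    init_config:
      s3DownloadConcurrency: 4        # download up to 4 S3 files in parallel
      sqsDelete: true                 # delete SQS messages after receiving them (default)
      useAsync: true                  # keep async extraction enabled (default)
      useS3SNS: false                 # set to true when SNS messages originate from S3
      s3AccountList: ""               # only relevant for organization trails
      sqsOwnerAccount: ""             # only needed when another account owns the SQS queue
```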

@ -143,10 +167,16 @@ All objects below the bucket, or below the bucket + prefix, will be considered c

For example, if a bucket `my-s3-bucket` contained cloudtrail logs below a prefix `AWSLogs/411571310278/CloudTrail/us-west-1/2021/09/23/`, using an open param of `s3://my-s3-bucket/AWSLogs/411571310278/CloudTrail/us-west-1/2021/09/23/` would configure the plugin to read all files below `AWSLogs/411571310278/CloudTrail/us-west-1/2021/09/23/` as cloudtrail logs and then return EOF. No other files in the bucket will be read.

For organization trails the files are normally stored like `s3://bucket_name/prefix_name/AWSLogs/O-ID/Account ID/CloudTrail/Region/YYYY/MM/DD/file_name.json.gz`. Using an open parameter of `s3://my-s3-bucket/AWSLogs/o-123abc/` would configure the plugin to read all files for all account IDs in the organization `o-123abc`, for all regions and the entire retention time. It therefore makes sense to combine this open parameter with the `S3AccountList` and `S3Interval` parameters. `S3AccountList` is a comma-separated string of account IDs to query.

Setting `S3AccountList` to `012345678912,987654321012` and `S3Interval` to `3d-1d` with the open parameter `s3://my-s3-bucket/AWSLogs/o-123abc/` would get all events for account IDs 012345678912 and 987654321012, for all regions, from 3 days ago up to 1 day ago.
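
Expressed as a falco.yaml sketch (illustrative only: the bucket, organization ID, and account IDs are the example values from this section, the library path is a placeholder, and the interval key is written as `s3Interval` on the assumption that it follows the same JSON-tag casing as the other options):

```yaml
plugins:
  - name: cloudtrail
    library_path: libcloudtrail.so                       # placeholder path
    open_params: "s3://my-s3-bucket/AWSLogs/o-123abc/"   # organization trail prefix
    init_config:
      s3AccountList: "012345678912,987654321012"         # restrict listing to these accounts
      s3Interval: "3d-1d"                                 # log files named between 3 days and 1 day ago
```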

#### Read from SQS Queue

When using `sqs://<SQS Queue Name>`, the plugin will read messages from the provided SQS queue. The messages are assumed to be [SNS Notifications](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/configure-sns-notifications-for-cloudtrail.html) that announce the presence of new Cloudtrail log files in an S3 bucket. Each new file will be read from the corresponding S3 bucket.

In case the queue is owned by another AWS account, use the `SQSOwnerAccount` parameter to specify the account ID of the queue's owner. Note that the queue owner must grant you the necessary permissions to access the queue.

In this mode, the plugin polls the queue forever, waiting for new log files.
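
A comparable sketch for the SQS mode, with a hypothetical queue name and owner account (again, the library path and values are placeholders):

```yaml
plugins:
  - name: cloudtrail
    library_path: libcloudtrail.so             # placeholder path
    open_params: "sqs://my-cloudtrail-queue"   # hypothetical SQS queue name
    init_config:
      sqsDelete: true                          # remove messages from the queue once received (default)
      sqsOwnerAccount: "012345678912"          # only needed when another account owns the queue
```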

#### Read single file

@ -1,16 +1,38 @@
|
|||
module github.com/falcosecurity/plugins/plugins/cloudtrail
|
||||
|
||||
go 1.15
|
||||
go 1.22
|
||||
|
||||
require (
|
||||
github.com/alecthomas/jsonschema v0.0.0-20220216202328-9eeeec9d044b
|
||||
github.com/aws/aws-lambda-go v1.34.1
|
||||
github.com/aws/aws-sdk-go-v2 v1.16.16
|
||||
github.com/aws/aws-sdk-go-v2/config v1.17.7
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.33
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.27.11
|
||||
github.com/aws/aws-sdk-go-v2/service/sqs v1.18.5
|
||||
github.com/aws/smithy-go v1.13.3
|
||||
github.com/falcosecurity/plugin-sdk-go v0.7.3
|
||||
github.com/valyala/fastjson v1.6.3
|
||||
github.com/aws/aws-lambda-go v1.49.0
|
||||
github.com/aws/aws-sdk-go-v2 v1.36.4
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.16
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.79
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.80.2
|
||||
github.com/aws/aws-sdk-go-v2/service/sqs v1.38.7
|
||||
github.com/aws/smithy-go v1.22.3
|
||||
github.com/falcosecurity/plugin-sdk-go v0.7.5
|
||||
github.com/invopop/jsonschema v0.13.0
|
||||
github.com/valyala/fastjson v1.6.4
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.69 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.31 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.35 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.35 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.35 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.16 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.16 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.25.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.33.21 // indirect
|
||||
github.com/bahlo/generic-list-go v0.2.0 // indirect
|
||||
github.com/buger/jsonparser v1.1.1 // indirect
|
||||
github.com/mailru/easyjson v0.9.0 // indirect
|
||||
github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
|
|
@ -1,88 +1,78 @@
|
|||
github.com/alecthomas/jsonschema v0.0.0-20220216202328-9eeeec9d044b h1:doCpXjVwui6HUN+xgNsNS3SZ0/jUZ68Eb+mJRNOZfog=
|
||||
github.com/alecthomas/jsonschema v0.0.0-20220216202328-9eeeec9d044b/go.mod h1:/n6+1/DWPltRLWL/VKyUxg6tzsl5kHUCcraimt4vr60=
|
||||
github.com/aws/aws-lambda-go v1.34.1 h1:M3a/uFYBjii+tDcOJ0wL/WyFi2550FHoECdPf27zvOs=
|
||||
github.com/aws/aws-lambda-go v1.34.1/go.mod h1:jwFe2KmMsHmffA1X2R09hH6lFzJQxzI8qK17ewzbQMM=
|
||||
github.com/aws/aws-sdk-go-v2 v1.16.4/go.mod h1:ytwTPBG6fXTZLxxeeCCWj2/EMYp/xDUgX+OET6TLNNU=
|
||||
github.com/aws/aws-sdk-go-v2 v1.16.16 h1:M1fj4FE2lB4NzRb9Y0xdWsn2P0+2UHVxwKyOa4YJNjk=
|
||||
github.com/aws/aws-sdk-go-v2 v1.16.16/go.mod h1:SwiyXi/1zTUZ6KIAmLK5V5ll8SiURNUYOqTerZPaF9k=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.8 h1:tcFliCWne+zOuUfKNRn8JdFBuWPDuISDH08wD2ULkhk=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.8/go.mod h1:JTnlBSot91steJeti4ryyu/tLd4Sk84O5W22L7O2EQU=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.17.7 h1:odVM52tFHhpqZBKNjVW5h+Zt1tKHbhdTQRb+0WHrNtw=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.17.7/go.mod h1:dN2gja/QXxFF15hQreyrqYhLBaQo1d9ZKe/v/uplQoI=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.12.20 h1:9+ZhlDY7N9dPnUmf7CDfW9In4sW5Ff3bh7oy4DzS1IE=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.12.20/go.mod h1:UKY5HyIux08bbNA7Blv4PcXQ8cTkGh7ghHMFklaviR4=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.17 h1:r08j4sbZu/RVi+BNxkBJwPMUYY3P8mgSDuKkZ/ZN1lE=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.17/go.mod h1:yIkQcCDYNsZfXpd5UX2Cy+sWA1jPgIhGTw9cOBzfVnQ=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.33 h1:fAoVmNGhir6BR+RU0/EI+6+D7abM+MCwWf8v4ip5jNI=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.33/go.mod h1:84XgODVR8uRhmOnUkKGUZKqIMxmjmLOR8Uyp7G/TPwc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.11/go.mod h1:tmUB6jakq5DFNcXsXOA/ZQ7/C8VnSKYkx58OI7Fh79g=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.23 h1:s4g/wnzMf+qepSNgTvaQQHNxyMLKSawNhKCPNy++2xY=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.23/go.mod h1:2DFxAQ9pfIRy0imBCJv+vZ2X6RKxves6fbnEuSry6b4=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.5/go.mod h1:fV1AaS2gFc1tM0RCb015FJ0pvWVUfJZANzjwoO4YakM=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.17 h1:/K482T5A3623WJgWT8w1yRAFK4RzGzEl7y39yhtn9eA=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.17/go.mod h1:pRwaTYCJemADaqCbUAxltMoHKata7hmB5PjEXeu0kfg=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.24 h1:wj5Rwc05hvUSvKuOF29IYb9QrCLjU+rHAy/x/o0DK2c=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.24/go.mod h1:jULHjqqjDlbyTa7pfM7WICATnOv+iOhjletM3N0Xbu8=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.14 h1:ZSIPAkAsCCjYrhqfw2+lNzWDzxzHXEckFkTePL5RSWQ=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.14/go.mod h1:AyGgqiKv9ECM6IZeNQtdT8NnMvUb3/2wokeq2Fgryto=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.9 h1:Lh1AShsuIJTwMkoxVCAYPJgNG5H+eN6SmoUn8nOZ5wE=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.9/go.mod h1:a9j48l6yL5XINLHLcOKInjdvknN+vWqPBxqeIDw7ktw=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.18 h1:BBYoNQt2kUZUUK4bIPsKrCcjVPUMNsgQpNAwhznK/zo=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.18/go.mod h1:NS55eQ4YixUJPTC+INxi2/jCqe1y2Uw3rnh9wEOVJxY=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.17 h1:Jrd/oMh0PKQc6+BowB+pLEwLIgaQF29eYbe7E1Av9Ug=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.17/go.mod h1:4nYOrY41Lrbk2170/BGkcJKBhws9Pfn8MG3aGqjjeFI=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.17 h1:HfVVR1vItaG6le+Bpw6P4midjBDMKnjMyZnw9MXYUcE=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.17/go.mod h1:YqMdV+gEKCQ59NrB7rzrJdALeBIsYiVi8Inj3+KcqHI=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.27.11 h1:3/gm/JTX9bX8CpzTgIlrtYpB3EVBDxyg/GY/QdcIEZw=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.27.11/go.mod h1:fmgDANqTUCxciViKl9hb/zD5LFbvPINFRgWhDbR+vZo=
|
||||
github.com/aws/aws-sdk-go-v2/service/sqs v1.18.5 h1:Nt1QV0zSgC9WNbcRIgHeYIgFtuuEzijKGYEeB8Xa/zY=
|
||||
github.com/aws/aws-sdk-go-v2/service/sqs v1.18.5/go.mod h1:UCrTk+1stZ/o3VdJVUhtRIMiU99MY+bKNK8lNtySonQ=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.11.23 h1:pwvCchFUEnlceKIgPUouBJwK81aCkQ8UDMORfeFtW10=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.11.23/go.mod h1:/w0eg9IhFGjGyyncHIQrXtU8wvNsTJOP0R6PPj0wf80=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.5 h1:GUnZ62TevLqIoDyHeiWj2P7EqaosgakBKVvWriIdLQY=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.5/go.mod h1:csZuQY65DAdFBt1oIjO5hhBR49kQqop4+lcuCjf2arA=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.16.19 h1:9pPi0PsFNAGILFfPCk8Y0iyEBGc6lu6OQ97U7hmdesg=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.16.19/go.mod h1:h4J3oPZQbxLhzGnk+j9dfYHi5qIOVJ5kczZd658/ydM=
|
||||
github.com/aws/smithy-go v1.11.2/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM=
|
||||
github.com/aws/smithy-go v1.13.3 h1:l7LYxGuzK6/K+NzJ2mC+VvLUbae0sL3bXU//04MkmnA=
|
||||
github.com/aws/smithy-go v1.13.3/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
|
||||
github.com/aws/aws-lambda-go v1.49.0 h1:z4VhTqkFZPM3xpEtTqWqRqsRH4TZBMJqTkRiBPYLqIQ=
|
||||
github.com/aws/aws-lambda-go v1.49.0/go.mod h1:dpMpZgvWx5vuQJfBt0zqBha60q7Dd7RfgJv23DymV8A=
|
||||
github.com/aws/aws-sdk-go-v2 v1.36.4 h1:GySzjhVvx0ERP6eyfAbAuAXLtAda5TEy19E5q5W8I9E=
|
||||
github.com/aws/aws-sdk-go-v2 v1.36.4/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 h1:zAybnyUQXIZ5mok5Jqwlf58/TFE7uvd3IAsa1aF9cXs=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10/go.mod h1:qqvMj6gHLR/EXWZw4ZbqlPbQUyenf4h82UQUlKc+l14=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.16 h1:XkruGnXX1nEZ+Nyo9v84TzsX+nj86icbFAeust6uo8A=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.16/go.mod h1:uCW7PNjGwZ5cOGZ5jr8vCWrYkGIhPoTNV23Q/tpHKzg=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.69 h1:8B8ZQboRc3uaIKjshve/XlvJ570R7BKNy3gftSbS178=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.69/go.mod h1:gPME6I8grR1jCqBFEGthULiolzf/Sexq/Wy42ibKK9c=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.31 h1:oQWSGexYasNpYp4epLGZxxjsDo8BMBh6iNWkTXQvkwk=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.31/go.mod h1:nc332eGUU+djP3vrMI6blS0woaCfHTe3KiSQUVTMRq0=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.79 h1:mGo6WGWry+s5GEf2GLfw3zkHad109FQmtvBV3VYQ8mA=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.79/go.mod h1:siwnpWxHYFSSge7Euw9lGMgQBgvRyym352mCuGNHsMQ=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.35 h1:o1v1VFfPcDVlK3ll1L5xHsaQAFdNtZ5GXnNR7SwueC4=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.35/go.mod h1:rZUQNYMNG+8uZxz9FOerQJ+FceCiodXvixpeRtdESrU=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.35 h1:R5b82ubO2NntENm3SAm0ADME+H630HomNJdgv+yZ3xw=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.35/go.mod h1:FuA+nmgMRfkzVKYDNEqQadvEMxtxl9+RLT9ribCwEMs=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.35 h1:th/m+Q18CkajTw1iqx2cKkLCij/uz8NMwJFPK91p2ug=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.35/go.mod h1:dkJuf0a1Bc8HAA0Zm2MoTGm/WDC18Td9vSbrQ1+VqE8=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 h1:eAh2A4b5IzM/lum78bZ590jy36+d/aFLgKF/4Vd1xPE=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3/go.mod h1:0yKJC/kb8sAnmlYa6Zs3QVYqaC8ug2AbnNChv5Ox3uA=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.3 h1:VHPZakq2L7w+RLzV54LmQavbvheFaR2u1NomJRSEfcU=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.3/go.mod h1:DX1e/lkbsAt0MkY3NgLYuH4jQvRfw8MYxTe9feR7aXM=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.16 h1:/ldKrPPXTC421bTNWrUIpq3CxwHwRI/kpc+jPUTJocM=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.16/go.mod h1:5vkf/Ws0/wgIMJDQbjI4p2op86hNW6Hie5QtebrDgT8=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.16 h1:2HuI7vWKhFWsBhIr2Zq8KfFZT6xqaId2XXnXZjkbEuc=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.16/go.mod h1:BrwWnsfbFtFeRjdx0iM1ymvlqDX1Oz68JsQaibX/wG8=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.80.2 h1:T6Wu+8E2LeTUqzqQ/Bh1EoFNj1u4jUyveMgmTlu9fDU=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.80.2/go.mod h1:chSY8zfqmS0OnhZoO/hpPx/BHfAIL80m77HwhRLYScY=
|
||||
github.com/aws/aws-sdk-go-v2/service/sqs v1.38.7 h1:hbOlzaZYwfKhLss4XhjtcEQkVCI6BnzzYF+Wrlhtv/w=
|
||||
github.com/aws/aws-sdk-go-v2/service/sqs v1.38.7/go.mod h1:cSnwA6RKvtcl0f7ORIrOdSVV6XQmdAHUDAxuQRGF/kw=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.25.4 h1:EU58LP8ozQDVroOEyAfcq0cGc5R/FTZjVoYJ6tvby3w=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.25.4/go.mod h1:CrtOgCcysxMvrCoHnvNAD7PHWclmoFG78Q2xLK0KKcs=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.2 h1:XB4z0hbQtpmBnb1FQYvKaCM7UsS6Y/u8jVBwIUGeCTk=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.2/go.mod h1:hwRpqkRxnQ58J9blRDrB4IanlXCpcKmsC83EhG77upg=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.33.21 h1:nyLjs8sYJShFYj6aiyjCBI3EcLn1udWrQTjEF+SOXB0=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.33.21/go.mod h1:EhdxtZ+g84MSGrSrHzZiUm9PYiZkrADNja15wtRJSJo=
|
||||
github.com/aws/smithy-go v1.22.3 h1:Z//5NuZCSW6R4PhQ93hShNbyBbn8BWCmCVCt+Q8Io5k=
|
||||
github.com/aws/smithy-go v1.22.3/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
|
||||
github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk=
|
||||
github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg=
|
||||
github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
|
||||
github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/falcosecurity/plugin-sdk-go v0.7.3 h1:nmlBUmeAgEhcEHhSDWeEYgD9WdiHR9uMWyog5Iv7GIA=
|
||||
github.com/falcosecurity/plugin-sdk-go v0.7.3/go.mod h1:NP+y22DYOS+G3GDXIXNmzf0CBL3nfPPMoQuHvAzfitQ=
|
||||
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
|
||||
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
|
||||
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0 h1:i462o439ZjprVSFSZLZxcsoAe592sZB1rci2Z8j4wdk=
|
||||
github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA=
|
||||
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
|
||||
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
||||
github.com/falcosecurity/plugin-sdk-go v0.7.5 h1:ke/+kTt0PwedM8+IGTKcW3LrUI/xiJNDCSzqTSW+CvI=
|
||||
github.com/falcosecurity/plugin-sdk-go v0.7.5/go.mod h1:NP+y22DYOS+G3GDXIXNmzf0CBL3nfPPMoQuHvAzfitQ=
|
||||
github.com/invopop/jsonschema v0.13.0 h1:KvpoAJWEjR3uD9Kbm2HWJmqsEaHt8lBUpd0qHcIi21E=
|
||||
github.com/invopop/jsonschema v0.13.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0=
|
||||
github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
|
||||
github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
|
||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/valyala/fastjson v1.6.3 h1:tAKFnnwmeMGPbwJ7IwxcTPCNr3uIzoIj3/Fh90ra4xc=
|
||||
github.com/valyala/fastjson v1.6.3/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY=
|
||||
github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ=
|
||||
github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY=
|
||||
github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc=
|
||||
github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
|
||||
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
// SPDX-License-Identifier: Apache-2.0
|
||||
/*
|
||||
Copyright (C) 2023 The Falco Authors.
|
||||
Copyright (C) 2025 The Falco Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
@ -29,16 +29,16 @@ import (
|
|||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"math"
|
||||
|
||||
"github.com/alecthomas/jsonschema"
|
||||
"github.com/aws/aws-sdk-go-v2/aws"
|
||||
"github.com/falcosecurity/plugin-sdk-go/pkg/sdk"
|
||||
"github.com/falcosecurity/plugin-sdk-go/pkg/sdk/plugins"
|
||||
"github.com/falcosecurity/plugin-sdk-go/pkg/sdk/plugins/source"
|
||||
"github.com/falcosecurity/plugin-sdk-go/pkg/sdk/symbols/extract"
|
||||
_ "github.com/falcosecurity/plugin-sdk-go/pkg/sdk/symbols/progress"
|
||||
"github.com/invopop/jsonschema"
|
||||
"github.com/valyala/fastjson"
|
||||
)
|
||||
|
||||
|
@ -48,7 +48,7 @@ const (
|
|||
PluginName = "cloudtrail"
|
||||
PluginDescription = "reads cloudtrail JSON data saved to file in the directory specified in the settings"
|
||||
PluginContact = "github.com/falcosecurity/plugins/"
|
||||
PluginVersion = "0.10.0"
|
||||
PluginVersion = "0.13.0"
|
||||
PluginEventSource = "aws_cloudtrail"
|
||||
)
|
||||
|
||||
|
@ -155,7 +155,7 @@ func (p *Plugin) String(evt sdk.EventReader) (string, error) {
|
|||
var user string
|
||||
var err error
|
||||
|
||||
data, err := ioutil.ReadAll(evt.Reader())
|
||||
data, err := io.ReadAll(evt.Reader())
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
@ -216,7 +216,7 @@ func (p *Plugin) Extract(req sdk.ExtractRequest, evt sdk.EventReader) error {
|
|||
// Decode the json, but only if we haven't done it yet for this event
|
||||
if evt.EventNum() != p.jdataEvtnum {
|
||||
// Read the event data
|
||||
data, err := ioutil.ReadAll(evt.Reader())
|
||||
data, err := io.ReadAll(evt.Reader())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -24,6 +24,8 @@ type PluginConfig struct {
|
|||
SQSDelete bool `json:"sqsDelete" jsonschema:"title=Delete SQS messages,description=If true then the plugin will delete SQS messages from the queue immediately after receiving them (Default: true),default=true"`
|
||||
UseAsync bool `json:"useAsync" jsonschema:"title=Use async extraction,description=If true then async extraction optimization is enabled (Default: true),default=true"`
|
||||
UseS3SNS bool `json:"useS3SNS" jsonschema:"title=Use S3 SNS,description=If true then the plugin will expect SNS messages to originate from S3 instead of directly from Cloudtrail (Default: false),default=false"`
|
||||
S3AccountList string `json:"s3AccountList" jsonschema:"title=S3 account list,description=A comma separated list of account IDs for organizational Cloudtrails (Default: no account IDs),default="`
|
||||
SQSOwnerAccount string `json:"sqsOwnerAccount" jsonschema:"title=SQS owner account,description=The AWS account ID that owns the SQS queue in case the queue is owned by a different account (Default: no account ID),default="`
|
||||
AWS PluginConfigAWS `json:"aws"`
|
||||
}
|
||||
|
||||
|
@ -34,5 +36,7 @@ func (p *PluginConfig) Reset() {
|
|||
p.S3Interval = ""
|
||||
p.UseAsync = true
|
||||
p.UseS3SNS = false
|
||||
p.S3AccountList = ""
|
||||
p.SQSOwnerAccount = ""
|
||||
p.AWS.Reset()
|
||||
}
|
||||
|
|
|
@ -33,6 +33,7 @@ import (
|
|||
var supportedFields = []sdk.FieldEntry{
|
||||
{Type: "string", Name: "ct.id", Display: "Event ID", Desc: "the unique ID of the cloudtrail event (eventID in the json)."},
|
||||
{Type: "string", Name: "ct.error", Display: "Error Code", Desc: "The error code from the event. Will be \"<NA>\" (e.g. the NULL/empty/none value) if there was no error."},
|
||||
{Type: "string", Name: "ct.errormessage", Display: "Error Message", Desc: "The description of an error. Will be \"<NA>\" (e.g. the NULL/empty/none value) if there was no error."},
|
||||
{Type: "string", Name: "ct.time", Display: "Timestamp", Desc: "the timestamp of the cloudtrail event (eventTime in the json).", Properties: []string{"hidden"}},
|
||||
{Type: "string", Name: "ct.src", Display: "AWS Service", Desc: "the source of the cloudtrail event (eventSource in the json)."},
|
||||
{Type: "string", Name: "ct.shortsrc", Display: "AWS Service", Desc: "the source of the cloudtrail event (eventSource in the json, without the '.amazonaws.com' trailer)."},
|
||||
|
@ -45,6 +46,7 @@ var supportedFields = []sdk.FieldEntry{
|
|||
{Type: "string", Name: "ct.region", Display: "Region", Desc: "the region of the cloudtrail event (awsRegion in the json)."},
|
||||
{Type: "string", Name: "ct.response.subnetid", Display: "Response Subnet ID", Desc: "the subnet ID included in the response."},
|
||||
{Type: "string", Name: "ct.response.reservationid", Display: "Response Reservation ID", Desc: "the reservation ID included in the response."},
|
||||
{Type: "string", Name: "ct.response", Display: "Response Elements", Desc: "All response elements."},
|
||||
{Type: "string", Name: "ct.request.availabilityzone", Display: "Request Availability Zone", Desc: "the availability zone included in the request."},
|
||||
{Type: "string", Name: "ct.request.cluster", Display: "Request Cluster", Desc: "the cluster included in the request."},
|
||||
{Type: "string", Name: "ct.request.functionname", Display: "Request Function Name", Desc: "the function name included in the request."},
|
||||
|
@ -57,11 +59,31 @@ var supportedFields = []sdk.FieldEntry{
|
|||
{Type: "string", Name: "ct.request.subnetid", Display: "Request Subnet ID", Desc: "the subnet ID provided in the request."},
|
||||
{Type: "string", Name: "ct.request.taskdefinition", Display: "Request Task Definition", Desc: "the task definition prrovided in the request."},
|
||||
{Type: "string", Name: "ct.request.username", Display: "Request User Name", Desc: "the username provided in the request."},
|
||||
{Type: "string", Name: "ct.request", Display: "Request Parameters", Desc: "All request parameters."},
|
||||
{Type: "string", Name: "ct.srcip", Display: "Source IP", Desc: "the IP address generating the event (sourceIPAddress in the json).", Properties: []string{"conversation"}},
|
||||
{Type: "string", Name: "ct.useragent", Display: "User Agent", Desc: "the user agent generating the event (userAgent in the json)."},
|
||||
{Type: "string", Name: "ct.info", Display: "Info", Desc: "summary information about the event. This varies depending on the event type and, for some events, it contains event-specific details.", Properties: []string{"info"}},
|
||||
{Type: "string", Name: "ct.managementevent", Display: "Management Event", Desc: "'true' if the event is a management event (AwsApiCall, AwsConsoleAction, AwsConsoleSignIn, or AwsServiceEvent), 'false' otherwise."},
|
||||
{Type: "string", Name: "ct.readonly", Display: "Read Only", Desc: "'true' if the event only reads information (e.g. DescribeInstances), 'false' if the event modifies the state (e.g. RunInstances, CreateLoadBalancer...)."},
|
||||
{Type: "string", Name: "ct.requestid", Display: "Request ID", Desc: "The value that identifies the request."},
|
||||
{Type: "string", Name: "ct.eventtype", Display: "Event Type", Desc: "Identifies the type of event that generated the event record."},
|
||||
{Type: "string", Name: "ct.apiversion", Display: "API Version", Desc: "The API version associated with the AwsApiCall eventType value."},
|
||||
{Type: "string", Name: "ct.resources", Display: "Resources", Desc: "A list of resources accessed in the event."},
|
||||
{Type: "string", Name: "ct.recipientaccountid", Display: "Recipient Account Id", Desc: "The account ID that received this event."},
|
||||
{Type: "string", Name: "ct.serviceeventdetails", Display: "Service Event Details", Desc: "Identifies the service event, including what triggered the event and the result."},
|
||||
{Type: "string", Name: "ct.sharedeventid", Display: "Shared Event ID", Desc: "GUID generated by CloudTrail to uniquely identify CloudTrail events."},
|
||||
{Type: "string", Name: "ct.vpcendpointid", Display: "VPC Endpoint ID", Desc: "Identifies the VPC endpoint in which requests were made."},
|
||||
{Type: "string", Name: "ct.eventcategory", Display: "Event Category", Desc: "Shows the event category that is used in LookupEvents calls."},
|
||||
{Type: "string", Name: "ct.addendum.reason", Display: "Reason", Desc: "The reason that the event or some of its contents were missing."},
|
||||
{Type: "string", Name: "ct.addendum.updatedfields", Display: "Updated Fields", Desc: "The event record fields that are updated by the addendum."},
|
||||
{Type: "string", Name: "ct.addendum.originalrequestid", Display: "Original Request ID", Desc: "The original unique ID of the request."},
|
||||
{Type: "string", Name: "ct.addendum.originaleventid", Display: "Original Event ID", Desc: "The original event ID."},
|
||||
{Type: "string", Name: "ct.sessioncredentialfromconsole", Display: "Session Credential From Console", Desc: "Shows whether or not an event originated from an AWS Management Console session."},
|
||||
{Type: "string", Name: "ct.edgedevicedetails", Display: "Edge Device Details", Desc: "Information about edge devices that are targets of a request."},
|
||||
{Type: "string", Name: "ct.tlsdetails.tlsversion", Display: "TLS Version", Desc: "The TLS version of a request."},
|
||||
{Type: "string", Name: "ct.tlsdetails.ciphersuite", Display: "TLS Cipher Suite", Desc: "The cipher suite (combination of security algorithms used) of a request."},
|
||||
{Type: "string", Name: "ct.tlsdetails.clientprovidedhostheader", Display: "Client Provided Host Header", Desc: "The client-provided host name used in the service API call."},
|
||||
{Type: "string", Name: "ct.additionaleventdata", Display: "Additional Event Data", Desc: "All additional event data attributes."},
|
||||
{Type: "string", Name: "s3.uri", Display: "Key URI", Desc: "the s3 URI (s3://<bucket>/<key>).", Properties: []string{"conversation"}},
|
||||
{Type: "string", Name: "s3.bucket", Display: "Bucket Name", Desc: "the bucket name for s3 events.", Properties: []string{"conversation"}},
|
||||
{Type: "string", Name: "s3.key", Display: "Key Name", Desc: "the S3 key name."},
|
||||
|
@ -153,7 +175,7 @@ func getEvtInfo(jdata *fastjson.Value) string {
|
|||
return "<invalid cloudtrail event: eventName field missing>"
|
||||
}
|
||||
|
||||
if (evtuser == evtsrcip) {
|
||||
if evtuser == evtsrcip {
|
||||
info = fmt.Sprintf("%v %v%v %v", evtuser, errsymbol, rwsymbol, evtname)
|
||||
} else {
|
||||
info = fmt.Sprintf("%v via %v %v%v %v", evtuser, evtsrcip, errsymbol, rwsymbol, evtname)
|
||||
|
@ -226,6 +248,13 @@ func getfieldStr(jdata *fastjson.Value, field string) (bool, string) {
|
|||
} else {
|
||||
res = string(val)
|
||||
}
|
||||
case "ct.errormessage":
|
||||
val := jdata.GetStringBytes("errorMessage")
|
||||
if val == nil {
|
||||
return false, ""
|
||||
} else {
|
||||
res = string(val)
|
||||
}
|
||||
case "ct.time":
|
||||
val := jdata.GetStringBytes("eventTime")
|
||||
if val == nil {
|
||||
|
@ -321,6 +350,13 @@ func getfieldStr(jdata *fastjson.Value, field string) (bool, string) {
|
|||
} else {
|
||||
res = string(val)
|
||||
}
|
||||
case "ct.response":
|
||||
val := jdata.Get("responseElements")
|
||||
if val == nil {
|
||||
return false, ""
|
||||
} else {
|
||||
res = string(val.MarshalTo(nil))
|
||||
}
|
||||
case "ct.request.availabilityzone":
|
||||
val := jdata.GetStringBytes("requestParameters", "availabilityZone")
|
||||
if val == nil {
|
||||
|
@ -405,6 +441,13 @@ func getfieldStr(jdata *fastjson.Value, field string) (bool, string) {
|
|||
} else {
|
||||
res = string(val)
|
||||
}
|
||||
case "ct.request":
|
||||
val := jdata.Get("requestParameters")
|
||||
if val == nil {
|
||||
return false, ""
|
||||
} else {
|
||||
res = string(val.MarshalTo(nil))
|
||||
}
|
||||
case "ct.srcip":
|
||||
val := jdata.GetStringBytes("sourceIPAddress")
|
||||
if val == nil {
|
||||
|
@ -465,27 +508,173 @@ func getfieldStr(jdata *fastjson.Value, field string) (bool, string) {
|
|||
res = "false"
|
||||
}
|
||||
}
|
||||
case "s3.bucket":
|
||||
val := jdata.GetStringBytes("requestParameters", "bucketName")
|
||||
case "ct.requestid":
|
||||
val := jdata.GetStringBytes("requestID")
|
||||
if val == nil {
|
||||
return false, ""
|
||||
} else {
|
||||
res = string(val)
|
||||
}
|
||||
case "ct.eventtype":
|
||||
val := jdata.GetStringBytes("eventType")
|
||||
if val == nil {
|
||||
return false, ""
|
||||
} else {
|
||||
res = string(val)
|
||||
}
|
||||
case "ct.apiversion":
|
||||
val := jdata.GetStringBytes("apiVersion")
|
||||
if val == nil {
|
||||
return false, ""
|
||||
} else {
|
||||
res = string(val)
|
||||
}
|
||||
case "ct.resources":
|
||||
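// Join the JSON representation of each entry in the "resources" array into a
// comma-separated string.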
var resources string = ""
|
||||
rlist := jdata.GetArray("resources")
|
||||
if rlist == nil || len(rlist) == 0 {
|
||||
return false, ""
|
||||
}
|
||||
for _, resource := range rlist {
|
||||
resources += string(resource.MarshalTo(nil))
|
||||
resources += ","
|
||||
}
|
||||
resources = strings.TrimSuffix(resources, ",")
|
||||
if resources == "" {
|
||||
return false, ""
|
||||
}
|
||||
res = resources
|
||||
case "ct.recipientaccountid":
|
||||
val := jdata.GetStringBytes("recipientAccountId")
|
||||
if val == nil {
|
||||
return false, ""
|
||||
} else {
|
||||
res = string(val)
|
||||
}
|
||||
case "ct.serviceeventdetails":
|
||||
val := jdata.GetStringBytes("serviceEventDetails")
|
||||
if val == nil {
|
||||
return false, ""
|
||||
} else {
|
||||
res = string(val)
|
||||
}
|
||||
case "ct.sharedeventid":
|
||||
val := jdata.GetStringBytes("sharedEventID")
|
||||
if val == nil {
|
||||
return false, ""
|
||||
} else {
|
||||
res = string(val)
|
||||
}
|
||||
case "ct.vpcendpointid":
|
||||
val := jdata.GetStringBytes("vpcEndpointId")
|
||||
if val == nil {
|
||||
return false, ""
|
||||
} else {
|
||||
res = string(val)
|
||||
}
|
||||
case "ct.eventcategory":
|
||||
val := jdata.GetStringBytes("eventCategory")
|
||||
if val == nil {
|
||||
return false, ""
|
||||
} else {
|
||||
res = string(val)
|
||||
}
|
||||
case "ct.addendum.reason":
|
||||
val := jdata.GetStringBytes("addendum", "reason")
|
||||
if val == nil {
|
||||
return false, ""
|
||||
} else {
|
||||
res = string(val)
|
||||
}
|
||||
case "ct.addendum.updatedfields":
|
||||
val := jdata.GetStringBytes("addendum", "updatedFields")
|
||||
if val == nil {
|
||||
return false, ""
|
||||
} else {
|
||||
res = string(val)
|
||||
}
|
||||
case "ct.addendum.originalrequestid":
|
||||
val := jdata.GetStringBytes("addendum", "originalRequestID")
|
||||
if val == nil {
|
||||
return false, ""
|
||||
} else {
|
||||
res = string(val)
|
||||
}
|
||||
case "ct.addendum.originaleventid":
|
||||
val := jdata.GetStringBytes("addendum", "originalEventID")
|
||||
if val == nil {
|
||||
return false, ""
|
||||
} else {
|
||||
res = string(val)
|
||||
}
|
||||
case "ct.sessioncredentialfromconsole":
|
||||
scc := jdata.GetBool("sessionCredentialFromConsole")
|
||||
if scc {
|
||||
res = "true"
|
||||
} else {
|
||||
res = "false"
|
||||
}
|
||||
case "ct.edgedevicedetails":
|
||||
val := jdata.GetStringBytes("edgeDeviceDetails")
|
||||
if val == nil {
|
||||
return false, ""
|
||||
} else {
|
||||
res = string(val)
|
||||
}
|
||||
case "ct.tlsdetails.tlsversion":
|
||||
val := jdata.GetStringBytes("tlsDetails", "tlsVersion")
|
||||
if val == nil {
|
||||
return false, ""
|
||||
} else {
|
||||
res = string(val)
|
||||
}
|
||||
case "ct.tlsdetails.ciphersuite":
|
||||
val := jdata.GetStringBytes("tlsDetails", "cipherSuite")
|
||||
if val == nil {
|
||||
return false, ""
|
||||
} else {
|
||||
res = string(val)
|
||||
}
|
||||
case "ct.tlsdetails.clientprovidedhostheader":
|
||||
val := jdata.GetStringBytes("tlsDetails", "clientProvidedHostHeader")
|
||||
if val == nil {
|
||||
return false, ""
|
||||
} else {
|
||||
res = string(val)
|
||||
}
|
||||
case "ct.additionaleventdata":
|
||||
val := jdata.Get("additionalEventData")
|
||||
if val == nil {
|
||||
return false, ""
|
||||
}
|
||||
res = string(val.MarshalTo(nil))
|
||||
case "s3.bucket":
|
||||
val := jdata.GetStringBytes("requestParameters", "bucketName")
|
||||
|
||||
if val == nil {
|
||||
return false, ""
|
||||
}
|
||||
|
||||
res = string(val)
|
||||
case "s3.key":
|
||||
val := jdata.GetStringBytes("requestParameters", "key")
|
||||
|
||||
if val == nil {
|
||||
return false, ""
|
||||
}
|
||||
|
||||
res = string(val)
|
||||
case "s3.uri":
|
||||
sbucket := jdata.GetStringBytes("requestParameters", "bucketName")
|
||||
if sbucket == nil {
|
||||
return false, ""
|
||||
}
|
||||
|
||||
skey := jdata.GetStringBytes("requestParameters", "key")
|
||||
if skey == nil {
|
||||
return false, ""
|
||||
}
|
||||
|
||||
res = fmt.Sprintf("s3://%s/%s", sbucket, skey)
|
||||
case "ec2.name":
|
||||
var iname string = ""
|
||||
|
@ -576,7 +765,7 @@ func getfieldU64(jdata *fastjson.Value, field string) (bool, uint64) {
|
|||
if in != nil {
|
||||
tot = tot + getvalueU64(in)
|
||||
}
|
||||
return (in != nil), tot
|
||||
return in != nil, tot
|
||||
case "s3.bytes.out":
|
||||
var tot uint64 = 0
|
||||
out := jdata.Get("additionalEventData", "bytesTransferredOut")
|
||||
|
@ -597,9 +786,9 @@ func getfieldU64(jdata *fastjson.Value, field string) (bool, uint64) {
|
|||
case "s3.cnt.other":
|
||||
ename := string(jdata.GetStringBytes("eventName"))
|
||||
if ename == "GetObject" || ename == "PutObject" {
|
||||
return true, 1
|
||||
return false, 0
|
||||
}
|
||||
return false, 0
|
||||
return true, 1
|
||||
default:
|
||||
return false, 0
|
||||
}
|
||||
|
|
|
@ -53,6 +53,12 @@ const (
|
|||
sqsMode
|
||||
)
|
||||
|
||||
type listOrigin struct {
|
||||
prefix *string
|
||||
startAfter *string
|
||||
}
|
||||
|
||||
|
||||
type fileInfo struct {
|
||||
name string
|
||||
isCompressed bool
|
||||
|
@ -93,6 +99,8 @@ type PluginInstance struct {
|
|||
nextJParser fastjson.Parser
|
||||
}
|
||||
|
||||
var dlErrChan chan error
|
||||
|
||||
func min(a, b int) int {
|
||||
if a < b {
|
||||
return a
|
||||
|
@ -153,9 +161,77 @@ func (p *PluginInstance) initS3() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
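// chunkListOrigin splits orgList into consecutive slices of at most chunkSize
// elements, so that each chunk of prefixes can be listed concurrently. It
// returns nil for an empty list or a non-positive chunk size.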
func chunkListOrigin(orgList []listOrigin, chunkSize int) [][]listOrigin {
|
||||
if (len(orgList) == 0 || chunkSize < 1) {
|
||||
return nil
|
||||
}
|
||||
divided := make([][]listOrigin, (len(orgList)+chunkSize-1)/chunkSize)
|
||||
prev := 0
|
||||
i := 0
|
||||
till := len(orgList) - chunkSize
|
||||
for prev < till {
|
||||
next := prev + chunkSize
|
||||
divided[i] = orgList[prev:next]
|
||||
prev = next
|
||||
i++
|
||||
}
|
||||
divided[i] = orgList[prev:]
|
||||
return divided
|
||||
}
|
||||
|
||||
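// listKeys pages through the S3 objects under params.prefix (optionally
// starting after params.startAfter), skips files whose embedded CloudTrail
// timestamp falls outside the optional startTS/endTS window, keeps only
// .json/.json.gz keys, and appends them to oCtx.files. Errors are reported on
// dlErrChan, and the caller's WaitGroup is released on return.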
func (oCtx *PluginInstance) listKeys(params listOrigin, startTS string, endTS string) error {
|
||||
defer oCtx.s3.DownloadWg.Done()
|
||||
|
||||
ctx := context.Background()
|
||||
// Fetch the list of keys
|
||||
paginator := s3.NewListObjectsV2Paginator(oCtx.s3.client, &s3.ListObjectsV2Input{
|
||||
Bucket: &oCtx.s3.bucket,
|
||||
Prefix: params.prefix,
|
||||
StartAfter: params.startAfter,
|
||||
})
|
||||
|
||||
for paginator.HasMorePages() {
|
||||
page, err := paginator.NextPage(ctx)
|
||||
if err != nil {
|
||||
dlErrChan <- err
|
||||
return nil
|
||||
}
|
||||
for _, obj := range page.Contents {
|
||||
path := obj.Key
|
||||
|
||||
filepathRE := regexp.MustCompile(`.*_CloudTrail_[^_]+_([^_]+)Z_`)
|
||||
if startTS != "" {
|
||||
matches := filepathRE.FindStringSubmatch(*path)
|
||||
if matches != nil {
|
||||
pathTS := matches[1]
|
||||
if pathTS < startTS {
|
||||
continue
|
||||
}
|
||||
if endTS != "" && pathTS > endTS {
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
isCompressed := strings.HasSuffix(*path, ".json.gz")
|
||||
if filepath.Ext(*path) != ".json" && !isCompressed {
|
||||
continue
|
||||
}
|
||||
|
||||
var fi fileInfo = fileInfo{name: *path, isCompressed: isCompressed}
|
||||
oCtx.files = append(oCtx.files, fi)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (oCtx *PluginInstance) openS3(input string) error {
|
||||
oCtx.openMode = s3Mode
|
||||
|
||||
if oCtx.config.S3DownloadConcurrency < 1 {
|
||||
return fmt.Errorf(PluginName + " invalid S3DownloadConcurrency: \"%d\"", oCtx.config.S3DownloadConcurrency)
|
||||
}
|
||||
|
||||
// remove the initial "s3://"
|
||||
input = input[5:]
|
||||
slashindex := strings.Index(input, "/")
|
||||
|
@ -175,13 +251,9 @@ func (oCtx *PluginInstance) openS3(input string) error {
|
|||
}
|
||||
|
||||
|
||||
type listOrigin struct {
|
||||
prefix *string
|
||||
startAfter *string
|
||||
}
|
||||
|
||||
var inputParams []listOrigin
|
||||
ctx := context.Background()
|
||||
var intervalPrefixList []string
|
||||
|
||||
startTime, endTime, err := ParseInterval(oCtx.config.S3Interval)
|
||||
if err != nil {
|
||||
|
@ -189,46 +261,109 @@ func (oCtx *PluginInstance) openS3(input string) error {
}

s3AccountList := oCtx.config.S3AccountList
accountListRE := regexp.MustCompile(`^(?: *\d{12} *,?)*$`)
if (! accountListRE.MatchString(s3AccountList)) {
	return fmt.Errorf(PluginName + " invalid account list: \"%s\"", oCtx.config.S3AccountList)
}

// CloudTrail logs have the format
// bucket_name/prefix_name/AWSLogs/Account ID/CloudTrail/region/YYYY/MM/DD/AccountID_CloudTrail_RegionName_YYYYMMDDTHHmmZ_UniqueString.json.gz
// for organization trails the format is
// bucket_name/prefix_name/AWSLogs/O-ID/Account ID/CloudTrail/Region/YYYY/MM/DD/AccountID_CloudTrail_RegionName_YYYYMMDDTHHmmZ_UniqueString.json.gz
// for ControlTower releases before landing zones version 3.0 the organization trails format is
// bucket_name/prefix_name/AWSLogs/Account ID/CloudTrail/Region/YYYY/MM/DD/AccountID_CloudTrail_RegionName_YYYYMMDDTHHmmZ_UniqueString.json.gz
// Reduce the number of pages we have to process using "StartAfter" parameters
// here, then trim individual filepaths below.

intervalPrefix := prefix

// For durations, carve out a special case for "Copy S3 URI" in the AWS console, which gives you
// bucket_name/prefix_name/AWSLogs/<Account ID>/
awsLogsRE := regexp.MustCompile(`AWSLogs/\d+/?$`)
// bucket_name/prefix_name/AWSLogs/<Account ID>/ or bucket_name/prefix_name/AWSLogs/<Org-ID>/<Account ID>/
awsLogsRE := regexp.MustCompile(`/AWSLogs/(?:o-[a-z0-9]{10,32}/)?\d{12}/?$`)
awsLogsOrgRE := regexp.MustCompile(`/AWSLogs(?:/o-[a-z0-9]{10,32})?/?$`)
if awsLogsRE.MatchString(prefix) {
	if (! strings.HasSuffix(intervalPrefix, "/")) {
		intervalPrefix += "/"
	}
	intervalPrefix += "CloudTrail/"
	intervalPrefixList = append(intervalPrefixList, intervalPrefix)
} else if awsLogsOrgRE.MatchString(prefix) {
	if (! strings.HasSuffix(intervalPrefix, "/")) {
		intervalPrefix += "/"
	}
	if s3AccountList != "" {
		// build intervalPrefixList by using the provided S3AccountList
		accountListArray := strings.Split(s3AccountList, ",")
		if len(accountListArray) <= 0 {
			return fmt.Errorf(PluginName + " invalid account list: \"%s\"", oCtx.config.S3AccountList)
		}
		for i := range accountListArray {
			accountListArray[i] = strings.TrimSpace(accountListArray[i])
		}
		for _, account := range accountListArray {
			intervalPrefixList = append(intervalPrefixList, intervalPrefix + account + "/CloudTrail/")
		}
	} else {
		// try to get all available account IDs in the S3 CloudTrail bucket
		delimiter := "/"
		paginator := s3.NewListObjectsV2Paginator(oCtx.s3.client, &s3.ListObjectsV2Input{
			Bucket: &oCtx.s3.bucket,
			Prefix: &intervalPrefix,
			Delimiter: &delimiter,
		})
		for paginator.HasMorePages() {
			page, err := paginator.NextPage(ctx)
			if err != nil {
				// Try friendlier error sources first.
				var aErr smithy.APIError
				if errors.As(err, &aErr) {
					return fmt.Errorf(PluginName + " plugin error: %s: %s", aErr.ErrorCode(), aErr.ErrorMessage())
				}

				var oErr *smithy.OperationError
				if errors.As(err, &oErr) {
					return fmt.Errorf(PluginName + " plugin error: %s: %s", oErr.Service(), oErr.Unwrap())
				}

				return fmt.Errorf(PluginName + " plugin error: failed to list accounts: " + err.Error())
			}
			for _, commonPrefix := range page.CommonPrefixes {
				path := commonPrefix.Prefix
				if awsLogsRE.MatchString(*path) {
					intervalPrefixList = append(intervalPrefixList, *path + "CloudTrail/")
				}
			}
		}
	}
} else {
	intervalPrefixList = append(intervalPrefixList, intervalPrefix)
}

if strings.HasSuffix(intervalPrefix, "/CloudTrail/") {
	delimiter := "/"
	// Fetch the list of regions.
	output, err := oCtx.s3.client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{
		Bucket: &oCtx.s3.bucket,
		Prefix: &intervalPrefix,
		Delimiter: &delimiter,
	})
	if err == nil {
		for _, commonPrefix := range output.CommonPrefixes {
			params := listOrigin {prefix: commonPrefix.Prefix}
			if !startTime.IsZero() {
				// startAfter doesn't have to be a real key.
				startAfterSuffix := startTime.Format("2006/01/02/")
				startAfter := *commonPrefix.Prefix + startAfterSuffix
				params.startAfter = &startAfter
for _, intervalPrefix := range intervalPrefixList {
	if strings.HasSuffix(intervalPrefix, "/CloudTrail/") {
		delimiter := "/"
		// Fetch the list of regions.
		output, err := oCtx.s3.client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{
			Bucket: &oCtx.s3.bucket,
			Prefix: &intervalPrefix,
			Delimiter: &delimiter,
		})
		if err == nil {
			for _, commonPrefix := range output.CommonPrefixes {
				params := listOrigin {prefix: commonPrefix.Prefix}
				if !startTime.IsZero() {
					// startAfter doesn't have to be a real key.
					startAfterSuffix := startTime.Format("2006/01/02/")
					startAfter := *commonPrefix.Prefix + startAfterSuffix
					params.startAfter = &startAfter
				}
				inputParams = append(inputParams, params)
			}
			inputParams = append(inputParams, params)
		}
	}
}

filepathRE := regexp.MustCompile(`.*_CloudTrail_[^_]+_([^_]+)Z_`)
var startTS string
var endTS string
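The `startAfter` trick above relies on S3 returning keys in lexicographic (UTF-8 byte) order: a synthetic key ending in `YYYY/MM/DD/` sorts before every real object stored on or after that day, so earlier days are skipped server-side even though the synthetic key never exists. A small illustration (the account ID and region in the prefix are made up):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	startTime, _ := time.Parse(time.RFC3339, "2023-04-05T00:00:00Z")
	// Hypothetical region prefix, shaped like the CloudTrail layout above.
	prefix := "AWSLogs/123456789012/CloudTrail/us-east-1/"
	startAfter := prefix + startTime.Format("2006/01/02/")
	fmt.Println(startAfter) // AWSLogs/123456789012/CloudTrail/us-east-1/2023/04/05/
	// ListObjectsV2 with StartAfter set to this value only returns keys that
	// sort after it, i.e. objects written under 2023/04/05 or later dates.
}
```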
@ -249,17 +384,18 @@ func (oCtx *PluginInstance) openS3(input string) error {
	inputParams = append(inputParams, params)
}

// Would it make sense to do this concurrently?
for _, params := range inputParams {
	// Fetch the list of keys
	paginator := s3.NewListObjectsV2Paginator(oCtx.s3.client, &s3.ListObjectsV2Input{
		Bucket: &oCtx.s3.bucket,
		Prefix: params.prefix,
		StartAfter: params.startAfter,
	})
// Divide the inputParams array into chunks and get the keys concurrently for all items in a chunk
for _, chunk := range chunkListOrigin(inputParams, oCtx.config.S3DownloadConcurrency) {
	dlErrChan = make(chan error, oCtx.config.S3DownloadConcurrency)
	for _, params := range chunk {
		oCtx.s3.DownloadWg.Add(1)
		go oCtx.listKeys(params, startTS, endTS)
	}

	for paginator.HasMorePages() {
		page, err := paginator.NextPage(ctx)
	oCtx.s3.DownloadWg.Wait()

	select {
	case err := <-dlErrChan:
		if err != nil {
			// Try friendlier error sources first.
			var aErr smithy.APIError
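The `chunkListOrigin` helper is not part of this hunk. Assuming it only needs to split the `listOrigin` slice into batches of at most `S3DownloadConcurrency` items so each batch can be listed by concurrent goroutines, a minimal sketch could look like this (not the plugin's actual implementation):

```go
// chunkListOrigin splits items into consecutive chunks of at most size elements.
// listOrigin is the struct declared earlier in this file.
func chunkListOrigin(items []listOrigin, size int) [][]listOrigin {
	if size <= 0 {
		size = 1
	}
	var chunks [][]listOrigin
	for start := 0; start < len(items); start += size {
		end := start + size
		if end > len(items) {
			end = len(items)
		}
		chunks = append(chunks, items[start:end])
	}
	return chunks
}
```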
@ -274,30 +410,7 @@ func (oCtx *PluginInstance) openS3(input string) error {

			return fmt.Errorf(PluginName + " plugin error: failed to list objects: " + err.Error())
		}
		for _, obj := range page.Contents {
			path := obj.Key

			if startTS != "" {
				matches := filepathRE.FindStringSubmatch(*path)
				if matches != nil {
					pathTS := matches[1]
					if pathTS < startTS {
						continue
					}
					if endTS != "" && pathTS > endTS {
						continue
					}
				}
			}

			isCompressed := strings.HasSuffix(*path, ".json.gz")
			if filepath.Ext(*path) != ".json" && !isCompressed {
				continue
			}

			var fi fileInfo = fileInfo{name: *path, isCompressed: isCompressed}
			oCtx.files = append(oCtx.files, fi)
		}
	default:
	}
}
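The plain string comparisons on `pathTS` above work because the timestamps embedded in CloudTrail key names (`YYYYMMDDTHHmmZ`) are fixed-width and zero-padded, so lexicographic order matches chronological order. A tiny illustration (timestamp values are made up):

```go
package main

import "fmt"

func main() {
	// Fixed-width timestamps extracted from the object key names.
	fmt.Println("20230405T1205" < "20230405T1310") // true: earlier time sorts first
	fmt.Println("20231105T0900" > "20230405T1310") // true: later date sorts after
}
```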
@ -434,7 +547,12 @@ func (oCtx *PluginInstance) openSQS(input string) error {

queueName := input[6:]

urlResult, err := oCtx.sqsClient.GetQueueUrl(ctx, &sqs.GetQueueUrlInput{QueueName: &queueName})
var sqsOwnerAccountPtr *string
if oCtx.config.SQSOwnerAccount != "" {
	sqsOwnerAccountPtr = &oCtx.config.SQSOwnerAccount
}

urlResult, err := oCtx.sqsClient.GetQueueUrl(ctx, &sqs.GetQueueUrlInput{QueueName: &queueName, QueueOwnerAWSAccountId: sqsOwnerAccountPtr})

if err != nil {
	return err
@ -445,8 +563,6 @@ func (oCtx *PluginInstance) openSQS(input string) error {
	return oCtx.getMoreSQSFiles()
}

var dlErrChan chan error

func (oCtx *PluginInstance) s3Download(downloader *manager.Downloader, name string, dloadSlotNum int) {
	defer oCtx.s3.DownloadWg.Done()
@ -0,0 +1,9 @@
# Changelog

## v0.1.0

* [`6d6d44c`](https://github.com/falcosecurity/plugins/commit/6d6d44c) docs(plugins/collector): add example rule

* [`f2dcd4d`](https://github.com/falcosecurity/plugins/commit/f2dcd4d) docs(plugins/collector): intial README and CHANGELOG
@ -0,0 +1,36 @@
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2025 The Falco Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#

SHELL=/bin/bash -o pipefail
GO ?= go

NAME := collector
OUTPUT := lib$(NAME).so

ifeq ($(DEBUG), 1)
GODEBUGFLAGS= GODEBUG=cgocheck=1
else
GODEBUGFLAGS= GODEBUG=cgocheck=0
endif

all: $(OUTPUT)

clean:
	@rm -f $(OUTPUT)

$(OUTPUT):
	@$(GODEBUGFLAGS) $(GO) build -buildmode=c-shared -o $(OUTPUT) ./plugin

readme:
	@$(READMETOOL) -p ./$(OUTPUT) -f README.md
@ -0,0 +1,63 @@
# Falco Collector Plugin

The `collector` plugin is a generic [Falco](https://falco.org) source plugin that listens for incoming HTTP POST requests and ingests the raw payloads as events. It is designed for use cases where external components (e.g. other Falco instances, alerting systems, or webhooks) need to push data into the Falco engine for further processing.

This plugin **does not expose any fields** on its own. Instead, it is intended to be used in **conjunction with a parser plugin** such as [`json`](https://github.com/falcosecurity/plugins/tree/main/plugins/json), which can extract structured data from the raw payloads when they are JSON formatted.

## Example Use Case

You can deploy the collector plugin alongside the `json` plugin to:

- Ingest alerts or events from remote Falco instances configured to send their JSON-formatted output to the collector endpoint (see the configuration sketch below).
- Use Falco rules based on the `json` plugin to filter and analyze those events by parsing the JSON payload.
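For the first point, the sending side can be a stock Falco instance with JSON output and HTTP output enabled. A minimal sketch, assuming the collector listens on its default port on a host reachable as `collector-host` (the hostname is illustrative):

```yaml
# falco.yaml on the remote (sending) Falco instance
json_output: true
http_output:
  enabled: true
  url: "http://collector-host:54827"
```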
## Plugin Configuration

The plugin accepts the following configuration parameters as JSON:

```json
{
    "buffer": 0,
    "addr": ":54827"
}
```

| Key      | Type     | Default  | Description                                                                 |
| -------- | -------- | -------- | --------------------------------------------------------------------------- |
| `buffer` | `uint64` | `0`      | Number of payloads held by the buffer.                                      |
| `addr`   | `string` | `:54827` | Address for the HTTP server to listen on (e.g., `:8080`, `127.0.0.1:9000`). |

### Example Plugin Load Configuration

When using this plugin in Falco, configure it like this:

```yaml
load_plugins: [collector, json]

plugins:
  - name: collector
    library_path: libcollector.so

  - name: json
    library_path: libjson.so
```

## Example Payload

Send an event to the collector plugin using `curl`:

```bash
curl -X POST http://localhost:54827 -d '{"hostname":"x86","output":"14:50:34.502309868: Warning Sensitive file opened for reading by non-trusted program (file=/etc/shadow gparent=sudo ggparent=zsh gggparent=kitty evt_type=openat user=root user_uid=0 user_loginuid=1000 process=cat proc_exepath=/usr/bin/cat parent=sudo command=cat /etc/shadow terminal=34820 container_id=host container_name=host)","output_fields":{"container.id":"host","container.name":"host","evt.time":1746622234502309868,"evt.type":"openat","fd.name":"/etc/shadow","proc.aname[2]":"sudo","proc.aname[3]":"zsh","proc.aname[4]":"kitty","proc.cmdline":"cat /etc/shadow","proc.exepath":"/usr/bin/cat","proc.name":"cat","proc.pname":"sudo","proc.tty":34820,"user.loginuid":1000,"user.name":"root","user.uid":0},"priority":"Warning","rule":"Read sensitive file untrusted","source":"syscall","tags":["T1555","container","filesystem","host","maturity_stable","mitre_credential_access"],"time":"2025-05-07T12:50:34.502309868Z"}'
```

Then, using the `json` plugin, you can create rules that filter on any of the fields in the JSON payload, for example:

```yaml
- rule: Non-container event
  desc: Match host events from a JSON-formatted Falco alert for syscall source.
  condition: json.value[/output_fields/container.id] == "host"
  output: Non-container event (payload=%evt.plugininfo)
  priority: INFO
  source: collector
```
@ -0,0 +1,5 @@
module github.com/falcosecurity/plugins/plugins/collector

go 1.15

require github.com/falcosecurity/plugin-sdk-go v0.7.5
@ -0,0 +1,24 @@
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/falcosecurity/plugin-sdk-go v0.7.5 h1:ke/+kTt0PwedM8+IGTKcW3LrUI/xiJNDCSzqTSW+CvI=
github.com/falcosecurity/plugin-sdk-go v0.7.5/go.mod h1:NP+y22DYOS+G3GDXIXNmzf0CBL3nfPPMoQuHvAzfitQ=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@ -0,0 +1,133 @@
// SPDX-License-Identifier: Apache-2.0
/*
Copyright (C) 2025 The Falco Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package collector

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"

	"github.com/falcosecurity/plugin-sdk-go/pkg/sdk"
	"github.com/falcosecurity/plugin-sdk-go/pkg/sdk/plugins"
	"github.com/falcosecurity/plugin-sdk-go/pkg/sdk/plugins/source"
)

const (
	PluginID          uint32 = 24
	PluginName               = "collector"
	PluginDescription        = "Generic collector to ingest raw payloads"
	PluginContact            = "github.com/falcosecurity/plugins"
	PluginVersion            = "0.1.0"
	PluginEventSource        = "collector"
)

type PluginOpenParams struct {
	Buffer uint64 `json:"buffer" jsonschema:"title=Payloads buffer,description=Number of payloads held by the buffer (Default: 0),default=0"`
	Addr   string `json:"addr" jsonschema:"title=Listen address,description=The TCP address for the server to listen on in the form host:port (Default: :54827),default=:54827"`
}

type Plugin struct {
	plugins.BasePlugin
	// Contains the open params configuration
	openParams PluginOpenParams
}

func (p *PluginOpenParams) setDefault() {
	p.Buffer = 0
	p.Addr = ":54827"
}

func (m *Plugin) Info() *plugins.Info {
	return &plugins.Info{
		ID:          PluginID,
		Name:        PluginName,
		Description: PluginDescription,
		Contact:     PluginContact,
		Version:     PluginVersion,
		EventSource: PluginEventSource,
	}
}

func (p *Plugin) Init(cfg string) error {
	return nil
}

func (p *Plugin) Open(prms string) (source.Instance, error) {

	p.openParams.setDefault()
	if len(prms) != 0 {
		if err := json.Unmarshal([]byte(prms), &p.openParams); err != nil {
			return nil, fmt.Errorf("wrong open params format: %s", err.Error())
		}
	}

	evtC := make(chan source.PushEvent, p.openParams.Buffer)

	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		if r.Method != http.MethodPost {
			http.Error(w, "Only POST allowed", http.StatusMethodNotAllowed)
			return
		}
		data, err := io.ReadAll(r.Body)
		defer r.Body.Close()

		pushEvt := source.PushEvent{
			Err:  err,
			Data: data,
		}

		evtC <- pushEvt
	})

	server := &http.Server{
		Addr:    p.openParams.Addr,
		Handler: mux,
	}

	go (func() {
		if err := server.ListenAndServe(); err != nil {
			pushEvt := source.PushEvent{
				Err: fmt.Errorf("failed to start server: %v", err),
			}
			evtC <- pushEvt
		}
	})()

	return source.NewPushInstance(
		evtC,
		source.WithInstanceClose(func() {
			if err := server.Close(); err != nil {
				pushEvt := source.PushEvent{
					Err: fmt.Errorf("failed to stop server: %v", err),
				}
				evtC <- pushEvt
			}
		}),
	)
}

func (m *Plugin) String(evt sdk.EventReader) (string, error) {
	evtBytes, err := io.ReadAll(evt.Reader())
	if err != nil {
		return "", err
	}
	// The string representation of an event is the raw payload
	return string(evtBytes), nil
}
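The `PluginOpenParams` above arrive in `Open()` as a JSON string. In Falco this is typically supplied through the plugin's `open_params` entry; a minimal sketch (the values shown are illustrative, not defaults you must use):

```yaml
plugins:
  - name: collector
    library_path: libcollector.so
    open_params: '{"buffer": 128, "addr": "0.0.0.0:54827"}'
```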
@ -0,0 +1,34 @@
// SPDX-License-Identifier: Apache-2.0
/*
Copyright (C) 2025 The Falco Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
	"github.com/falcosecurity/plugin-sdk-go/pkg/sdk/plugins"
	"github.com/falcosecurity/plugin-sdk-go/pkg/sdk/plugins/source"
	"github.com/falcosecurity/plugins/plugins/collector/pkg/collector"
)

func init() {
	plugins.SetFactory(func() plugins.Plugin {
		p := &collector.Plugin{}
		source.Register(p)
		return p
	})
}

func main() {}
@ -0,0 +1,6 @@
*.so
*.a
*.o
.vscode
.idea
build*
@ -0,0 +1,141 @@
# Changelog

## v0.3.4

* [`885c18e`](https://github.com/falcosecurity/plugins/commit/885c18e) update(plugins/container): bump to 0.3.4.

* [`11c7d16`](https://github.com/falcosecurity/plugins/commit/11c7d16) chore(plugins/container): move error log to debug level.

* [`0275c81`](https://github.com/falcosecurity/plugins/commit/0275c81) chore(plugins/container): added some tests around workerLoop().

* [`4bcabb2`](https://github.com/falcosecurity/plugins/commit/4bcabb2) chore(plugins/container): improve exit strategy for goroutine workers when st...

* [`dd90663`](https://github.com/falcosecurity/plugins/commit/dd90663) chore(plugins/container): fixed a log.

* [`4684790`](https://github.com/falcosecurity/plugins/commit/4684790) fix(plugins/container): fixed build under recent gcc by including `algorithm`.

* [`2487f7c`](https://github.com/falcosecurity/plugins/commit/2487f7c) chore(plugins/container): move `containerEventsErrorTimeout` to cri.

* [`2fc5772`](https://github.com/falcosecurity/plugins/commit/2fc5772) cleanup(plugins/container): podman `system.Events` now returns error synchron...

* [`f9da9fa`](https://github.com/falcosecurity/plugins/commit/f9da9fa) chore(plugins/container): port docker engine away from deprecated APIs.

## v0.3.3

* [`5ca391e`](https://github.com/falcosecurity/plugins/commit/5ca391e) update(plugins/container): bump to v0.3.3

* [`f28adb7`](https://github.com/falcosecurity/plugins/commit/f28adb7) fix(plugins/container): parse_exit_process_event

* [`a97e226`](https://github.com/falcosecurity/plugins/commit/a97e226) chore(container/make): add CMAKE_EXPORT_COMPILE_COMMANDS

## v0.3.2

* [`92ec4dc`](https://github.com/falcosecurity/plugins/commit/92ec4dc) chore(plugins/container): add a trace log when removing container from procexit.

* [`6a75982`](https://github.com/falcosecurity/plugins/commit/6a75982) update(plugins/container): bump version to 0.3.2.

* [`1f8a375`](https://github.com/falcosecurity/plugins/commit/1f8a375) fix(plugins/container): properly cleanup stale container cache entries for ex...

* [`2f4b632`](https://github.com/falcosecurity/plugins/commit/2f4b632) chore(plugins/container): properly cleanup fetchCh in test.

* [`1c135e3`](https://github.com/falcosecurity/plugins/commit/1c135e3) chore(plugins/container): let async_ctx own the fetcher channel.

* [`6e02f91`](https://github.com/falcosecurity/plugins/commit/6e02f91) chore(plugins/container): drop fulfilled TODOs

* [`e8745cf`](https://github.com/falcosecurity/plugins/commit/e8745cf) chore(plugins/container): introduce and use container_info::ptr_t

* [`db2b9c9`](https://github.com/falcosecurity/plugins/commit/db2b9c9) chore(plugins/container): headers cleanup

* [`a7da58c`](https://github.com/falcosecurity/plugins/commit/a7da58c) chore(plugins/container): avoid building unneeded RE-flex targets

* [`e281227`](https://github.com/falcosecurity/plugins/commit/e281227) fix(container): detect libpod container ids with cgroups mode split

## v0.3.1

* [`398db32`](https://github.com/falcosecurity/plugins/commit/398db32) new(plugins/container): add test around null healthcheck in container json.

* [`ab266f5`](https://github.com/falcosecurity/plugins/commit/ab266f5) fix(plugins/container): fix healthcheck probe args retrieval since they can b...

## v0.3.0

* [`2b5f8a8`](https://github.com/falcosecurity/plugins/commit/2b5f8a8) update(plugins/container): bump plugin version to 0.3.0

* [`5cfa378`](https://github.com/falcosecurity/plugins/commit/5cfa378) chore(plugins/container): set an unexisted tid on generated asyncevents.

## v0.2.6

* [`f01e70d`](https://github.com/falcosecurity/plugins/commit/f01e70d) update(plugins/container): bump container plugin to 0.2.6.

* [`5fcee14`](https://github.com/falcosecurity/plugins/commit/5fcee14) fix(plugins/container): avoid possible nil ptr dereference in cri and contain...

## v0.2.5

* [`2bb872e`](https://github.com/falcosecurity/plugins/commit/2bb872e) fx(plugins/container): do not override containers_image_openpgp tag in `exe` ...

* [`576b1c9`](https://github.com/falcosecurity/plugins/commit/576b1c9) fix(plugins/container): redefine port binding port and IP as integers

## v0.2.4

* [`b1a5800`](https://github.com/falcosecurity/plugins/commit/b1a5800) chore(plugins/container): bump version to 0.2.4

## v0.2.3

* [`bc645a8`](https://github.com/falcosecurity/plugins/commit/bc645a8) docs(plugins/container): deprecation message for old `k8s` fields

* [`298b671`](https://github.com/falcosecurity/plugins/commit/298b671) chore(plugins/container): avoid useless req.set_value of empty string.

## v0.2.2

* [`9c1c488`](https://github.com/falcosecurity/plugins/commit/9c1c488) fix(plugins/container): use `C.GoString()` in `AskForContainerInfo`.

* [`b909298`](https://github.com/falcosecurity/plugins/commit/b909298) update(plugins/container): bumped plugin container to 0.2.2.

* [`a5840d1`](https://github.com/falcosecurity/plugins/commit/a5840d1) fix(plugins/container): use an unique ctx for fetcher.

## v0.2.1

* [`7fef864`](https://github.com/falcosecurity/plugins/commit/7fef864) new(plugins/container): suggest more output fields.

* [`b8140c8`](https://github.com/falcosecurity/plugins/commit/b8140c8) chore(plugins/container): bump version to 0.2.1.

* [`c122ed4`](https://github.com/falcosecurity/plugins/commit/c122ed4) chore(plugins/container): make ASYNC cap resilient to multiple calls.

* [`e25a1f8`](https://github.com/falcosecurity/plugins/commit/e25a1f8) cleanup(plugins/container): drop `async_ctx` static variable.

## v0.2.0

* [`0d595a2`](https://github.com/falcosecurity/plugins/commit/0d595a2) new(plugins/container): added fetcher tests.

* [`89712a5`](https://github.com/falcosecurity/plugins/commit/89712a5) fix(plugin/container): avoid overwriting host container info when loading pre...

* [`ff332cb`](https://github.com/falcosecurity/plugins/commit/ff332cb) fix(plugins/container): fixed CRI listing filter.

* [`5b374f7`](https://github.com/falcosecurity/plugins/commit/5b374f7) new(plugins/container): immediately enrich plugin cache with pre-existing con...

* [`ca2c560`](https://github.com/falcosecurity/plugins/commit/ca2c560) new(plugins/container): print a debug log with all connected engine sockets.

* [`d6e6c6e`](https://github.com/falcosecurity/plugins/commit/d6e6c6e) chore(plugins/container): broaden exceptions management.

* [`e318e18`](https://github.com/falcosecurity/plugins/commit/e318e18) chore(plugins/container): bump container plugin to 0.2.0.

* [`d81c8c5`](https://github.com/falcosecurity/plugins/commit/d81c8c5) fix(plugins/container): fixed config tests.

* [`911e33d`](https://github.com/falcosecurity/plugins/commit/911e33d) chore(plugins/container): updated readme.

* [`adec84a`](https://github.com/falcosecurity/plugins/commit/adec84a) new(plugins/container): allow to specify which hook to be attached between {"...

## v0.1.0
@ -0,0 +1,62 @@
cmake_minimum_required(VERSION 3.28)

if(NOT DEFINED CMAKE_BUILD_TYPE OR CMAKE_BUILD_TYPE STREQUAL "")
  set(CMAKE_BUILD_TYPE "release")
endif()

list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules")

# project metadata
project(
  container
  VERSION 0.3.6
  DESCRIPTION "Falco container metadata enrichment Plugin"
  LANGUAGES CXX)

# compiler related configs
include(compiler)

# Include capabilities module.
# This also defines CAPS_SOURCES.
include(caps)

# Configure the macros header
configure_file(src/macros.h.in src/macros.h @ONLY)

file(GLOB SOURCES src/*.cpp src/matchers/*.cpp)

# project target
add_library(container SHARED ${SOURCES} ${CAPS_SOURCES})

# dependencies
if(ENABLE_ASYNC)
  include(go-worker)
  add_dependencies(container go-worker)
endif()
include(plugin-sdk-cpp)
include(reflex)
include(fmt)

# project compilation options
set_property(TARGET container PROPERTY POSITION_INDEPENDENT_CODE ON)
if (NOT MSVC)
  target_compile_options(container PRIVATE "-Wl,-z,relro,-z,now")
  target_compile_options(container PRIVATE "-fstack-protector-strong")
else()
  # Workaround https://github.com/golang/go/issues/71921
  target_compile_definitions(container PRIVATE "_CRT_USE_C_COMPLEX_H")
endif()
# When compiling in Debug mode, this will define the DEBUG symbol for use in your code.
target_compile_options(container PUBLIC "$<$<CONFIG:DEBUG>:-DDEBUG>")
target_compile_features(container PUBLIC cxx_std_20)

# project includes
target_include_directories(container PRIVATE ${CMAKE_BINARY_DIR}/src/ src/ ${PLUGIN_SDK_INCLUDE} ${PLUGIN_SDK_DEPS_INCLUDE} ${WORKER_INCLUDE})

# project linked libraries
target_link_libraries(container PRIVATE fmt::fmt-header-only ReflexLibStatic ${WORKER_DEP} ${WORKER_LIB})

option(ENABLE_TESTS "Enable build of unit tests" ON)
if(ENABLE_TESTS)
  add_subdirectory(${CMAKE_SOURCE_DIR}/test/)
endif()
@ -0,0 +1,57 @@
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2024 The Falco Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#

NAME := container
ROOT_DIR:=$(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
PROJECT_ROOT_DIR = $(shell git rev-parse --show-toplevel)

ifeq ($(OS),Windows_NT)
detected_OS := Windows
else
detected_OS := $(shell sh -c 'uname 2>/dev/null || echo Unknown')
endif

ifeq ($(detected_OS),Windows)
OUTPUT := $(NAME).dll
OUTPUT_FILE := build/Release/$(OUTPUT)
else ifeq ($(detected_OS),Darwin)
OUTPUT := lib$(NAME).dylib
OUTPUT_FILE := build/$(OUTPUT)
else
OUTPUT := lib$(NAME).so
OUTPUT_FILE := build/$(OUTPUT)
endif

all: $(OUTPUT)

.PHONY: clean
clean:
	rm -rf build $(OUTPUT)
	make -C go-worker/ clean

# This Makefile requires CMake installed on the system
.PHONY: $(OUTPUT)
$(OUTPUT):
	cmake -B build -S . -DCMAKE_EXPORT_COMPILE_COMMANDS=ON && cmake --build build --target $(NAME) --parallel 6 --config Release && cp $(OUTPUT_FILE) $(OUTPUT)

.PHONY: test
test: $(OUTPUT)
	make -C build/ test && build/test/test && make -C go-worker/ test

readme:
	@$(READMETOOL) -p ./$(OUTPUT) -f README.md

# Requires clang-format-18
fmt:
	git ls-files --directory $(ROOT_DIR) | grep -E '\.(cpp|h|c)$$' | xargs clang-format-18 -Werror --style=file:${PROJECT_ROOT_DIR}/.clang-format -i --verbose
@ -0,0 +1,189 @@
# Container metadata enrichment Plugin

## Introduction

The `container` plugin enhances the Falco syscall source by providing additional information about the container resources involved. You can find the comprehensive list of supported fields [here](#supported-fields).

### Functionality

The plugin reimplements, in plugin form, all the container-related logic that was previously built into libs, and it can be attached to any source.
Moreover, it aims to fix issues present in that implementation by gathering container metadata as quickly as possible,
so that no event misses its metadata.

## Capabilities

The `container` plugin implements the following capabilities:

* `capture listening` -> to attach the `container_id` foreign key to all pre-existing threadinfos, once they have been scraped from procfs by sinsp
* `extraction` -> to extract `container.X` fields
* `parsing` -> to parse `async` and `container` events (the latter for backward compatibility with existing scap files), and clone/fork/execve events to attach the `container_id` foreign key to any threads
* `async` -> to generate events with container information and `dump` the current plugin cache state when requested

It requires plugin API version **3.10.0**.

## Architecture



The `container` plugin is split into 2 modules:
* a [C++ shared object](src) that implements the capabilities above and holds the cache map `<container_id,container_info>`
* a [GO static library](go-worker) (linked inside the C++ shared object) that implements the worker logic to retrieve new containers' metadata leveraging existing SDKs

As soon as the plugin starts, the go-worker is started as part of the `async` capability, receiving the plugin init config and a C++ callback used to generate async events.
Whenever the GO worker finds a new container, it immediately generates an `async` event through the aforementioned callback.
The `async` event is then received by the C++ side as part of the `parsing` capability, which enriches its own internal state cache.
Every time a clone/fork/execve event is parsed, we extract the container_id from the `cgroups` field and attach it to the event's thread table entry as a foreign key.
Once extraction is requested for a thread, that container_id is used as the key into the plugin's internal container metadata cache, from which the requested fields are extracted.

Note, however, that for some container engines, namely `{bpm,lxc,libvirt_lxc}`, we only support fetching generic info, i.e. the container ID and the container type.
Given that there is no "listener" SDK to attach to, for these engines the `async` event is generated directly by the C++ code, as soon as the container ID is retrieved.
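As a concrete illustration of the cgroups-based lookup key: container runtimes embed the 64-character container ID in the cgroup path of every process running in the container, and the plugin-side logic boils down to pulling that ID out and truncating it to 12 characters. A minimal, hypothetical sketch of the idea in Go (the real matchers are the C++ sources under `src/matchers/` and handle many more path layouts):

```go
package main

import (
	"fmt"
	"regexp"
)

// containerIDRE matches a 64-hex-digit container ID embedded in a cgroup path,
// e.g. ".../docker-<64 hex chars>.scope" or "/docker/<64 hex chars>".
var containerIDRE = regexp.MustCompile(`([0-9a-f]{64})`)

func containerIDFromCgroup(cgroupPath string) (string, bool) {
	m := containerIDRE.FindStringSubmatch(cgroupPath)
	if m == nil {
		return "", false
	}
	// Falco exposes the truncated 12-character form as container.id.
	return m[1][:12], true
}

func main() {
	id, ok := containerIDFromCgroup("/sys/fs/cgroup/system.slice/docker-3ad7b26ded6d8e7b23da7d48fe889434573036c27ae5a74837233de441c3601e.scope")
	fmt.Println(id, ok) // 3ad7b26ded6d true
}
```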
### Plugin official name

`container`

### Supported Fields

<!-- README-PLUGIN-FIELDS -->
| NAME | TYPE | ARG | DESCRIPTION |
|
||||
|-------------------------------------|-----------|----------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| `container.id` | `string` | None | The truncated container ID (first 12 characters), e.g. 3ad7b26ded6d is extracted from the Linux cgroups by Falco within the kernel. Consequently, this field is reliably available and serves as the lookup key for Falco's synchronous or asynchronous requests against the container runtime socket to retrieve all other 'container.*' information. One important aspect to be aware of is that if the process occurs on the host, meaning not in the container PID namespace, this field is set to a string called 'host'. In Kubernetes, pod sandbox container processes can exist where `container.id` matches `k8s.pod.sandbox_id`, lacking other 'container.*' details. |
|
||||
| `container.full_id` | `string` | None | The full container ID, e.g. 3ad7b26ded6d8e7b23da7d48fe889434573036c27ae5a74837233de441c3601e. In contrast to `container.id`, we enrich this field as part of the container engine enrichment. In instances of userspace container engine lookup delays, this field may not be available yet. |
|
||||
| `container.name` | `string` | None | The container name. In instances of userspace container engine lookup delays, this field may not be available yet. One important aspect to be aware of is that if the process occurs on the host, meaning not in the container PID namespace, this field is set to a string called 'host'. |
|
||||
| `container.image` | `string` | None | The container image name (e.g. falcosecurity/falco:latest for docker). In instances of userspace container engine lookup delays, this field may not be available yet. |
|
||||
| `container.image.id` | `string` | None | The container image id (e.g. 6f7e2741b66b). In instances of userspace container engine lookup delays, this field may not be available yet. |
|
||||
| `container.type` | `string` | None | The container type, e.g. docker, cri-o, containerd etc. |
|
||||
| `container.privileged` | `bool` | None | 'true' for containers running as privileged, 'false' otherwise. In instances of userspace container engine lookup delays, this field may not be available yet. |
|
||||
| `container.mounts` | `string` | None | A space-separated list of mount information. Each item in the list has the format 'source:dest:mode:rdrw:propagation'. In instances of userspace container engine lookup delays, this field may not be available yet. |
|
||||
| `container.mount` | `string` | Index, Key, Required | Information about a single mount, specified by number (e.g. container.mount[0]) or mount source (container.mount[/usr/local]). The pathname can be a glob (container.mount[/usr/local/*]), in which case the first matching mount will be returned. The information has the format 'source:dest:mode:rdrw:propagation'. If there is no mount with the specified index or matching the provided source, returns the string "none" instead of a NULL value. In instances of userspace container engine lookup delays, this field may not be available yet. |
|
||||
| `container.mount.source` | `string` | Index, Key, Required | The mount source, specified by number (e.g. container.mount.source[0]) or mount destination (container.mount.source[/host/lib/modules]). The pathname can be a glob. In instances of userspace container engine lookup delays, this field may not be available yet. |
|
||||
| `container.mount.dest` | `string` | Index, Key, Required | The mount destination, specified by number (e.g. container.mount.dest[0]) or mount source (container.mount.dest[/lib/modules]). The pathname can be a glob. In instances of userspace container engine lookup delays, this field may not be available yet. |
|
||||
| `container.mount.mode` | `string` | Index, Key, Required | The mount mode, specified by number (e.g. container.mount.mode[0]) or mount source (container.mount.mode[/usr/local]). The pathname can be a glob. In instances of userspace container engine lookup delays, this field may not be available yet. |
|
||||
| `container.mount.rdwr` | `string` | Index, Key, Required | The mount rdwr value, specified by number (e.g. container.mount.rdwr[0]) or mount source (container.mount.rdwr[/usr/local]). The pathname can be a glob. In instances of userspace container engine lookup delays, this field may not be available yet. |
|
||||
| `container.mount.propagation` | `string` | Index, Key, Required | The mount propagation value, specified by number (e.g. container.mount.propagation[0]) or mount source (container.mount.propagation[/usr/local]). The pathname can be a glob. In instances of userspace container engine lookup delays, this field may not be available yet. |
|
||||
| `container.image.repository` | `string` | None | The container image repository (e.g. falcosecurity/falco). In instances of userspace container engine lookup delays, this field may not be available yet. |
|
||||
| `container.image.tag` | `string` | None | The container image tag (e.g. stable, latest). In instances of userspace container engine lookup delays, this field may not be available yet. |
|
||||
| `container.image.digest` | `string` | None | The container image registry digest (e.g. sha256:d977378f890d445c15e51795296e4e5062f109ce6da83e0a355fc4ad8699d27). In instances of userspace container engine lookup delays, this field may not be available yet. |
|
||||
| `container.healthcheck` | `string` | None | The container's health check. Will be the null value ("N/A") if no healthcheck configured, "NONE" if configured but explicitly not created, and the healthcheck command line otherwise. In instances of userspace container engine lookup delays, this field may not be available yet. |
|
||||
| `container.liveness_probe` | `string` | None | The container's liveness probe. Will be the null value ("N/A") if no liveness probe configured, the liveness probe command line otherwise. In instances of userspace container engine lookup delays, this field may not be available yet. |
|
||||
| `container.readiness_probe` | `string` | None | The container's readiness probe. Will be the null value ("N/A") if no readiness probe configured, the readiness probe command line otherwise. In instances of userspace container engine lookup delays, this field may not be available yet. |
|
||||
| `container.start_ts` | `abstime` | None | Container start as epoch timestamp in nanoseconds based on proc.pidns_init_start_ts and extracted in the kernel and not from the container runtime socket / container engine. |
|
||||
| `container.duration` | `reltime` | None | Number of nanoseconds since container.start_ts. |
|
||||
| `container.ip` | `string` | None | The container's / pod's primary ip address as retrieved from the container engine. Only ipv4 addresses are tracked. Consider container.cni.json (CRI use case) for logging ip addresses for each network interface. In instances of userspace container engine lookup delays, this field may not be available yet. |
|
||||
| `container.cni.json` | `string` | None | The container's / pod's CNI result field from the respective pod status info. It contains ip addresses for each network interface exposed as unparsed escaped JSON string. Supported for CRI container engine (containerd, cri-o runtimes), optimized for containerd (some non-critical JSON keys removed). Useful for tracking ips (ipv4 and ipv6, dual-stack support) for each network interface (multi-interface support). In instances of userspace container engine lookup delays, this field may not be available yet. |
|
||||
| `container.host_pid` | `bool` | None | 'true' if the container is running in the host PID namespace, 'false' otherwise. |
|
||||
| `container.host_network` | `bool` | None | 'true' if the container is running in the host network namespace, 'false' otherwise. |
|
||||
| `container.host_ipc` | `bool` | None | 'true' if the container is running in the host IPC namespace, 'false' otherwise. |
|
||||
| `container.label` | `string` | Key, Required | Container label. E.g. 'container.label.foo'. |
|
||||
| `container.labels` | `string` | None | Container comma-separated key/value labels. E.g. 'foo1:bar1,foo2:bar2'. |
|
||||
| `proc.is_container_healthcheck` | `bool` | None | 'true' if this process is running as a part of the container's health check. |
|
||||
| `proc.is_container_liveness_probe` | `bool` | None | 'true' if this process is running as a part of the container's liveness probe. |
|
||||
| `proc.is_container_readiness_probe` | `bool` | None | 'true' if this process is running as a part of the container's readiness probe. |
|
||||
| `k8s.pod.name` | `string` | None | The Kubernetes pod name. This field is extracted from the container runtime socket simultaneously as we look up the 'container.*' fields. In cases of lookup delays, it may not be available yet. |
|
||||
| `k8s.ns.name` | `string` | None | The Kubernetes namespace name. This field is extracted from the container runtime socket simultaneously as we look up the 'container.*' fields. In cases of lookup delays, it may not be available yet. |
|
||||
| `k8s.pod.id` | `string` | None | [LEGACY] The Kubernetes pod UID, e.g. 3e41dc6b-08a8-44db-bc2a-3724b18ab19a. This legacy field points to `k8s.pod.uid`; however, the pod ID typically refers to the pod sandbox ID. We recommend using the semantically more accurate `k8s.pod.uid` field. This field is extracted from the container runtime socket simultaneously as we look up the 'container.*' fields. In cases of lookup delays, it may not be available yet. |
|
||||
| `k8s.pod.uid` | `string` | None | The Kubernetes pod UID, e.g. 3e41dc6b-08a8-44db-bc2a-3724b18ab19a. Note that the pod UID is a unique identifier assigned upon pod creation within Kubernetes, allowing the Kubernetes control plane to manage and track pods reliably. As such, it is fundamentally a different concept compared to the pod sandbox ID. This field is extracted from the container runtime socket simultaneously as we look up the 'container.*' fields. In cases of lookup delays, it may not be available yet. |
|
||||
| `k8s.pod.sandbox_id` | `string` | None | The truncated Kubernetes pod sandbox ID (first 12 characters), e.g 63060edc2d3a. The sandbox ID is specific to the container runtime environment. It is the equivalent of the container ID for the pod / sandbox and extracted from the Linux cgroups. As such, it differs from the pod UID. This field is extracted from the container runtime socket simultaneously as we look up the 'container.*' fields. In cases of lookup delays, it may not be available yet. In Kubernetes, pod sandbox container processes can exist where `container.id` matches `k8s.pod.sandbox_id`, lacking other 'container.*' details. |
|
||||
| `k8s.pod.full_sandbox_id` | `string` | None | The full Kubernetes pod / sandbox ID, e.g 63060edc2d3aa803ab559f2393776b151f99fc5b05035b21db66b3b62246ad6a. This field is extracted from the container runtime socket simultaneously as we look up the 'container.*' fields. In cases of lookup delays, it may not be available yet. |
|
||||
| `k8s.pod.label` | `string` | Key, Required | The Kubernetes pod label. The label can be accessed either with the familiar brackets notation, e.g. 'k8s.pod.label[foo]' or by appending a dot followed by the name, e.g. 'k8s.pod.label.foo'. The label name itself can include the original special characters such as '.', '-', '_' or '/' characters. For instance, 'k8s.pod.label[app.kubernetes.io/name]', 'k8s.pod.label.app.kubernetes.io/name' or 'k8s.pod.label[custom-label_one]' are all valid. This field is extracted from the container runtime socket simultaneously as we look up the 'container.*' fields. In cases of lookup delays, it may not be available yet. |
|
||||
| `k8s.pod.labels` | `string` | None | The Kubernetes pod comma-separated key/value labels. E.g. 'foo1:bar1,foo2:bar2'. This field is extracted from the container runtime socket simultaneously as we look up the 'container.*' fields. In cases of lookup delays, it may not be available yet. |
|
||||
| `k8s.pod.ip` | `string` | None | The Kubernetes pod ip, same as container.ip field as each container in a pod shares the network stack of the sandbox / pod. Only ipv4 addresses are tracked. Consider k8s.pod.cni.json for logging ip addresses for each network interface. This field is extracted from the container runtime socket simultaneously as we look up the 'container.*' fields. In cases of lookup delays, it may not be available yet. |
|
||||
| `k8s.pod.cni.json` | `string` | None | The Kubernetes pod CNI result field from the respective pod status info, same as container.cni.json field. It contains ip addresses for each network interface exposed as unparsed escaped JSON string. Supported for CRI container engine (containerd, cri-o runtimes), optimized for containerd (some non-critical JSON keys removed). Useful for tracking ips (ipv4 and ipv6, dual-stack support) for each network interface (multi-interface support). This field is extracted from the container runtime socket simultaneously as we look up the 'container.*' fields. In cases of lookup delays, it may not be available yet. |
|
||||
| `k8s.rc.name` | `string` | None | Deprecated. Use `k8smeta` plugin instead. |
|
||||
| `k8s.rc.id` | `string` | None | Deprecated. Use `k8smeta` plugin instead. |
|
||||
| `k8s.rc.label` | `string` | Key, Required | Deprecated. Use `k8smeta` plugin instead. |
|
||||
| `k8s.rc.labels` | `string` | None | Deprecated. Use `k8smeta` plugin instead. |
|
||||
| `k8s.svc.name` | `string` | None | Deprecated. Use `k8smeta` plugin instead. |
|
||||
| `k8s.svc.id` | `string` | None | Deprecated. Use `k8smeta` plugin instead. |
|
||||
| `k8s.svc.label` | `string` | Key, Required | Deprecated. Use `k8smeta` plugin instead. |
|
||||
| `k8s.svc.labels` | `string` | None | Deprecated. Use `k8smeta` plugin instead. |
|
||||
| `k8s.ns.id` | `string` | None | Deprecated. Use `k8smeta` plugin instead. |
|
||||
| `k8s.ns.label` | `string` | Key, Required | Deprecated. Use `k8smeta` plugin instead. |
|
||||
| `k8s.ns.labels` | `string` | None | Deprecated. Use `k8smeta` plugin instead. |
|
||||
| `k8s.rs.name` | `string` | None | Deprecated. Use `k8smeta` plugin instead. |
|
||||
| `k8s.rs.id` | `string` | None | Deprecated. Use `k8smeta` plugin instead. |
|
||||
| `k8s.rs.label` | `string` | Key, Required | Deprecated. Use `k8smeta` plugin instead. |
|
||||
| `k8s.rs.labels` | `string` | None | Deprecated. Use `k8smeta` plugin instead. |
|
||||
| `k8s.deployment.name` | `string` | None | Deprecated. Use `k8smeta` plugin instead. |
|
||||
| `k8s.deployment.id` | `string` | None | Deprecated. Use `k8smeta` plugin instead. |
|
||||
| `k8s.deployment.label` | `string` | Key, Required | Deprecated. Use `k8smeta` plugin instead. |
|
||||
| `k8s.deployment.labels` | `string` | None | Deprecated. Use `k8smeta` plugin instead. |
|
||||
<!-- /README-PLUGIN-FIELDS -->
## Requirements

* `containerd` >= 1.7 (https://kubernetes.io/docs/tasks/administer-cluster/switch-to-evented-pleg/, https://github.com/containerd/containerd/pull/7073)
* `cri-o` >= 1.26 (https://kubernetes.io/docs/tasks/administer-cluster/switch-to-evented-pleg/)
* `podman` >= v4.0.0 (2.0.0 introduced https://github.com/containers/podman/commit/165aef7766953cd0c0589ffa1abc25022a905adb, but the client library requires 4.0.0)

## Usage

### Configuration

By default, all engines are enabled on their **default sockets**:
* Docker: [`/var/run/docker.sock`]
* Podman: [`/run/podman/podman.sock` for root, plus `/run/user/$uid/podman/podman.sock` for each user in the system]
* Containerd: [`/run/host-containerd/containerd.sock`]
* Cri: [`/run/containerd/containerd.sock`, `/run/crio/crio.sock`, `/run/k3s/containerd/containerd.sock`, `/run/host-containerd/containerd.sock`]

Here's an example configuration in `falco.yaml`:

```yaml
plugins:
  - name: container
    # path to the plugin .so file
    library_path: libcontainer.so
    init_config:
      label_max_len: 100 # (optional, default: 100; container labels larger than this won't be reported)
      with_size: false # (optional, default: false; whether to enable container size inspection, which is inherently slow)
      hooks: ['create', 'start'] # (optional, default: 'create'. Some fields might not be available in the create hook, but we are guaranteed that it gets triggered before the first process gets started)
      engines:
        docker:
          enabled: true
          sockets: ['/var/run/docker.sock']
        podman:
          enabled: true
          sockets: ['/run/podman/podman.sock', '/run/user/1000/podman/podman.sock']
        containerd:
          enabled: true
          sockets: ['/run/containerd/containerd.sock']
        cri:
          enabled: true
          sockets: ['/run/crio/crio.sock']
        lxc:
          enabled: false
        libvirt_lxc:
          enabled: false
        bpm:
          enabled: false

load_plugins: [container]
```

### Rules

This plugin doesn't provide any custom rules; you can use the default Falco ruleset and add the necessary `container` fields to your rules (see the example rule below).
Note: leveraging the latest plugin SDK features, the plugin exposes certain fields as suggested output fields:
* `container.id`
* `container.name`
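For instance, a plain syscall rule can reference the plugin's fields directly in its condition and output. A hedged sketch (the rule text is illustrative only and not part of the shipped ruleset):

```yaml
- rule: Terminal shell in container
  desc: A shell was spawned with an attached terminal by a process running in a container.
  condition: >
    evt.type = execve and evt.dir = < and container.id != host
    and proc.name in (bash, sh, zsh) and proc.tty != 0
  output: Shell spawned in a container (command=%proc.cmdline container_id=%container.id container_name=%container.name image=%container.image)
  priority: WARNING
  source: syscall
```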
### Running

This plugin requires Falco version >= **0.41.0**.
The plugin is bundled within Falco, so you only need to run Falco as you normally would.

## Local development

### Build and test

Build the plugin on a fresh `Ubuntu 22.04` machine:

```bash
sudo apt update -y
sudo apt install -y cmake build-essential autoconf libtool pkg-config
git clone https://github.com/falcosecurity/plugins.git
cd plugins/container
make libcontainer.so
```

You can also run `make exe` from within the `go-worker` folder to build a `worker` executable to test the go-worker implementation.
(File diff suppressed because one or more lines are too long; binary image added, 279 KiB)
@ -0,0 +1,33 @@
# NOTE: containerd go package does only depend on the
# header files from libbtrfs.
# Therefore we just fetch the repo and fixup the paths to the
# header files, without building the library.

set(BTRFS_SRC "${PROJECT_BINARY_DIR}/_deps/btrfs-src")
set(LIBBTRFS_SRC "${BTRFS_SRC}/libbtrfs")

include(FetchContent)

FetchContent_Declare(
  btrfs
  GIT_REPOSITORY https://github.com/kdave/btrfs-progs.git
  GIT_TAG v6.13
)
FetchContent_MakeAvailable(btrfs)

# Configure version.h.in with pre-defined values
# (same values of v6.13).
# See https://github.com/kdave/btrfs-progs/blob/devel/configure.ac#L18
set(LIBBTRFS_MAJOR 0)
set(LIBBTRFS_MINOR 1)
set(LIBBTRFS_PATCHLEVEL 4)
set(PACKAGE_VERSION v6.13)
configure_file(${LIBBTRFS_SRC}/version.h.in ${LIBBTRFS_SRC}/version.h)

# Create a `btrfs` folder and move required `*.h` there,
# since the includes will be <btrfs/foo.h>
file(GLOB LIBBTRFS_HEADERS "${LIBBTRFS_SRC}/*.h" "${BTRFS_SRC}/kernel-lib/*.h")
file(MAKE_DIRECTORY ${LIBBTRFS_SRC}/btrfs)
file(COPY ${LIBBTRFS_HEADERS} DESTINATION ${LIBBTRFS_SRC}/btrfs/)

set(BTRFS_CGO_CFLAG -e CGO_CFLAGS=-I${LIBBTRFS_SRC})
@ -0,0 +1,21 @@
set(CAPS_SOURCES "")

macro(ADD_CAP cap)
  option(ENABLE_${cap} "Enable support for ${cap} capability" ON)
  if(${ENABLE_${cap}})
    message(STATUS "${cap} capability enabled")
    add_compile_definitions(_HAS_${cap})
    string(TOLOWER ${cap} lower_cap)
    file(GLOB_RECURSE SOURCES src/caps/${lower_cap}/*.cpp)
    list(APPEND CAPS_SOURCES ${SOURCES})
  endif()
endmacro()

ADD_CAP(ASYNC)
ADD_CAP(EXTRACT)
ADD_CAP(LISTENING)
ADD_CAP(PARSE)

if(NOT ENABLE_ASYNC AND NOT ENABLE_EXTRACT AND NOT ENABLE_LISTENING AND NOT ENABLE_PARSE)
  message(FATAL_ERROR "No capabilities enabled.")
endif ()
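Each `ADD_CAP` call above creates an independent `ENABLE_<CAP>` CMake option, so individual capabilities can be compiled out at configure time. A hedged example invocation (the flags follow directly from the options defined in this module):

```bash
# Configure the build without the LISTENING capability, keeping the others enabled
cmake -B build -S . -DENABLE_LISTENING=OFF
cmake --build build --target container
```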
Some files were not shown because too many files have changed in this diff.