Compare commits

Comparing falco-4.1. ... master (327 commits)
(Commit list: the comparison contains 327 commits, but only bare commit SHAs were captured in this export; the author, date, and message columns are empty, so the table is omitted here.)
@@ -35,13 +35,13 @@ Please remove the leading whitespace before the `/kind <>` you uncommented.

> /area falco-chart
> /area falco-exporter-chart
> /area falcosidekick-chart
> /area falco-talon-chart
> /area event-generator-chart
> /area k8s-metacollector
> /area k8s-metacollector-chart

<!--
Please remove the leading whitespace before the `/area <>` you uncommented.

@@ -61,6 +61,7 @@ Fixes #

**Special notes for your reviewer**:

**Checklist**
<!--
Place an '[x]' (no spaces) in all applicable fields. Please remove unrelated fields.
@@ -4,7 +4,3 @@ updates:

    directory: "/"
    schedule:
      interval: "daily"
    labels:
      - "area/dependency"
      - "release-note-none"
      - "ok-to-test"
@@ -9,11 +9,11 @@ jobs:

    steps:
      - name: Checkout repository
        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

      - name: Run Helm Docs and check the outcome
        run: |
          for chart in event-generator falco k8s-metacollector; do
          for chart in $(ls ./charts); do
            docker run \
              --rm \
              --workdir=/helm-docs \

@@ -27,9 +27,9 @@ jobs:

      - name: Print a comment in case of failure
        run: |
          echo "The README.md filer are not up to date.
          echo "The README.md files are not up to date.

          Please, run make docs before pushing."
          Please, run \"make docs\" before pushing."
          exit 1
        if: |
          failure() && github.event.pull_request.head.repo.full_name == github.repository
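For reference, a rough local equivalent of this check is sketched below: regenerate every chart's README with helm-docs and fail if the working tree changed. The `jnorwood/helm-docs` image and the mount layout are assumptions, not taken from the workflow excerpt; the repository's own Makefile target may pin a different image or version.

```bash
# Regenerate each chart's README with helm-docs (image name is an assumption),
# then complain if any README.md is no longer up to date.
for chart in $(ls ./charts); do
  docker run \
    --rm \
    --workdir=/helm-docs \
    --volume "$(pwd)/charts/${chart}:/helm-docs" \
    -u "$(id -u)" \
    jnorwood/helm-docs:latest
done
git diff --exit-code -- 'charts/*/README.md' \
  || echo 'The README.md files are not up to date. Please, run "make docs" before pushing.'
```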
@@ -0,0 +1,24 @@

name: Links

on:
  push:
    branches:
      - main
      - master
  pull_request:

jobs:
  linkChecker:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0

      - name: Link Checker
        uses: lycheeverse/lychee-action@5c4ee84814c983aa7164eaee476f014e53ff3963 # v2.5.0
        with:
          args: --no-progress './**/*.yml' './**/*.yaml' './**/*.md' './**/*.gotmpl' './**/*.tpl' './**/OWNERS' './**/LICENSE'
          token: ${{ secrets.GITHUB_TOKEN }}
          fail: true
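The same check can be reproduced locally with the lychee CLI, a sketch of which follows; the glob list mirrors the workflow arguments, and the token is only needed to avoid GitHub API rate limits.

```bash
# Run the link check locally with the same globs as the workflow above.
lychee --no-progress \
  --github-token "$GITHUB_TOKEN" \
  './**/*.yml' './**/*.yaml' './**/*.md' \
  './**/*.gotmpl' './**/*.tpl' './**/OWNERS' './**/LICENSE'
```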
@@ -19,12 +19,12 @@ jobs:

    steps:
      - name: Checkout
        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0

      - name: Install Cosign
        uses: sigstore/cosign-installer@e1523de7571e31dbe865fd2e80c5c7c23ae71eb4 # v3.4.0
        uses: sigstore/cosign-installer@398d4b0eeef1380460a10c8013a76f728fb906ac # v3.9.1

      - name: Configure Git
        run: |

@@ -32,21 +32,22 @@ jobs:

          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"

      - name: Set up Helm
        uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78 # v3.5
        uses: azure/setup-helm@b9e51907a09c216f16ebe8536097933489208112 # v4.3.0

      - name: Add dependency chart repos
        run: |
          helm repo add falcosecurity https://falcosecurity.github.io/charts

      - name: Run chart-releaser
        uses: helm/chart-releaser-action@a917fd15b20e8b64b94d9158ad54cd6345335584 # v1.6.0
        uses: helm/chart-releaser-action@cae68fefc6b5f367a0275617c9f83181ba54714f # v1.7.0
        with:
          charts_dir: charts
          config: cr.yaml
        env:
          CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"

      - name: Login to GitHub Container Registry
        uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
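Once chart-releaser has published a release, the charts can be consumed either from the classic Helm repository or, after the registry login step above, from GitHub Packages as OCI artifacts. A usage sketch follows; the exact OCI registry path is an assumption and is not shown in this excerpt.

```bash
# Consume the published charts from the classic Helm repository...
helm repo add falcosecurity https://falcosecurity.github.io/charts
helm repo update
helm search repo falcosecurity

# ...or pull a chart as an OCI artifact from GitHub Packages
# (registry path assumed, not taken from the workflow above).
helm pull oci://ghcr.io/falcosecurity/charts/falco
```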
@@ -8,21 +8,21 @@ jobs:

    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0

      - name: Set up Helm
        uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78 # v3.5
        uses: azure/setup-helm@b9e51907a09c216f16ebe8536097933489208112 # v4.3.0
        with:
          version: '3.14.0'
          version: "3.14.0"

      - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
      - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
        with:
          python-version: '3.x'
          python-version: "3.x"

      - name: Set up chart-testing
        uses: helm/chart-testing-action@e6669bcd63d7cb57cb4380c33043eebe5d111992 # v2.6.1
        uses: helm/chart-testing-action@0d28d3144d3a25ea2cc349d6e59901c4ff469b3b # v2.7.0

      - name: Run chart-testing (lint)
        run: ct lint --config ct.yaml

@@ -37,47 +37,34 @@ jobs:

      - name: Create KIND Cluster
        if: steps.list-changed.outputs.changed == 'true'
        uses: helm/kind-action@dda0770415bac9fc20092cacbc54aa298604d140 # v1.8.0
        uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
        with:
          config: ./tests/kind-config.yaml

      - name: install falco if needed (ie for falco-exporter)
        if: steps.list-changed.outputs.changed == 'true'
        run: |
          changed=$(ct list-changed --config ct.yaml)
          if [[ "$changed[@]" =~ "charts/falco-exporter" ]]; then
            helm repo add falcosecurity https://falcosecurity.github.io/charts
            helm repo update
            helm install falco falcosecurity/falco -f ./tests/falco-test-ci.yaml
            kubectl get po -A
            sleep 120
            kubectl get po -A
          fi

      - name: Run chart-testing (install)
        if: steps.list-changed.outputs.changed == 'true'
        run: ct install --config ct.yaml
        run: ct install --exclude-deprecated --config ct.yaml

  go-unit-tests:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0

      - name: Set up Helm
        uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78 # v3.5
        uses: azure/setup-helm@b9e51907a09c216f16ebe8536097933489208112 # v4.3.0
        with:
          version: '3.10.3'
          version: "3.10.3"

      - name: Update repo deps
        run: helm dependency update ./charts/falco

      - name: Setup Go
        uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
        uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
        with:
          go-version: '1.21'
          go-version: "1.21"
          check-latest: true

      - name: K8s-metacollector unit tests
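A local approximation of this pipeline is sketched below, assuming the chart-testing (`ct`), `kind`, and `helm` CLIs are installed; the commands mirror the workflow steps shown above.

```bash
# Lint all charts with the repository's chart-testing configuration.
ct lint --config ct.yaml

# Create a throwaway kind cluster and run the install tests against it,
# skipping deprecated charts just like the workflow does.
kind create cluster --config ./tests/kind-config.yaml
ct install --exclude-deprecated --config ct.yaml

# The Go unit-test job additionally needs the falco chart dependencies in place.
helm dependency update ./charts/falco
```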
@@ -3,3 +3,4 @@

*.swp
*.swo
*~
.vscode
@@ -0,0 +1,22 @@

nats:/host:port
https://yds.serverless.yandexcloud.net/
http:/host:port
https://chat.googleapis.com/v1/spaces/XXXXXX/YYYYYY
https://xxxx/hooks/YYYY
https://cliq.zoho.eu/api/v2/channelsbyname/XXXX/message*
https://outlook.office.com/webhook/XXXXXX/IncomingWebhook/YYYYYY
https://outlook.office.com/webhook/XXXXXX/IncomingWebhook/YYYYYY
https://discord.com/api/webhooks/xxxxxxxxxx
http://kafkarest:8082/topics/test
https://api.spyderbat.com/
https://hooks.slack.com/services/XXXX/YYYY/ZZZZ
http://\{domain*
https://github.com/falcosecurity/falcosidekick/tree/master/deploy/helm/falcosidekick
http://some.url/some/path/
https://localhost:32765/k8s-audit
https://some.url/some/path/
http://localhost:8765/versions
https://environmentid.live.dynatrace.com/api
https://yourdomain/e/ENVIRONMENTID/api
http://falco-talon:2803
https://http-intake.logs.datadoghq.com/
Makefile

@@ -20,9 +20,9 @@ lint-%:

		-u $$(id -u) \
		quay.io/helmpack/chart-testing:$(LINT_IMAGE_VERSION) \
		ct lint --config ./ct.yaml --charts ./charts/$*

.PHONY: docs
docs: $(addprefix docs-, $(filter-out falco-exporter,$(CHARTS_NAMES)))
docs: $(addprefix docs-, $(CHARTS_NAMES))

docs-%:
	@docker run \
OWNERS

@@ -2,9 +2,10 @@ approvers:

  - leogr
  - Issif
  - cpanato
  - alacuku
  - ekoops
reviewers:
  - bencer
  - alacuku
emeritus_approvers:
  - leodido
  - fntlnz
@@ -4,7 +4,7 @@

This GitHub project is the source for the [Falco](https://github.com/falcosecurity/falco) Helm chart repository that you can use to [deploy](https://falco.org/docs/getting-started/deployment/) Falco in your Kubernetes infrastructure.

The purpose of this repository is to provide a place for maintaining and contributing Charts related to the Falco project, with CI processes in place for managing the releasing of Charts into [our Helm Chart Repository]((https://falcosecurity.github.io/charts)).
The purpose of this repository is to provide a place for maintaining and contributing Charts related to the Falco project, with CI processes in place for managing the releasing of Charts into [our Helm Chart Repository](https://falcosecurity.github.io/charts).

For more information about installing and using Helm, see the
[Helm Docs](https://helm.sh/docs/).

@@ -16,17 +16,17 @@ We also, are publishing the charts in a OCI Image and it is hosted in [GitHub Pa

The Charts in this repository are organized into folders: each directory that contains a `Chart.yaml` is a chart.

The Charts in the `master` branch (with a corresponding [GitHub release](https://github.com/falcosecurity/charts/releases)) match the latest packaged Charts in [our Helm Chart Repository]((https://falcosecurity.github.io/charts)), though there may be previous versions of a Chart available in that Chart Repository.
The Charts in the `master` branch (with a corresponding [GitHub release](https://github.com/falcosecurity/charts/releases)) match the latest packaged Charts in [our Helm Chart Repository](https://falcosecurity.github.io/charts), though there may be previous versions of a Chart available in that Chart Repository.

## Charts

Charts currently available are listed below.

- [falco](./charts/falco)
- [falco-exporter](./charts/falco-exporter)
- [falcosidekick](./charts/falcosidekick)
- [event-generator](./charts/event-generator)
- [k8s-metacollector](./charts/k8s-metacollector)
- [falco-talon](./charts/falco-talon)

## Usage

@@ -52,7 +52,7 @@ So, we ask you to follow these simple steps when making your PR:

- The [DCO](https://github.com/falcosecurity/.github/blob/master/CONTRIBUTING.md#developer-certificate-of-origin) is required to contribute to a `falcosecurity` project. So ensure that all your commits have been signed off. We will not be able to merge the PR if a commit is not signed off.
- Bump the version number of the chart by modifying the `version` value in the chart's `Chart.yaml` file. This is particularly important, as it allows our CI to release a new chart version. If the version has not been increased, we will not be able to merge the PR.
- Add a new section in the chart's `CHANGELOG.md` file with the new version number of the chart.
- If your changes affect any chart variables, please update the chart's `README.md` file accordingly and run `make docs` in the chart folder.
- If your changes affect any chart variables, please update the chart's `README.gotmpl` file accordingly and run `make docs` in the main folder.

Finally, when opening your PR, please fill in the provided PR template, including the final checklist of items to indicate that all the steps above have been performed.
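The Usage section of the README is cut off in this excerpt; a typical install, following the repository's own instructions, looks like this (the `falco` namespace is only an example):

```bash
# Add the falcosecurity repository and install the falco chart.
helm repo add falcosecurity https://falcosecurity.github.io/charts
helm repo update
helm install falco falcosecurity/falco --namespace falco --create-namespace
```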
@@ -4,6 +4,10 @@

This file documents all notable changes to `event-generator` Helm Chart. The release
numbering uses [semantic versioning](http://semver.org).

## v0.3.4

* Pass `--all` flag to event-generator binary to allow disabled rules to run, e.g. the k8saudit ruleset.

## v0.3.3

* Update README.md.
@@ -15,7 +15,7 @@ type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.3.3
version: 0.3.4

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
@@ -117,7 +117,7 @@ The command removes all the Kubernetes components associated with the chart and

## Configuration

The following table lists the main configurable parameters of the event-generator chart v0.3.3 and their default values. See `values.yaml` for full list.
The following table lists the main configurable parameters of the event-generator chart v0.3.4 and their default values. See `values.yaml` for full list.

## Values
@@ -23,6 +23,7 @@ spec:

          command:
            - /bin/event-generator
            - {{ .Values.config.command }}
            - --all
            {{- if .Values.config.actions }}
            - {{ .Values.config.actions }}
            {{- end }}
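With this change the pod always receives `--all`, so rules that are disabled by default (such as the k8saudit ruleset) can be exercised as well. A minimal install sketch follows; only the `config.command` and `config.actions` value names come from the template above, the concrete values are illustrative.

```bash
# Install event-generator so it keeps replaying the syscall actions;
# the --all flag added by the template lets disabled rules run too.
helm install event-generator falcosecurity/event-generator \
  --namespace event-generator --create-namespace \
  --set config.command=run \
  --set "config.actions=^syscall"
```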
@ -1,210 +0,0 @@
|
|||
# Change Log
|
||||
|
||||
This file documents all notable changes to `falco-exporter` Helm Chart. The release
|
||||
numbering uses [semantic versioning](http://semver.org).
|
||||
|
||||
## v0.9.9
|
||||
|
||||
* update tolerations
|
||||
|
||||
## v0.9.8
|
||||
|
||||
* add annotation for set of folder's grafana-chart
|
||||
|
||||
## v0.9.7
|
||||
|
||||
* noop change just to test the ci
|
||||
|
||||
## v0.9.6
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Bump falco-exporter to v0.8.3
|
||||
|
||||
## v0.9.5
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Removed unnecessary capabilities from security context
|
||||
* Setted filesystem on read-only
|
||||
|
||||
## v0.9.4
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Add options to configure readiness/liveness probe values
|
||||
|
||||
## v0.9.3
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Bump falco-exporter to v0.8.2
|
||||
|
||||
## v0.9.2
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Add option to place Grafana dashboard in a folder
|
||||
|
||||
## v0.9.1
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Fix PSP allowed host path prefix to match grpc socket path change.
|
||||
|
||||
## v0.8.3
|
||||
|
||||
### Major Changes
|
||||
|
||||
* Changing the grpc socket path from `unix:///var/run/falco/falco.soc` to `unix:///run/falco/falco.sock`.
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Bump falco-exporter to v0.8.0
|
||||
|
||||
## v0.8.2
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Support configuration of updateStrategy of the Daemonset
|
||||
|
||||
## v0.8.0
|
||||
|
||||
* Upgrade falco-exporter version to v0.7.0 (see the [falco-exporter changelog](https://github.com/falcosecurity/falco-exporter/releases/tag/v0.7.0))
|
||||
|
||||
### Major Changes
|
||||
|
||||
* Add option to add labels to the Daemonset pods
|
||||
|
||||
## v0.7.2
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Add option to add labels to the Daemonset pods
|
||||
|
||||
## v0.7.1
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Fix `FalcoExporterAbsent` expression
|
||||
|
||||
## v0.7.0
|
||||
|
||||
### Major Changes
|
||||
|
||||
* Adds ability to create custom PrometheusRules for alerting
|
||||
|
||||
## v0.6.2
|
||||
|
||||
## Minor Changes
|
||||
|
||||
* Add Check availability of 'monitoring.coreos.com/v1' api version
|
||||
|
||||
## v0.6.1
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Add option the add annotations to the Daemonset
|
||||
|
||||
## v0.6.0
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Upgrade falco-exporter version to v0.6.0 (see the [falco-exporter changelog](https://github.com/falcosecurity/falco-exporter/releases/tag/v0.6.0))
|
||||
|
||||
## v0.5.2
|
||||
|
||||
### Minor changes
|
||||
|
||||
* Make image registry configurable
|
||||
|
||||
## v0.5.1
|
||||
|
||||
* Display only non-zero rates in Grafana dashboard template
|
||||
|
||||
## v0.5.0
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Upgrade falco-exporter version to v0.5.0
|
||||
* Add metrics about Falco drops
|
||||
* Make `unix://` prefix optional
|
||||
|
||||
## v0.4.2
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Fix Prometheus datasource name reference in grafana dashboard template
|
||||
|
||||
## v0.4.1
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Support release namespace configuration
|
||||
|
||||
## v0.4.0
|
||||
|
||||
### Mayor Changes
|
||||
|
||||
* Add Mutual TLS for falco-exporter enable/disabled feature
|
||||
|
||||
## v0.3.8
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Replace extensions apiGroup/apiVersion because of deprecation
|
||||
|
||||
## v0.3.7
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Fixed falco-exporter PSP by allowing secret volumes
|
||||
|
||||
## v0.3.6
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Add SecurityContextConstraint to allow deploying in Openshift
|
||||
|
||||
## v0.3.5
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Added the possibility to automatically add a PSP (in combination with a Role and a RoleBindung) via the podSecurityPolicy values
|
||||
* Namespaced the falco-exporter ServiceAccount and Service
|
||||
|
||||
## v0.3.4
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Add priorityClassName to values
|
||||
|
||||
## v0.3.3
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Add grafana dashboard to helm chart
|
||||
|
||||
## v0.3.2
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Fix for additional labels for falco-exporter servicemonitor
|
||||
|
||||
## v0.3.1
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Added the support to deploy a Prometheus Service Monitor. Is disables by default.
|
||||
|
||||
## v0.3.0
|
||||
|
||||
### Major Changes
|
||||
|
||||
* Chart moved to [falcosecurity/charts](https://github.com/falcosecurity/charts) repository
|
||||
* gRPC over unix socket support (by default)
|
||||
* Updated falco-exporter version to `0.3.0`
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* README.md and CHANGELOG.md added
|
|
@ -1,36 +0,0 @@
|
|||
apiVersion: v2
|
||||
name: falco-exporter
|
||||
description: Prometheus Metrics Exporter for Falco output events
|
||||
|
||||
# A chart can be either an 'application' or a 'library' chart.
|
||||
#
|
||||
# Application charts are a collection of templates that can be packaged into versioned archives
|
||||
# to be deployed.
|
||||
#
|
||||
# Library charts provide useful utilities or functions for the chart developer. They're included as
|
||||
# a dependency of application charts to inject those utilities and functions into the rendering
|
||||
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
|
||||
type: application
|
||||
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
version: 0.9.9
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application.
|
||||
appVersion: 0.8.3
|
||||
|
||||
keywords:
|
||||
- monitoring
|
||||
- security
|
||||
- alerting
|
||||
- metric
|
||||
- troubleshooting
|
||||
- run-time
|
||||
|
||||
sources:
|
||||
- https://github.com/falcosecurity/falco-exporter
|
||||
|
||||
maintainers:
|
||||
- name: leogr
|
||||
email: me@leonardograsso.com
|
|
@ -1,108 +0,0 @@
|
|||
# falco-exporter Helm Chart
|
||||
|
||||
[falco-exporter](https://github.com/falcosecurity/falco-exporter) is a Prometheus Metrics Exporter for Falco output events.
|
||||
|
||||
Before using this chart, you need [Falco installed](https://falco.org/docs/installation/) and running with the [gRPC Output](https://falco.org/docs/grpc/) enabled (over Unix socket by default).
|
||||
|
||||
This chart is compatible with the [Falco Chart](https://github.com/falcosecurity/charts/tree/master/falco) version `v1.2.0` or greater. Instructions to enable the gRPC Output in the Falco Helm Chart can be found [here](https://github.com/falcosecurity/charts/tree/master/falco#enabling-grpc). We also strongly recommend using [gRPC over Unix socket](https://github.com/falcosecurity/charts/tree/master/falco#grpc-over-unix-socket-default).
|
||||
|
||||
## Introduction
|
||||
|
||||
The chart deploys **falco-exporter** as Daemon Set on your the Kubernetes cluster. If a [Prometheus installation](https://github.com/helm/charts/tree/master/stable/prometheus) is running within your cluster, metrics provided by **falco-exporter** will be automatically discovered.
|
||||
|
||||
## Adding `falcosecurity` repository
|
||||
|
||||
Prior to installing the chart, add the `falcosecurity` charts repository:
|
||||
|
||||
```bash
|
||||
helm repo add falcosecurity https://falcosecurity.github.io/charts
|
||||
helm repo update
|
||||
```
|
||||
|
||||
## Installing the Chart
|
||||
|
||||
To install the chart with the release name `falco-exporter` run:
|
||||
|
||||
```bash
|
||||
helm install falco-exporter falcosecurity/falco-exporter
|
||||
```
|
||||
|
||||
After a few seconds, **falco-exporter** should be running.
|
||||
|
||||
> **Tip**: List all releases using `helm list`, a release is a name used to track a specific deployment
|
||||
|
||||
## Uninstalling the Chart
|
||||
|
||||
To uninstall the `falco-exporter` deployment:
|
||||
|
||||
```bash
|
||||
helm uninstall falco-exporter
|
||||
```
|
||||
|
||||
The command removes all the Kubernetes components associated with the chart and deletes the release.
|
||||
|
||||
## Configuration
|
||||
|
||||
The following table lists the main configurable parameters of the chart and their default values.
|
||||
|
||||
| Parameter | Description | Default |
|
||||
| ------------------------------------------------ | ------------------------------------------------------------------------------------------------ | ---------------------------------- |
|
||||
| `image.registry` | The image registry to pull from | `docker.io` |
|
||||
| `image.repository` | The image repository to pull from | `falcosecurity/falco-exporter` |
|
||||
| `image.tag` | The image tag to pull | `0.8.3` |
|
||||
| `image.pullPolicy` | The image pull policy | `IfNotPresent` |
|
||||
| `falco.grpcUnixSocketPath` | Unix socket path for connecting to a Falco gRPC server | `unix:///var/run/falco/falco.sock` |
|
||||
| `falco.grpcTimeout` | gRPC connection timeout | `2m` |
|
||||
| `serviceAccount.create` | Specify if a service account should be created | `true` |
|
||||
| `podSecurityPolicy.create` | Specify if a PSP, Role & RoleBinding should be created | `false` |
|
||||
| `serviceMonitor.enabled` | Enabled deployment of a Prometheus operator Service Monitor | `false` |
|
||||
| `serviceMonitor.additionalLabels` | Add additional Labels to the Service Monitor | `{}` |
|
||||
| `serviceMonitor.interval` | Specify a user defined interval for the Service Monitor | `""` |
|
||||
| `serviceMonitor.scrapeTimeout` | Specify a user defined scrape timeout for the Service Monitor | `""` |
|
||||
| `grafanaDashboard.enabled` | Enable the falco security dashboard, see https://github.com/falcosecurity/falco-exporter#grafana | `false` |
|
||||
| `grafanaDashboard.folder` | The grafana folder to deplay the dashboard in | `""` |
|
||||
| `grafanaDashboard.namespace` | The namespace to deploy the dashboard configmap in | `default` |
|
||||
| `grafanaDashboard.prometheusDatasourceName` | The prometheus datasource name to be used for the dashboard | `Prometheus` |
|
||||
| `scc.create` | Create OpenShift's Security Context Constraint | `true` |
|
||||
| `service.mTLS.enabled` | Enable falco-exporter server Mutual TLS feature | `false` |
|
||||
| `prometheusRules.enabled` | Enable the creation of falco-exporter PrometheusRules | `false` |
|
||||
| `daemonset.podLabels` | Customized Daemonset pod labels | `{}` |
|
||||
| `healthChecks.livenessProbe.probesPort` | Liveness probes port | `19376` |
|
||||
| `healthChecks.readinessProbe.probesPort` | Readiness probes port | `19376` |
|
||||
| `healthChecks.livenessProbe.initialDelaySeconds` | Number of seconds before performing the first liveness probe | `60` |
|
||||
| `healthChecks.readinessProbe.initialDelaySeconds`| Number of seconds before performing the first readiness probe | `30` |
|
||||
| `healthChecks.livenessProbe.timeoutSeconds` | Number of seconds after which the liveness probe times out | `5` |
|
||||
| `healthChecks.readinessProbe.timeoutSeconds` | Number of seconds after which the readiness probe times out | `5` |
|
||||
| `healthChecks.livenessProbe.periodSeconds` | Time interval in seconds to perform the liveness probe | `15` |
|
||||
| `healthChecks.readinessProbe.periodSeconds` | Time interval in seconds to perform the readiness probe | `15` |
|
||||
|
||||
Please, refer to [values.yaml](./values.yaml) for the full list of configurable parameters.
|
||||
|
||||
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
|
||||
|
||||
```bash
|
||||
helm install falco-exporter --set falco.grpcTimeout=3m falcosecurity/falco-exporter
|
||||
```
|
||||
|
||||
Alternatively, a YAML file that specifies the parameters' values can be provided while installing the chart. For example,
|
||||
|
||||
```bash
|
||||
helm install falco-exporter -f values.yaml falcosecurity/falco-exporter
|
||||
```
|
||||
|
||||
### Enable Mutual TLS
|
||||
|
||||
Mutual TLS for `/metrics` endpoint can be enabled to prevent alerts content from being consumed by unauthorized components.
|
||||
|
||||
To install falco-exporter with Mutual TLS enabled, you have to:
|
||||
|
||||
```shell
|
||||
helm install falco-exporter \
|
||||
--set service.mTLS.enabled=true \
|
||||
--set-file service.mTLS.server.key=/path/to/server.key \
|
||||
--set-file service.mTLS.server.crt=/path/to/server.crt \
|
||||
--set-file service.mTLS.ca.crt=/path/to/ca.crt \
|
||||
falcosecurity/falco-exporter
|
||||
```
|
||||
|
||||
> **Tip**: You can use the default [values.yaml](values.yaml)
|
|
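For a quick test of the mTLS setup above, the certificate files can be produced with a plain OpenSSL CA. This is only a sketch for non-production use; the file names simply match the `--set-file` flags shown earlier.

```bash
# Throwaway CA and a server certificate signed by it (testing only).
openssl req -x509 -newkey rsa:4096 -nodes -days 365 \
  -subj "/CN=falco-exporter-test-ca" -keyout ca.key -out ca.crt
openssl req -newkey rsa:4096 -nodes \
  -subj "/CN=falco-exporter" -keyout server.key -out server.csr
openssl x509 -req -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial \
  -days 365 -out server.crt

# Feed the generated files to the chart, mirroring the flags shown above.
helm install falco-exporter \
  --set service.mTLS.enabled=true \
  --set-file service.mTLS.server.key=server.key \
  --set-file service.mTLS.server.crt=server.crt \
  --set-file service.mTLS.ca.crt=ca.crt \
  falcosecurity/falco-exporter
```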
@ -1,16 +0,0 @@
|
|||
Get the falco-exporter metrics URL by running these commands:
|
||||
{{- if contains "NodePort" .Values.service.type }}
|
||||
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "falco-exporter.fullname" . }})
|
||||
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
|
||||
echo {{- if .Values.service.mTLS.enabled }} https{{- else }} http{{- end }}://$NODE_IP:$NODE_PORT/metrics
|
||||
{{- else if contains "LoadBalancer" .Values.service.type }}
|
||||
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
|
||||
You can watch the status of by running 'kubectl get svc -w {{ template "falco-exporter.fullname" . }}'
|
||||
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "falco-exporter.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
|
||||
echo {{- if .Values.service.mTLS.enabled }} https{{- else }} http{{- end }}://$SERVICE_IP:{{ .Values.service.port }}/metrics
|
||||
{{- else if contains "ClusterIP" .Values.service.type }}
|
||||
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "falco-exporter.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
|
||||
echo "Visit {{- if .Values.service.mTLS.enabled }} https{{- else }} http{{- end }}://127.0.0.1:{{ .Values.service.targetPort }}/metrics to use your application"
|
||||
kubectl port-forward --namespace {{ .Release.Namespace }} $POD_NAME {{ .Values.service.targetPort }}
|
||||
{{- end }}
|
||||
echo {{- if .Values.service.mTLS.enabled }} "You'll need a valid client certificate and its corresponding key for Mutual TLS handshake" {{- end }}
|
|
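The NOTES template above prints the metrics URL for each service type; for a ClusterIP service the quickest manual check is a port-forward. In the sketch below the release name, the `falco` namespace, and the 9376 port are assumptions based on the chart defaults, not values taken from this excerpt.

```bash
# Forward the falco-exporter service locally and inspect the Falco counters.
kubectl -n falco port-forward svc/falco-exporter 9376:9376 &
curl -s http://127.0.0.1:9376/metrics | grep '^falco_events'
```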
@ -1,98 +0,0 @@
|
|||
{{/* vim: set filetype=mustache: */}}
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "falco-exporter.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "falco-exporter.fullname" -}}
|
||||
{{- if .Values.fullnameOverride -}}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride -}}
|
||||
{{- if contains $name .Release.Name -}}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "falco-exporter.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Common labels
|
||||
*/}}
|
||||
{{- define "falco-exporter.labels" -}}
|
||||
{{ include "falco-exporter.selectorLabels" . }}
|
||||
{{- if .Chart.AppVersion }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
||||
{{- end }}
|
||||
{{- if not .Values.skipHelm }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- end }}
|
||||
{{- if not .Values.skipHelm }}
|
||||
helm.sh/chart: {{ include "falco-exporter.chart" . }}
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Selector labels
|
||||
*/}}
|
||||
{{- define "falco-exporter.selectorLabels" -}}
|
||||
app.kubernetes.io/name: {{ include "falco-exporter.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create the name of the service account to use
|
||||
*/}}
|
||||
{{- define "falco-exporter.serviceAccountName" -}}
|
||||
{{- if .Values.serviceAccount.create -}}
|
||||
{{ default (include "falco-exporter.fullname" .) .Values.serviceAccount.name }}
|
||||
{{- else -}}
|
||||
{{ default "default" .Values.serviceAccount.name }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create the name of the PSP to use
|
||||
*/}}
|
||||
{{- define "falco-exporter.podSecurityPolicyName" -}}
|
||||
{{- if .Values.podSecurityPolicy.create -}}
|
||||
{{ default (include "falco-exporter.fullname" .) .Values.podSecurityPolicy.name }}
|
||||
{{- else -}}
|
||||
{{ default "default" .Values.podSecurityPolicy.name }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Extract the unixSocket's directory path
|
||||
*/}}
|
||||
{{- define "falco-exporter.unixSocketDir" -}}
|
||||
{{- if .Values.falco.grpcUnixSocketPath -}}
|
||||
{{- .Values.falco.grpcUnixSocketPath | trimPrefix "unix://" | dir -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Return the appropriate apiVersion for rbac.
|
||||
*/}}
|
||||
{{- define "rbac.apiVersion" -}}
|
||||
{{- if .Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1" }}
|
||||
{{- print "rbac.authorization.k8s.io/v1" -}}
|
||||
{{- else -}}
|
||||
{{- print "rbac.authorization.k8s.io/v1beta1" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
|
@ -1,132 +0,0 @@
|
|||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: {{ include "falco-exporter.fullname" . }}
|
||||
labels:
|
||||
{{- include "falco-exporter.labels" . | nindent 4 }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "falco-exporter.selectorLabels" . | nindent 6 }}
|
||||
updateStrategy:
|
||||
{{ toYaml .Values.daemonset.updateStrategy | indent 4 }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "falco-exporter.selectorLabels" . | nindent 8 }}
|
||||
{{- if .Values.daemonset.podLabels }}
|
||||
{{ toYaml .Values.daemonset.podLabels | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.daemonset.annotations }}
|
||||
annotations:
|
||||
{{ toYaml .Values.daemonset.annotations | nindent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- with .Values.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.priorityClassName }}
|
||||
priorityClassName: "{{ .Values.priorityClassName }}"
|
||||
{{- end }}
|
||||
serviceAccountName: {{ include "falco-exporter.serviceAccountName" . }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.podSecurityContext | nindent 8 }}
|
||||
containers:
|
||||
- name: {{ .Chart.Name }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.securityContext | nindent 12 }}
|
||||
image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
args:
|
||||
- /usr/bin/falco-exporter
|
||||
{{- if .Values.falco.grpcUnixSocketPath }}
|
||||
- --client-socket={{ .Values.falco.grpcUnixSocketPath }}
|
||||
{{- else }}
|
||||
- --client-hostname={{ .Values.falco.grpcHostname }}
|
||||
- --client-port={{ .Values.falco.grpcPort }}
|
||||
{{- end }}
|
||||
- --timeout={{ .Values.falco.grpcTimeout }}
|
||||
- --listen-address=0.0.0.0:{{ .Values.service.port }}
|
||||
{{- if .Values.service.mTLS.enabled }}
|
||||
- --server-ca=/etc/falco/server-certs/ca.crt
|
||||
- --server-cert=/etc/falco/server-certs/server.crt
|
||||
- --server-key=/etc/falco/server-certs/server.key
|
||||
{{- end }}
|
||||
ports:
|
||||
- name: metrics
|
||||
containerPort: {{ .Values.service.targetPort }}
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
initialDelaySeconds: {{ .Values.healthChecks.livenessProbe.initialDelaySeconds }}
|
||||
timeoutSeconds: {{ .Values.healthChecks.livenessProbe.timeoutSeconds }}
|
||||
periodSeconds: {{ .Values.healthChecks.livenessProbe.periodSeconds }}
|
||||
httpGet:
|
||||
path: /liveness
|
||||
port: {{ .Values.healthChecks.livenessProbe.probesPort }}
|
||||
readinessProbe:
|
||||
initialDelaySeconds: {{ .Values.healthChecks.readinessProbe.initialDelaySeconds }}
|
||||
timeoutSeconds: {{ .Values.healthChecks.readinessProbe.timeoutSeconds }}
|
||||
periodSeconds: {{ .Values.healthChecks.readinessProbe.periodSeconds }}
|
||||
httpGet:
|
||||
path: /readiness
|
||||
port: {{ .Values.healthChecks.readinessProbe.probesPort }}
|
||||
resources:
|
||||
{{- toYaml .Values.resources | nindent 12 }}
|
||||
volumeMounts:
|
||||
{{- if .Values.falco.grpcUnixSocketPath }}
|
||||
- mountPath: {{ include "falco-exporter.unixSocketDir" . }}
|
||||
name: falco-socket-dir
|
||||
readOnly: true
|
||||
{{- else }}
|
||||
- mountPath: /etc/falco/certs
|
||||
name: certs-volume
|
||||
readOnly: true
|
||||
{{- end }}
|
||||
{{- if .Values.service.mTLS.enabled }}
|
||||
- mountPath: /etc/falco/server-certs
|
||||
name: server-certs-volume
|
||||
readOnly: true
|
||||
{{- end }}
|
||||
volumes:
|
||||
{{- if .Values.falco.grpcUnixSocketPath }}
|
||||
- name: falco-socket-dir
|
||||
hostPath:
|
||||
path: {{ include "falco-exporter.unixSocketDir" . }}
|
||||
{{- else }}
|
||||
- name: certs-volume
|
||||
secret:
|
||||
secretName: {{ include "falco-exporter.fullname" . }}-certs
|
||||
items:
|
||||
- key: client.key
|
||||
path: client.key
|
||||
- key: client.crt
|
||||
path: client.crt
|
||||
- key: ca.crt
|
||||
path: ca.crt
|
||||
{{- end }}
|
||||
{{- if .Values.service.mTLS.enabled }}
|
||||
- name: server-certs-volume
|
||||
secret:
|
||||
secretName: {{ include "falco-exporter.fullname" . }}-server-certs
|
||||
items:
|
||||
- key: server.key
|
||||
path: server.key
|
||||
- key: server.crt
|
||||
path: server.crt
|
||||
- key: ca.crt
|
||||
path: ca.crt
|
||||
{{- end }}
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
|
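The container args above switch between a Unix-socket and a TCP gRPC client depending on whether `falco.grpcUnixSocketPath` is set. A sketch of the TCP variant follows; only the value names come from the template, while the hostname, namespace, and port are illustrative assumptions.

```bash
# Point falco-exporter at Falco's TCP gRPC endpoint instead of the Unix socket.
helm upgrade --install falco-exporter falcosecurity/falco-exporter \
  --set falco.grpcUnixSocketPath="" \
  --set falco.grpcHostname=falco-grpc.falco.svc.cluster.local \
  --set falco.grpcPort=5060
```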
@ -1,316 +0,0 @@
|
|||
{{- if .Values.grafanaDashboard.enabled }}
|
||||
apiVersion: v1
|
||||
data:
|
||||
grafana-falco.json: |-
|
||||
{
|
||||
"__inputs": [
|
||||
],
|
||||
"__requires": [
|
||||
{
|
||||
"type": "grafana",
|
||||
"id": "grafana",
|
||||
"name": "Grafana",
|
||||
"version": "6.7.3"
|
||||
},
|
||||
{
|
||||
"type": "panel",
|
||||
"id": "graph",
|
||||
"name": "Graph",
|
||||
"version": ""
|
||||
},
|
||||
{
|
||||
"type": "datasource",
|
||||
"id": "prometheus",
|
||||
"name": "Prometheus",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"type": "panel",
|
||||
"id": "table",
|
||||
"name": "Table",
|
||||
"version": ""
|
||||
}
|
||||
],
|
||||
"annotations": {
|
||||
"list": [
|
||||
{
|
||||
"builtIn": 1,
|
||||
"datasource": "-- Grafana --",
|
||||
"enable": true,
|
||||
"hide": true,
|
||||
"iconColor": "rgba(0, 211, 255, 1)",
|
||||
"name": "Annotations & Alerts",
|
||||
"type": "dashboard"
|
||||
}
|
||||
]
|
||||
},
|
||||
"editable": true,
|
||||
"gnetId": null,
|
||||
"graphTooltip": 0,
|
||||
"id": null,
|
||||
"links": [],
|
||||
"panels": [
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": "$datasource",
|
||||
"description": "",
|
||||
"fill": 1,
|
||||
"fillGradient": 0,
|
||||
"gridPos": {
|
||||
"h": 11,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 0
|
||||
},
|
||||
"hiddenSeries": false,
|
||||
"id": 2,
|
||||
"legend": {
|
||||
"alignAsTable": true,
|
||||
"avg": false,
|
||||
"current": false,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"rightSide": true,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": false
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 1,
|
||||
"nullPointMode": "null as zero",
|
||||
"options": {
|
||||
"dataLinks": []
|
||||
},
|
||||
"percentage": false,
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": true,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "rate(falco_events[5m]) > 0",
|
||||
"interval": "",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{`{{rule}} (node=\"{{kubernetes_node}}\",ns=\"{{k8s_ns_name}}\",pod=\"{{k8s_pod_name}}\")"`}},
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "Events rate",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
],
|
||||
"yaxis": {
|
||||
"align": false,
|
||||
"alignLevel": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"columns": [],
|
||||
"datasource": "$datasource",
|
||||
"fontSize": "100%",
|
||||
"gridPos": {
|
||||
"h": 10,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 11
|
||||
},
|
||||
"id": 4,
|
||||
"links": [],
|
||||
"pageSize": null,
|
||||
"showHeader": true,
|
||||
"sort": {
|
||||
"col": null,
|
||||
"desc": false
|
||||
},
|
||||
"styles": [
|
||||
{
|
||||
"alias": "Time",
|
||||
"align": "auto",
|
||||
"dateFormat": "YYYY-MM-DD HH:mm:ss",
|
||||
"pattern": "Time",
|
||||
"type": "date"
|
||||
},
|
||||
{
|
||||
"alias": "",
|
||||
"align": "auto",
|
||||
"colorMode": null,
|
||||
"colors": [
|
||||
"rgba(245, 54, 54, 0.9)",
|
||||
"rgba(237, 129, 40, 0.89)",
|
||||
"rgba(50, 172, 45, 0.97)"
|
||||
],
|
||||
"dateFormat": "YYYY-MM-DD HH:mm:ss",
|
||||
"decimals": 2,
|
||||
"link": false,
|
||||
"mappingType": 1,
|
||||
"pattern": "/__name__|instance|job|kubernetes_name|(__name|helm_|app_).*/",
|
||||
"sanitize": false,
|
||||
"thresholds": [],
|
||||
"type": "hidden",
|
||||
"unit": "short"
|
||||
},
|
||||
{
|
||||
"alias": "Count",
|
||||
"align": "auto",
|
||||
"colorMode": null,
|
||||
"colors": [
|
||||
"rgba(245, 54, 54, 0.9)",
|
||||
"rgba(237, 129, 40, 0.89)",
|
||||
"rgba(50, 172, 45, 0.97)"
|
||||
],
|
||||
"dateFormat": "YYYY-MM-DD HH:mm:ss",
|
||||
"decimals": 0,
|
||||
"mappingType": 1,
|
||||
"pattern": "Value",
|
||||
"thresholds": [],
|
||||
"type": "number",
|
||||
"unit": "short"
|
||||
},
|
||||
{
|
||||
"alias": "",
|
||||
"align": "left",
|
||||
"colorMode": null,
|
||||
"colors": [
|
||||
"rgba(245, 54, 54, 0.9)",
|
||||
"rgba(237, 129, 40, 0.89)",
|
||||
"rgba(50, 172, 45, 0.97)"
|
||||
],
|
||||
"dateFormat": "YYYY-MM-DD HH:mm:ss",
|
||||
"decimals": 0,
|
||||
"mappingType": 1,
|
||||
"pattern": "priority",
|
||||
"thresholds": [
|
||||
""
|
||||
],
|
||||
"type": "number",
|
||||
"unit": "none",
|
||||
"valueMaps": [
|
||||
{
|
||||
"text": "5",
|
||||
"value": "5"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"alias": "",
|
||||
"align": "left",
|
||||
"colorMode": null,
|
||||
"colors": [
|
||||
"rgba(245, 54, 54, 0.9)",
|
||||
"rgba(237, 129, 40, 0.89)",
|
||||
"rgba(50, 172, 45, 0.97)"
|
||||
],
|
||||
"decimals": 2,
|
||||
"pattern": "/.*/",
|
||||
"thresholds": [],
|
||||
"type": "string",
|
||||
"unit": "short"
|
||||
}
|
||||
],
|
||||
"targets": [
|
||||
{
|
||||
"expr": "falco_events",
|
||||
"format": "table",
|
||||
"instant": true,
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"timeFrom": null,
|
||||
"timeShift": null,
|
||||
"title": "Totals",
|
||||
"transform": "table",
|
||||
"transparent": true,
|
||||
"type": "table"
|
||||
}
|
||||
],
|
||||
"schemaVersion": 22,
|
||||
"style": "dark",
|
||||
"tags": [],
|
||||
"templating": {
|
||||
"list": []
|
||||
},
|
||||
"templating": {
|
||||
"list": [
|
||||
{
|
||||
"current": {
|
||||
"selected": false,
|
||||
"text": "{{ .Values.grafanaDashboard.prometheusDatasourceName }}",
|
||||
"value": "{{ .Values.grafanaDashboard.prometheusDatasourceName }}"
|
||||
},
|
||||
"hide": 0,
|
||||
"includeAll": false,
|
||||
"label": null,
|
||||
"multi": false,
|
||||
"name": "datasource",
|
||||
"options": [],
|
||||
"query": "prometheus",
|
||||
"refresh": 1,
|
||||
"regex": "",
|
||||
"skipUrlSync": false,
|
||||
"type": "datasource"
|
||||
}
|
||||
]
|
||||
},
|
||||
"time": {
|
||||
"from": "now-6h",
|
||||
"to": "now"
|
||||
},
|
||||
"timepicker": {},
|
||||
"timezone": "",
|
||||
"title": "Falco Dashboard",
|
||||
"uid": "FvUFlfuZz"
|
||||
}
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
labels:
|
||||
grafana_dashboard: "1"
|
||||
{{- if .Values.grafanaDashboard.folder }}
|
||||
annotations:
|
||||
k8s-sidecar-target-directory: /tmp/dashboards/{{ .Values.grafanaDashboard.folder }}
|
||||
grafana_dashboard_folder: {{ .Values.grafanaDashboard.folder }}
|
||||
{{- end }}
|
||||
name: grafana-falco
|
||||
{{- if .Values.grafanaDashboard.namespace }}
|
||||
namespace: {{ .Values.grafanaDashboard.namespace }}
|
||||
{{- else }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- end}}
|
||||
{{- end -}}
|
|
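The ConfigMap above is only rendered when the dashboard is enabled; deploying it into a namespace and folder watched by a Grafana dashboard sidecar looks roughly like this (the `monitoring` namespace and `falco` folder are illustrative, the value names come from the template).

```bash
# Ship the bundled Grafana dashboard into the sidecar-watched namespace,
# grouped under a dedicated folder.
helm upgrade --install falco-exporter falcosecurity/falco-exporter \
  --set grafanaDashboard.enabled=true \
  --set grafanaDashboard.namespace=monitoring \
  --set grafanaDashboard.folder=falco
```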
@ -1,28 +0,0 @@
|
|||
{{- if and .Values.podSecurityPolicy.create (.Capabilities.APIVersions.Has "policy/v1beta1") }}
|
||||
apiVersion: policy/v1beta1
|
||||
kind: PodSecurityPolicy
|
||||
metadata:
|
||||
name: {{ include "falco-exporter.podSecurityPolicyName" . }}
|
||||
labels:
|
||||
{{- include "falco-exporter.labels" . | nindent 4 }}
|
||||
{{- with .Values.podSecurityPolicy.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
allowPrivilegeEscalation: false
|
||||
allowedHostPaths:
|
||||
- pathPrefix: "/run/falco"
|
||||
readOnly: true
|
||||
fsGroup:
|
||||
rule: RunAsAny
|
||||
runAsUser:
|
||||
rule: RunAsAny
|
||||
seLinux:
|
||||
rule: RunAsAny
|
||||
supplementalGroups:
|
||||
rule: RunAsAny
|
||||
volumes:
|
||||
- 'hostPath'
|
||||
- 'secret'
|
||||
{{- end -}}
|
|
@ -1,81 +0,0 @@
|
|||
{{- if and .Values.prometheusRules.enabled .Values.serviceMonitor.enabled }}
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: PrometheusRule
|
||||
metadata:
|
||||
name: {{ include "falco-exporter.fullname" . }}
|
||||
{{- if .Values.prometheusRules.namespace }}
|
||||
namespace: {{ .Values.prometheusRules.namespace }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "falco-exporter.labels" . | nindent 4 }}
|
||||
{{- if .Values.prometheusRules.additionalLabels }}
|
||||
{{- toYaml .Values.prometheusRules.additionalLabels | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
groups:
|
||||
- name: falco-exporter
|
||||
rules:
|
||||
{{- if .Values.prometheusRules.enabled }}
|
||||
- alert: FalcoExporterAbsent
|
||||
expr: absent(up{job="{{- include "falco-exporter.fullname" . }}"})
|
||||
for: 10m
|
||||
annotations:
|
||||
summary: Falco Exporter has dissapeared from Prometheus service discovery.
|
||||
description: No metrics are being scraped from falco. No events will trigger any alerts.
|
||||
labels:
|
||||
severity: critical
|
||||
{{- end }}
|
||||
{{- if .Values.prometheusRules.alerts.warning.enabled }}
|
||||
- alert: FalcoWarningEventsRateHigh
|
||||
annotations:
|
||||
summary: Falco is experiencing high rate of warning events
|
||||
description: A high rate of warning events are being detected by Falco
|
||||
expr: rate(falco_events{priority="4"}[{{ .Values.prometheusRules.alerts.warning.rate_interval }}]) > {{ .Values.prometheusRules.alerts.warning.threshold }}
|
||||
for: 15m
|
||||
labels:
|
||||
severity: warning
|
||||
{{- end }}
|
||||
{{- if .Values.prometheusRules.alerts.error.enabled }}
|
||||
- alert: FalcoErrorEventsRateHigh
|
||||
annotations:
|
||||
summary: Falco is experiencing high rate of error events
|
||||
description: A high rate of error events are being detected by Falco
|
||||
expr: rate(falco_events{priority="3"}[{{ .Values.prometheusRules.alerts.error.rate_interval }}]) > {{ .Values.prometheusRules.alerts.error.threshold }}
|
||||
for: 15m
|
||||
labels:
|
||||
severity: warning
|
||||
{{- end }}
|
||||
{{- if .Values.prometheusRules.alerts.critical.enabled }}
|
||||
- alert: FalcoCriticalEventsRateHigh
|
||||
annotations:
|
||||
summary: Falco is experiencing high rate of critical events
|
||||
description: A high rate of critical events are being detected by Falco
|
||||
expr: rate(falco_events{priority="2"}[{{ .Values.prometheusRules.alerts.critical.rate_interval }}]) > {{ .Values.prometheusRules.alerts.critical.threshold }}
|
||||
for: 15m
|
||||
labels:
|
||||
severity: critical
|
||||
{{- end }}
|
||||
{{- if .Values.prometheusRules.alerts.alert.enabled }}
|
||||
- alert: FalcoAlertEventsRateHigh
|
||||
annotations:
|
||||
summary: Falco is experiencing high rate of alert events
|
||||
description: A high rate of alert events are being detected by Falco
|
||||
expr: rate(falco_events{priority="1"}[{{ .Values.prometheusRules.alerts.alert.rate_interval }}]) > {{ .Values.prometheusRules.alerts.alert.threshold }}
|
||||
for: 5m
|
||||
labels:
|
||||
severity: critical
|
||||
{{- end }}
|
||||
{{- if .Values.prometheusRules.alerts.emergency.enabled }}
|
||||
- alert: FalcoEmergencyEventsRateHigh
|
||||
annotations:
|
||||
summary: Falco is experiencing high rate of emergency events
|
||||
description: A high rate of emergency events are being detected by Falco
|
||||
expr: rate(falco_events{priority="0"}[{{ .Values.prometheusRules.alerts.emergency.rate_interval }}]) > {{ .Values.prometheusRules.alerts.emergency.threshold }}
|
||||
for: 1m
|
||||
labels:
|
||||
severity: critical
|
||||
{{- end }}
|
||||
{{- with .Values.prometheusRules.additionalAlerts }}
|
||||
{{ . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
|
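These PrometheusRule resources are only rendered when both the ServiceMonitor and the rules are enabled. A sketch of turning them on with a custom warning alert follows; the value names mirror the template above, while the threshold and rate interval numbers are illustrative.

```bash
# Enable scraping plus the bundled alerting rules, tuning the warning alert.
helm upgrade --install falco-exporter falcosecurity/falco-exporter \
  --set serviceMonitor.enabled=true \
  --set prometheusRules.enabled=true \
  --set prometheusRules.alerts.warning.enabled=true \
  --set prometheusRules.alerts.warning.rate_interval=5m \
  --set prometheusRules.alerts.warning.threshold=0.1
```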
@ -1,22 +0,0 @@
|
|||
{{- if .Values.podSecurityPolicy.create -}}
|
||||
kind: Role
|
||||
apiVersion: {{ template "rbac.apiVersion" . }}
|
||||
metadata:
|
||||
name: {{ include "falco-exporter.podSecurityPolicyName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "falco-exporter.labels" . | nindent 4 }}
|
||||
{{- with .Values.podSecurityPolicy.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- policy
|
||||
resources:
|
||||
- podsecuritypolicies
|
||||
resourceNames:
|
||||
- {{ include "falco-exporter.podSecurityPolicyName" . }}
|
||||
verbs:
|
||||
- use
|
||||
{{- end -}}
|
|
@ -1,20 +0,0 @@
|
|||
{{- if .Values.podSecurityPolicy.create -}}
|
||||
kind: RoleBinding
|
||||
apiVersion: {{ template "rbac.apiVersion" . }}
|
||||
metadata:
|
||||
name: {{ include "falco-exporter.podSecurityPolicyName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "falco-exporter.labels" . | nindent 4 }}
|
||||
{{- with .Values.podSecurityPolicy.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ include "falco-exporter.serviceAccountName" . }}
|
||||
roleRef:
|
||||
kind: Role
|
||||
name: {{ include "falco-exporter.podSecurityPolicyName" . }}
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
{{- end -}}
|
|
@ -1,24 +0,0 @@
|
|||
{{- if .Values.certs }}
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ include "falco-exporter.fullname" . }}-certs
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "falco-exporter.labels" . | nindent 4 }}
|
||||
type: Opaque
|
||||
data:
|
||||
{{- if .Values.certs }}
|
||||
{{- if and .Values.certs.ca .Values.certs.ca.crt }}
|
||||
ca.crt: {{ .Values.certs.ca.crt | b64enc | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.certs.client }}
|
||||
{{- if .Values.certs.client.key }}
|
||||
client.key: {{ .Values.certs.client.key | b64enc | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.certs.client.crt }}
|
||||
client.crt: {{ .Values.certs.client.crt | b64enc | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
|
@ -1,41 +0,0 @@
|
|||
{{- if and .Values.scc.create (.Capabilities.APIVersions.Has "security.openshift.io/v1") }}
|
||||
apiVersion: security.openshift.io/v1
|
||||
kind: SecurityContextConstraints
|
||||
metadata:
|
||||
annotations:
|
||||
kubernetes.io/description: |
|
||||
This provides the minimum requirements for Falco-exporter to run in OpenShift.
|
||||
name: {{ template "falco-exporter.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "falco-exporter.labels" . | nindent 4 }}
|
||||
allowHostDirVolumePlugin: true
|
||||
allowHostIPC: false
|
||||
allowHostNetwork: false
|
||||
allowHostPID: false
|
||||
allowHostPorts: false
|
||||
allowPrivilegeEscalation: false
|
||||
allowPrivilegedContainer: false
|
||||
allowedCapabilities: []
|
||||
allowedUnsafeSysctls: []
|
||||
defaultAddCapabilities: []
|
||||
fsGroup:
|
||||
type: RunAsAny
|
||||
groups: []
|
||||
priority: 0
|
||||
readOnlyRootFilesystem: false
|
||||
requiredDropCapabilities: []
|
||||
runAsUser:
|
||||
type: RunAsAny
|
||||
seLinuxContext:
|
||||
type: RunAsAny
|
||||
seccompProfiles:
|
||||
- '*'
|
||||
supplementalGroups:
|
||||
type: RunAsAny
|
||||
users:
|
||||
- system:serviceaccount:{{ .Release.Namespace }}:{{ include "falco-exporter.serviceAccountName" . }}
|
||||
volumes:
|
||||
- hostPath
|
||||
- secret
|
||||
{{- end }}
|
|
@ -1,14 +0,0 @@
|
|||
{{- if .Values.service.mTLS.enabled }}
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ include "falco-exporter.fullname" . }}-server-certs
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "falco-exporter.labels" . | nindent 4 }}
|
||||
type: Opaque
|
||||
data:
|
||||
server.crt: {{ .Values.service.mTLS.server.crt | b64enc | quote }}
|
||||
server.key: {{ .Values.service.mTLS.server.key | b64enc | quote }}
|
||||
ca.crt: {{ .Values.service.mTLS.ca.crt | b64enc | quote }}
|
||||
{{- end }}
|
|
@ -1,42 +0,0 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ include "falco-exporter.fullname" . }}
|
||||
{{- if .Values.service.annotations }}
|
||||
annotations:
|
||||
{{ toYaml .Values.service.annotations | indent 4 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "falco-exporter.labels" . | nindent 4 }}
|
||||
{{- if .Values.service.labels }}
|
||||
{{ toYaml .Values.service.labels | indent 4 }}
|
||||
{{- end }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
{{- if .Values.service.clusterIP }}
|
||||
clusterIP: {{ .Values.service.clusterIP }}
|
||||
{{- end }}
|
||||
{{- if .Values.service.externalIPs }}
|
||||
externalIPs:
|
||||
{{ toYaml .Values.service.externalIPs | indent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.service.loadBalancerIP }}
|
||||
loadBalancerIP: {{ .Values.service.loadBalancerIP }}
|
||||
{{- end }}
|
||||
{{- if .Values.service.loadBalancerSourceRanges }}
|
||||
loadBalancerSourceRanges:
|
||||
{{- range $cidr := .Values.service.loadBalancerSourceRanges }}
|
||||
- {{ $cidr }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
type: {{ .Values.service.type }}
|
||||
ports:
|
||||
- port: {{ .Values.service.port }}
|
||||
{{- if ( and (eq .Values.service.type "NodePort" ) (not (empty .Values.service.nodePort)) ) }}
|
||||
nodePort: {{ .Values.service.nodePort }}
|
||||
{{- end }}
|
||||
targetPort: {{ .Values.service.targetPort }}
|
||||
protocol: TCP
|
||||
name: metrics
|
||||
selector:
|
||||
{{- include "falco-exporter.selectorLabels" . | nindent 4 }}
|
|
@ -1,13 +0,0 @@
|
|||
{{- if .Values.serviceAccount.create -}}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ include "falco-exporter.serviceAccountName" . }}
|
||||
labels:
|
||||
{{- include "falco-exporter.labels" . | nindent 4 }}
|
||||
{{- with .Values.serviceAccount.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- end -}}
|
|
@ -1,24 +0,0 @@
|
|||
{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) .Values.serviceMonitor.enabled }}
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
metadata:
|
||||
name: {{ include "falco-exporter.fullname" . }}
|
||||
labels:
|
||||
{{- include "falco-exporter.labels" . | nindent 4 }}
|
||||
{{- range $key, $value := .Values.serviceMonitor.additionalLabels }}
|
||||
{{ $key }}: {{ $value | quote }}
|
||||
{{- end }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
endpoints:
|
||||
- port: metrics
|
||||
{{- if .Values.serviceMonitor.interval }}
|
||||
interval: {{ .Values.serviceMonitor.interval }}
|
||||
{{- end }}
|
||||
{{- if .Values.serviceMonitor.scrapeTimeout }}
|
||||
scrapeTimeout: {{ .Values.serviceMonitor.scrapeTimeout }}
|
||||
{{- end }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "falco-exporter.selectorLabels" . | nindent 6 }}
|
||||
{{- end }}
|
|
@ -1,15 +0,0 @@
|
|||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: "{{ include "falco-exporter.fullname" . }}-test-connection"
|
||||
labels:
|
||||
{{- include "falco-exporter.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
"helm.sh/hook": test-success
|
||||
spec:
|
||||
containers:
|
||||
- name: wget
|
||||
image: busybox
|
||||
command: ['wget']
|
||||
args: ['{{ include "falco-exporter.fullname" . }}:{{ .Values.service.port }}/metrics']
|
||||
restartPolicy: Never
|
|
@ -1,167 +0,0 @@
|
|||
# Default values for falco-exporter.
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
service:
|
||||
type: ClusterIP
|
||||
clusterIP: None
|
||||
port: 9376
|
||||
targetPort: 9376
|
||||
nodePort:
|
||||
labels: {}
|
||||
annotations:
|
||||
prometheus.io/scrape: "true"
|
||||
prometheus.io/port: "9376"
|
||||
# Enable Mutual TLS for HTTP metrics server
|
||||
mTLS:
|
||||
enabled: false
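# Illustrative sketch of enabling mTLS (an assumption for documentation only;
# assumes PEM-encoded material, which the chart renders into the
# "<fullname>-server-certs" Secret consumed by the metrics server):
# mTLS:
#   enabled: true
#   server:
#     crt: |
#       -----BEGIN CERTIFICATE-----
#       ...
#     key: |
#       -----BEGIN PRIVATE KEY-----
#       ...
#   ca:
#     crt: |
#       -----BEGIN CERTIFICATE-----
#       ...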
|
||||
|
||||
healthChecks:
|
||||
livenessProbe:
|
||||
# liveness probes port
|
||||
probesPort: 19376
|
||||
# -- Tells the kubelet that it should wait X seconds before performing the first probe.
|
||||
initialDelaySeconds: 60
|
||||
# -- Number of seconds after which the probe times out.
|
||||
timeoutSeconds: 5
|
||||
# -- Specifies that the kubelet should perform the check every x seconds.
|
||||
periodSeconds: 15
|
||||
readinessProbe:
|
||||
# readiness probes port
|
||||
probesPort: 19376
|
||||
# -- Tells the kubelet that it should wait X seconds before performing the first probe.
|
||||
initialDelaySeconds: 30
|
||||
# -- Number of seconds after which the probe times out.
|
||||
timeoutSeconds: 5
|
||||
# -- Specifies that the kubelet should perform the check every x seconds.
|
||||
periodSeconds: 15
|
||||
|
||||
image:
|
||||
registry: docker.io
|
||||
repository: falcosecurity/falco-exporter
|
||||
tag: 0.8.3
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
imagePullSecrets: []
|
||||
nameOverride: ""
|
||||
fullnameOverride: ""
|
||||
|
||||
priorityClassName:
|
||||
|
||||
falco:
|
||||
grpcUnixSocketPath: "unix:///run/falco/falco.sock"
|
||||
grpcTimeout: 2m
|
||||
|
||||
serviceAccount:
|
||||
# Specifies whether a service account should be created
|
||||
create: true
|
||||
# Annotations to add to the service account
|
||||
annotations: {}
|
||||
# The name of the service account to use.
|
||||
# If not set and create is true, a name is generated using the fullname template.
|
||||
# If set and create is false, an already existing serviceAccount must be provided.
|
||||
name:
|
||||
|
||||
podSecurityPolicy:
|
||||
# Specifies whether a PSP, Role and RoleBinding should be created
|
||||
create: false
|
||||
# Annotations to add to the PSP, Role and RoleBinding
|
||||
annotations: {}
|
||||
# The name of the PSP, Role and RoleBinding to use.
|
||||
# If not set and create is true, a name is generated using the fullname template
|
||||
name:
|
||||
|
||||
podSecurityContext:
|
||||
{}
|
||||
# fsGroup: 2000
|
||||
|
||||
daemonset:
|
||||
# Perform rolling updates by default in the DaemonSet agent
|
||||
# ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/
|
||||
updateStrategy:
|
||||
# You can also customize maxUnavailable or minReadySeconds if you
|
||||
# need it
|
||||
type: RollingUpdate
|
||||
|
||||
# Annotations to add to the DaemonSet pods
|
||||
annotations: {}
|
||||
podLabels: {}
|
||||
|
||||
securityContext:
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
readOnlyRootFilesystem: true
|
||||
allowPrivilegeEscalation: false
|
||||
privileged: false
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
|
||||
resources:
|
||||
{}
|
||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
||||
# choice for the user. This also increases chances charts run on environments with little
|
||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
# -- Tolerations to run on Kubernetes control planes.
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
key: node-role.kubernetes.io/master
|
||||
- effect: NoSchedule
|
||||
key: node-role.kubernetes.io/control-plane
|
||||
|
||||
affinity: {}
|
||||
|
||||
serviceMonitor:
|
||||
# Enable the deployment of a Service Monitor for the Prometheus Operator.
|
||||
enabled: false
|
||||
# Specify additional labels to be added to the Service Monitor.
|
||||
additionalLabels: {}
|
||||
# Specify a user defined interval. When not specified Prometheus default interval is used.
|
||||
interval: ""
|
||||
# Specify a user defined scrape timeout. When not specified Prometheus default scrape timeout is used.
|
||||
scrapeTimeout: ""
|
||||
|
||||
grafanaDashboard:
|
||||
enabled: false
|
||||
folder:
|
||||
namespace: default
|
||||
prometheusDatasourceName: Prometheus
|
||||
scc:
|
||||
# true enables the creation of Security Context Constraints in OpenShift
|
||||
create: true
|
||||
|
||||
# Create PrometheusRules for alerting on priority events
|
||||
prometheusRules:
|
||||
enabled: false
|
||||
alerts:
|
||||
warning:
|
||||
enabled: true
|
||||
rate_interval: "5m"
|
||||
threshold: 0
|
||||
error:
|
||||
enabled: true
|
||||
rate_interval: "5m"
|
||||
threshold: 0
|
||||
critical:
|
||||
enabled: true
|
||||
rate_interval: "5m"
|
||||
threshold: 0
|
||||
alert:
|
||||
enabled: true
|
||||
rate_interval: "5m"
|
||||
threshold: 0
|
||||
emergency:
|
||||
enabled: true
|
||||
rate_interval: "1m"
|
||||
threshold: 0
|
||||
additionalAlerts: {}
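# Illustrative sketch: additionalAlerts is rendered verbatim into the
# PrometheusRule template, so extra alerts can be supplied as a multi-line
# string. The priority label value (5) is an assumption for notice-level
# events, mirroring the numeric priorities used by the built-in alerts.
# additionalAlerts: |
#   - alert: FalcoNoticeEventsRateHigh
#     annotations:
#       summary: Falco is experiencing a high rate of notice events
#       description: A high rate of notice events is being detected by Falco
#     expr: rate(falco_events{priority="5"}[5m]) > 0
#     for: 15m
#     labels:
#       severity: warning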
|
|
@ -0,0 +1,39 @@
|
|||
# Change Log
|
||||
|
||||
This file documents all notable changes to Falco Talon Helm Chart. The release
|
||||
numbering uses [semantic versioning](http://semver.org).
|
||||
|
||||
## 0.3.0 - 2025-02-07
|
||||
|
||||
- bump up version to `v0.3.0`
|
||||
- fix missing usage of the `imagePullSecrets`
|
||||
|
||||
## 0.2.3 - 2024-12-18
|
||||
|
||||
- add a Grafana dashboard for the Prometheus metrics
|
||||
|
||||
## 0.2.1 - 2024-12-09
|
||||
|
||||
- bump up version to `v0.2.1` for bug fixes
|
||||
|
||||
## 0.2.0 - 2024-11-26
|
||||
- configure pod to not rollout on configmap change
|
||||
- configure pod to rollout on secret change
|
||||
- add config.rulesOverride allowing users to override config rules
|
||||
|
||||
## 0.1.3 - 2024-11-08
|
||||
|
||||
- change the key for the range over the rules files
|
||||
|
||||
## 0.1.2 - 2024-10-14
|
||||
|
||||
- remove all refs to the previous org
|
||||
|
||||
## 0.1.1 - 2024-10-01
|
||||
|
||||
- Use version `0.1.1`
|
||||
- Fix wrong port for the `serviceMonitor`
|
||||
|
||||
## 0.1.0 - 2024-09-05
|
||||
|
||||
- First release
|
|
@ -0,0 +1,18 @@
|
|||
apiVersion: v1
|
||||
appVersion: 0.3.0
|
||||
description: React to the events from Falco
|
||||
name: falco-talon
|
||||
version: 0.3.0
|
||||
keywords:
|
||||
- falco
|
||||
- monitoring
|
||||
- security
|
||||
- response-engine
|
||||
home: https://github.com/falcosecurity/falco-talon
|
||||
sources:
|
||||
- https://github.com/falcosecurity/falco-talon
|
||||
maintainers:
|
||||
- name: Issif
|
||||
email: issif+github@gadz.org
|
||||
- name: IgorEulalio
|
||||
email: igoreulalio.ie@gmail.com
|
|
@ -0,0 +1,76 @@
|
|||
# Falco Talon
|
||||
|
||||
   
|
||||
|
||||
## Description
|
||||
|
||||
`Falco Talon` is a Response Engine for managing threats in your Kubernetes clusters. It enhances the solutions proposed by the Falco community with a no-code, tailor-made solution. With simple rules, you can react to `events` from [`Falco`](https://falco.org) in milliseconds.
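For instance, the default override shipped in this chart's values (`config.rulesOverride`) declares a single action that terminates the offending Pod; a minimal sketch in the same format:

```yaml
- action: Terminate Pod
  actionner: kubernetes:terminate
  parameters:
    ignore_daemonsets: true
    ignore_statefulsets: true
    grace_period_seconds: 20
```

How actions are matched to incoming Falco events is covered in the documentation linked below.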
|
||||
|
||||
## Architecture
|
||||
|
||||
`Falco Talon` can receive the `events` from [`Falco`](https://falco.org) or [`Falcosidekick`](https://github.com/falcosecurity/falcosidekick):
|
||||
|
||||
```mermaid
|
||||
flowchart LR
|
||||
falco
|
||||
falcosidekick
|
||||
falco-talon
|
||||
falco -- event --> falcosidekick
|
||||
falco -- event --> falco-talon
|
||||
falcosidekick -- event --> falco-talon
|
||||
kubernetes -- context --> falco-talon
|
||||
falco-talon -- action --> aws
|
||||
falco-talon -- output --> minio
|
||||
falco-talon -- action --> kubernetes
|
||||
falco-talon -- notification --> slack
|
||||
```
|
||||
|
||||
## Documentation
|
||||
|
||||
The full documentation is available on its own website: [https://docs.falco-talon.org/docs](https://docs.falco-talon.org/docs).
|
||||
|
||||
## Installation
|
||||
|
||||
```shell
|
||||
helm repo add falcosecurity https://falcosecurity.github.io/charts
|
||||
helm repo update
|
||||
helm install falco-talon falcosecurity/falco-talon -n falco --create-namespace -f values.yaml
|
||||
```
|
||||
|
||||
### Update the rules
|
||||
|
||||
Update `rules.yaml` then:
|
||||
|
||||
```shell
|
||||
helm upgrade falco-talon falcosecurity/falco-talon -n falco -f values.yaml
|
||||
```
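Extra rules can also be appended without editing the bundled `rules.yaml`: the chart concatenates `config.rulesOverride` into the generated rules ConfigMap. A minimal sketch in `values.yaml`, reusing the labelling action shipped with the chart:

```yaml
config:
  rulesOverride: |
    - action: Label Pod as Suspicious
      actionner: kubernetes:label
      parameters:
        labels:
          analysis/status: "suspicious"
```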
|
||||
|
||||
### Uninstall Falco Talon
|
||||
|
||||
```shell
|
||||
helm delete falco-talon -n falco
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
{{ template "chart.valuesSection" . }}
|
||||
|
||||
## Connect Falcosidekick
|
||||
|
||||
Once you have installed `Falco Talon` with Helm, you need to connect `Falcosidekick` to it by adding the flag `--set falcosidekick.config.talon.address=http://falco-talon:2803` to your Falco deployment:
|
||||
|
||||
```shell
|
||||
helm upgrade -i falco falcosecurity/falco --namespace falco \
|
||||
--create-namespace \
|
||||
--set tty=true \
|
||||
--set falcosidekick.enabled=true \
|
||||
--set falcosidekick.config.talon.address=http://falco-talon:2803
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
Falco Talon is licensed to you under the **Apache 2.0** open source license.
|
||||
|
||||
## Author
|
||||
|
||||
Thomas Labarussias (https://github.com/Issif)
|
|
@ -0,0 +1,184 @@
|
|||
# Falco Talon
|
||||
|
||||
   
|
||||
|
||||
## Description
|
||||
|
||||
`Falco Talon` is a Response Engine for managing threats in your Kubernetes clusters. It enhances the solutions proposed by the Falco community with a no-code, tailor-made solution. With simple rules, you can react to `events` from [`Falco`](https://falco.org) in milliseconds.
|
||||
|
||||
## Architecture
|
||||
|
||||
`Falco Talon` can receive the `events` from [`Falco`](https://falco.org) or [`Falcosidekick`](https://github.com/falcosecurity/falcosidekick):
|
||||
|
||||
```mermaid
|
||||
flowchart LR
|
||||
falco
|
||||
falcosidekick
|
||||
falco-talon
|
||||
falco -- event --> falcosidekick
|
||||
falco -- event --> falco-talon
|
||||
falcosidekick -- event --> falco-talon
|
||||
kubernetes -- context --> falco-talon
|
||||
falco-talon -- action --> aws
|
||||
falco-talon -- output --> minio
|
||||
falco-talon -- action --> kubernetes
|
||||
falco-talon -- notification --> slack
|
||||
```
|
||||
|
||||
## Documentation
|
||||
|
||||
The full documentation is available on its own website: [https://docs.falco-talon.org/docs](https://docs.falco-talon.org/docs).
|
||||
|
||||
## Installation
|
||||
|
||||
```shell
|
||||
helm repo add falcosecurity https://falcosecurity.github.io/charts
|
||||
helm repo update
|
||||
helm install falco-talon falcosecurity/falco-talon -n falco --create-namespace -f values.yaml
|
||||
```
|
||||
|
||||
### Update the rules
|
||||
|
||||
Update `rules.yaml` then:
|
||||
|
||||
```shell
|
||||
helm upgrade falco-talon falcosecurity/falco-talon -n falco -f values.yaml
|
||||
```
|
||||
|
||||
### Uninstall Falco Talon
|
||||
|
||||
```shell
|
||||
helm delete falco-talon -n falco
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
## Values
|
||||
|
||||
| Key | Type | Default | Description |
|
||||
|-----|------|---------|-------------|
|
||||
| affinity | object | `{}` | affinity |
|
||||
| config | object | `{"aws":{"accesKey":"","externalId":"","region":"","roleArn":"","secretKey":""},"deduplication":{"leaderElection":true,"timeWindowSeconds":5},"defaultNotifiers":["k8sevents"],"listenAddress":"0.0.0.0","listenPort":2803,"minio":{"accessKey":"","endpoint":"","secretKey":"","useSsl":false},"notifiers":{"elasticsearch":{"createIndexTemplate":true,"numberOfReplicas":1,"numberOfShards":1,"url":""},"loki":{"apiKey":"","customHeaders":[],"hostPort":"","tenant":"","user":""},"slack":{"footer":"https://github.com/falcosecurity/falco-talon","format":"long","icon":"https://upload.wikimedia.org/wikipedia/commons/2/26/Circaetus_gallicus_claw.jpg","username":"Falco Talon","webhookUrl":""},"smtp":{"format":"html","from":"","hostPort":"","password":"","tls":false,"to":"","user":""},"webhook":{"url":""}},"otel":{"collectorEndpoint":"","collectorPort":4317,"collectorUseInsecureGrpc":false,"metricsEnabled":false,"tracesEnabled":false},"printAllEvents":false,"rulesOverride":"- action: Terminate Pod\n actionner: kubernetes:terminate\n parameters:\n ignore_daemonsets: true\n ignore_statefulsets: true\n grace_period_seconds: 20\n","watchRules":true}` | config of Falco Talon (See https://docs.falco-talon.org/docs/configuration/) |
|
||||
| config.aws | object | `{"accesKey":"","externalId":"","region":"","roleArn":"","secretKey":""}` | aws |
|
||||
| config.aws.accesKey | string | `""` | access key (if not specified, default access_key from provider credential chain will be used) |
|
||||
| config.aws.externalId | string | `""` | external id |
|
||||
| config.aws.region | string | `""` | region (if not specified, default region from provider credential chain will be used) |
|
||||
| config.aws.roleArn | string | `""` | role arn |
|
||||
| config.aws.secretKey | string | `""` | secret key (if not specified, default secret_key from provider credential chain will be used) |
|
||||
| config.deduplication | object | `{"leaderElection":true,"timeWindowSeconds":5}` | deduplication of the Falco events |
|
||||
| config.deduplication.leaderElection | bool | `true` | enable the leader election for cluster mode |
|
||||
| config.deduplication.timeWindowSeconds | int | `5` | duration in seconds for the deduplication time window |
|
||||
| config.defaultNotifiers | list | `["k8sevents"]` | default notifiers for all rules |
|
||||
| config.listenAddress | string | `"0.0.0.0"` | listen address |
|
||||
| config.listenPort | int | `2803` | listen port |
|
||||
| config.minio | object | `{"accessKey":"","endpoint":"","secretKey":"","useSsl":false}` | minio |
|
||||
| config.minio.accessKey | string | `""` | access key |
|
||||
| config.minio.endpoint | string | `""` | endpoint |
|
||||
| config.minio.secretKey | string | `""` | secret key |
|
||||
| config.minio.useSsl | bool | `false` | use ssl |
|
||||
| config.notifiers | object | `{"elasticsearch":{"createIndexTemplate":true,"numberOfReplicas":1,"numberOfShards":1,"url":""},"loki":{"apiKey":"","customHeaders":[],"hostPort":"","tenant":"","user":""},"slack":{"footer":"https://github.com/falcosecurity/falco-talon","format":"long","icon":"https://upload.wikimedia.org/wikipedia/commons/2/26/Circaetus_gallicus_claw.jpg","username":"Falco Talon","webhookUrl":""},"smtp":{"format":"html","from":"","hostPort":"","password":"","tls":false,"to":"","user":""},"webhook":{"url":""}}` | notifiers (See https://docs.falco-talon.org/docs/notifiers/list/ for the settings) |
|
||||
| config.notifiers.elasticsearch | object | `{"createIndexTemplate":true,"numberOfReplicas":1,"numberOfShards":1,"url":""}` | elasticsearch |
|
||||
| config.notifiers.elasticsearch.createIndexTemplate | bool | `true` | create the index template |
|
||||
| config.notifiers.elasticsearch.numberOfReplicas | int | `1` | number of replicas |
|
||||
| config.notifiers.elasticsearch.numberOfShards | int | `1` | number of shards |
|
||||
| config.notifiers.elasticsearch.url | string | `""` | url |
|
||||
| config.notifiers.loki | object | `{"apiKey":"","customHeaders":[],"hostPort":"","tenant":"","user":""}` | loki |
|
||||
| config.notifiers.loki.apiKey | string | `""` | api key |
|
||||
| config.notifiers.loki.customHeaders | list | `[]` | custom headers |
|
||||
| config.notifiers.loki.hostPort | string | `""` | host:port |
|
||||
| config.notifiers.loki.tenant | string | `""` | tenant |
|
||||
| config.notifiers.loki.user | string | `""` | user |
|
||||
| config.notifiers.slack | object | `{"footer":"https://github.com/falcosecurity/falco-talon","format":"long","icon":"https://upload.wikimedia.org/wikipedia/commons/2/26/Circaetus_gallicus_claw.jpg","username":"Falco Talon","webhookUrl":""}` | slack |
|
||||
| config.notifiers.slack.footer | string | `"https://github.com/falcosecurity/falco-talon"` | footer |
|
||||
| config.notifiers.slack.format | string | `"long"` | format |
|
||||
| config.notifiers.slack.icon | string | `"https://upload.wikimedia.org/wikipedia/commons/2/26/Circaetus_gallicus_claw.jpg"` | icon |
|
||||
| config.notifiers.slack.username | string | `"Falco Talon"` | username |
|
||||
| config.notifiers.slack.webhookUrl | string | `""` | webhook url |
|
||||
| config.notifiers.smtp | object | `{"format":"html","from":"","hostPort":"","password":"","tls":false,"to":"","user":""}` | smtp |
|
||||
| config.notifiers.smtp.format | string | `"html"` | format |
|
||||
| config.notifiers.smtp.from | string | `""` | from |
|
||||
| config.notifiers.smtp.hostPort | string | `""` | host:port |
|
||||
| config.notifiers.smtp.password | string | `""` | password |
|
||||
| config.notifiers.smtp.tls | bool | `false` | enable tls |
|
||||
| config.notifiers.smtp.to | string | `""` | to |
|
||||
| config.notifiers.smtp.user | string | `""` | user |
|
||||
| config.notifiers.webhook | object | `{"url":""}` | webhook |
|
||||
| config.notifiers.webhook.url | string | `""` | url |
|
||||
| config.otel | object | `{"collectorEndpoint":"","collectorPort":4317,"collectorUseInsecureGrpc":false,"metricsEnabled":false,"tracesEnabled":false}` | open telemetry parameters |
|
||||
| config.otel.collectorEndpoint | string | `""` | collector endpoint |
|
||||
| config.otel.collectorPort | int | `4317` | collector port |
|
||||
| config.otel.collectorUseInsecureGrpc | bool | `false` | use insecure grpc |
|
||||
| config.otel.metricsEnabled | bool | `false` | enable otel metrics |
|
||||
| config.otel.tracesEnabled | bool | `false` | enable otel traces |
|
||||
| config.printAllEvents | bool | `false` | print in stdout all received events, not only those which match a rule |
|
||||
| config.watchRules | bool | `true` | auto reload the rules when the files change |
|
||||
| extraEnv | list | `[{"name":"LOG_LEVEL","value":"warning"}]` | extra env |
|
||||
| grafana | object | `{"dashboards":{"configMaps":{"talon":{"folder":"","name":"falco-talon-grafana-dashboard","namespace":""}},"enabled":false}}` | grafana contains the configuration related to grafana. |
|
||||
| grafana.dashboards | object | `{"configMaps":{"talon":{"folder":"","name":"falco-talon-grafana-dashboard","namespace":""}},"enabled":false}` | dashboards contains configuration for grafana dashboards. |
|
||||
| grafana.dashboards.configMaps | object | `{"talon":{"folder":"","name":"falco-talon-grafana-dashboard","namespace":""}}` | configmaps to be deployed that contain a grafana dashboard. |
|
||||
| grafana.dashboards.configMaps.talon | object | `{"folder":"","name":"falco-talon-grafana-dashboard","namespace":""}` | falco-talon contains the configuration for falco talon's dashboard. |
|
||||
| grafana.dashboards.configMaps.talon.folder | string | `""` | folder where the dashboard is stored by grafana. |
|
||||
| grafana.dashboards.configMaps.talon.name | string | `"falco-talon-grafana-dashboard"` | name specifies the name for the configmap. |
|
||||
| grafana.dashboards.configMaps.talon.namespace | string | `""` | namespace specifies the namespace for the configmap. |
|
||||
| grafana.dashboards.enabled | bool | `false` | enabled specifies whether the dashboards should be deployed. |
|
||||
| image | object | `{"pullPolicy":"Always","registry":"falco.docker.scarf.sh","repository":"falcosecurity/falco-talon","tag":""}` | image parameters |
|
||||
| image.pullPolicy | string | `"Always"` | The image pull policy |
|
||||
| image.registry | string | `"falco.docker.scarf.sh"` | The image registry to pull from |
|
||||
| image.repository | string | `"falcosecurity/falco-talon"` | The image repository to pull from |
|
||||
| image.tag | string | `""` | Override the image tag to pull |
|
||||
| imagePullSecrets | list | `[]` | one or more secrets to be used when pulling images |
|
||||
| ingress | object | `{"annotations":{},"enabled":false,"hosts":[{"host":"falco-talon.local","paths":[{"path":"/"}]}],"tls":[]}` | ingress parameters |
|
||||
| ingress.annotations | object | `{}` | annotations of the ingress |
|
||||
| ingress.enabled | bool | `false` | enable the ingress |
|
||||
| ingress.hosts | list | `[{"host":"falco-talon.local","paths":[{"path":"/"}]}]` | hosts |
|
||||
| ingress.tls | list | `[]` | tls |
|
||||
| nameOverride | string | `""` | override name |
|
||||
| nodeSelector | object | `{}` | node selector |
|
||||
| podAnnotations | object | `{}` | pod annotations |
|
||||
| podSecurityContext | object | `{"fsGroup":1234,"runAsUser":1234}` | pod security context |
|
||||
| podSecurityContext.fsGroup | int | `1234` | group |
|
||||
| podSecurityContext.runAsUser | int | `1234` | user id |
|
||||
| podSecurityPolicy | object | `{"create":false}` | pod security policy |
|
||||
| podSecurityPolicy.create | bool | `false` | enable the creation of the PSP |
|
||||
| priorityClassName | string | `""` | priority class name |
|
||||
| rbac | object | `{"caliconetworkpolicies":["get","update","patch","create"],"ciliumnetworkpolicies":["get","update","patch","create"],"clusterroles":["get","delete"],"configmaps":["get","delete"],"daemonsets":["get","delete"],"deployments":["get","delete"],"events":["get","update","patch","create"],"leases":["get","update","patch","watch","create"],"namespaces":["get","delete"],"networkpolicies":["get","update","patch","create"],"nodes":["get","update","patch","watch","create"],"pods":["get","update","patch","delete","list"],"podsEphemeralcontainers":["patch","create"],"podsEviction":["get","create"],"podsExec":["get","create"],"podsLog":["get"],"replicasets":["get","delete"],"roles":["get","delete"],"secrets":["get","delete"],"serviceAccount":{"create":true,"name":""},"statefulsets":["get","delete"]}` | rbac |
|
||||
| rbac.serviceAccount.create | bool | `true` | create the service account. If create is false, name is required |
|
||||
| rbac.serviceAccount.name | string | `""` | name of the service account |
|
||||
| replicaCount | int | `2` | number of running pods |
|
||||
| resources | object | `{}` | resources |
|
||||
| service | object | `{"annotations":{},"port":2803,"type":"ClusterIP"}` | service parameters |
|
||||
| service.annotations | object | `{}` | annotations of the service |
|
||||
| service.port | int | `2803` | port of the service |
|
||||
| service.type | string | `"ClusterIP"` | type of service |
|
||||
| serviceMonitor | object | `{"additionalLabels":{},"enabled":false,"interval":"30s","path":"/metrics","port":"http","relabelings":[],"scheme":"http","scrapeTimeout":"10s","targetLabels":[],"tlsConfig":{}}` | serviceMonitor holds the configuration for the ServiceMonitor CRD. |
|
||||
| serviceMonitor.additionalLabels | object | `{}` | additionalLabels specifies labels to be added on the Service Monitor. |
|
||||
| serviceMonitor.enabled | bool | `false` | enable the deployment of a Service Monitor for the Prometheus Operator. |
|
||||
| serviceMonitor.interval | string | `"30s"` | interval specifies the time interval at which Prometheus should scrape metrics from the service. |
|
||||
| serviceMonitor.path | string | `"/metrics"` | path at which the metrics are exposed |
|
||||
| serviceMonitor.port | string | `"http"` | portname at which the metrics are exposed |
|
||||
| serviceMonitor.relabelings | list | `[]` | relabelings configures the relabeling rules to apply to the target’s metadata labels. |
|
||||
| serviceMonitor.scheme | string | `"http"` | scheme specifies network protocol used by the metrics endpoint. In this case HTTP. |
|
||||
| serviceMonitor.scrapeTimeout | string | `"10s"` | scrapeTimeout determines the maximum time Prometheus should wait for a target to respond to a scrape request. If the target does not respond within the specified timeout, Prometheus considers the scrape as failed for that target. |
|
||||
| serviceMonitor.targetLabels | list | `[]` | targetLabels defines the labels which are transferred from the associated Kubernetes service object onto the ingested metrics. |
|
||||
| serviceMonitor.tlsConfig | object | `{}` | tlsConfig specifies TLS (Transport Layer Security) configuration for secure communication when scraping metrics from a service. It allows you to define the details of the TLS connection, such as CA certificate, client certificate, and client key. Currently, the k8s-metacollector does not support TLS configuration for the metrics endpoint. |
|
||||
| tolerations | list | `[]` | tolerations |
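As an example, a minimal `values.yaml` override that enables the ServiceMonitor and adds the Slack notifier might look like the sketch below (the webhook URL is a placeholder):

```yaml
serviceMonitor:
  enabled: true
  interval: 30s

config:
  defaultNotifiers:
    - k8sevents
    - slack
  notifiers:
    slack:
      webhookUrl: "https://hooks.slack.com/services/XXXX/YYYY/ZZZZ"
```

Apply it with `helm upgrade falco-talon falcosecurity/falco-talon -n falco -f values.yaml`.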
|
||||
|
||||
## Connect Falcosidekick
|
||||
|
||||
Once you have installed `Falco Talon` with Helm, you need to connect `Falcosidekick` to it by adding the flag `--set falcosidekick.config.talon.address=http://falco-talon:2803` to your Falco deployment:
|
||||
|
||||
```shell
|
||||
helm upgrade -i falco falcosecurity/falco --namespace falco \
|
||||
--create-namespace \
|
||||
--set tty=true \
|
||||
--set falcosidekick.enabled=true \
|
||||
--set falcosidekick.config.talon.address=http://falco-talon:2803
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
Falco Talon is licensed to you under the **Apache 2.0** open source license.
|
||||
|
||||
## Author
|
||||
|
||||
Thomas Labarussias (https://github.com/Issif)
|
File diff suppressed because it is too large
|
@ -0,0 +1,8 @@
|
|||
- action: Terminate Pod
|
||||
actionner: kubernetes:terminate
|
||||
|
||||
- action: Label Pod as Suspicious
|
||||
actionner: kubernetes:label
|
||||
parameters:
|
||||
labels:
|
||||
analysis/status: "suspicious"
|
|
@ -0,0 +1,73 @@
|
|||
{{/* vim: set filetype=mustache: */}}
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "falco-talon.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "falco-talon.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Return the appropriate apiVersion for ingress.
|
||||
*/}}
|
||||
{{- define "falco-talon.ingress.apiVersion" -}}
|
||||
{{- if and (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19-0" .Capabilities.KubeVersion.Version) -}}
|
||||
{{- print "networking.k8s.io/v1" -}}
|
||||
{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" -}}
|
||||
{{- print "networking.k8s.io/v1beta1" -}}
|
||||
{{- else -}}
|
||||
{{- print "extensions/v1beta1" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Common labels
|
||||
*/}}
|
||||
{{- define "falco-talon.labels" -}}
|
||||
helm.sh/chart: {{ include "falco-talon.chart" . }}
|
||||
app.kubernetes.io/part-of: {{ include "falco-talon.name" . }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{ include "falco-talon.selectorLabels" . }}
|
||||
{{- if .Values.image.tag }}
|
||||
app.kubernetes.io/version: {{ .Values.image.tag }}
|
||||
{{- else }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Selector labels
|
||||
*/}}
|
||||
{{- define "falco-talon.selectorLabels" -}}
|
||||
app.kubernetes.io/name: {{ include "falco-talon.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Return if ingress is stable.
|
||||
*/}}
|
||||
{{- define "falco-talon.ingress.isStable" -}}
|
||||
{{- eq (include "falco-talon.ingress.apiVersion" .) "networking.k8s.io/v1" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Return if ingress supports pathType.
|
||||
*/}}
|
||||
{{- define "falco-talon.ingress.supportsPathType" -}}
|
||||
{{- or (eq (include "falco-talon.ingress.isStable" .) "true") (and (eq (include "falco-talon.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Validate if either serviceAccount create is set to true or serviceAccount name is passed
|
||||
*/}}
|
||||
{{- define "falco-talon.validateServiceAccount" -}}
|
||||
{{- if and (not .Values.rbac.serviceAccount.create) (not .Values.rbac.serviceAccount.name) -}}
|
||||
{{- fail ".Values.rbac.serviceAccount.create is set to false and .Values.rbac.serviceAccount.name is not provided or is provided as empty string." -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
|
@ -0,0 +1,18 @@
|
|||
{{- if .Values.podSecurityPolicy.create }}
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: {{ template "falco-talon.name" .}}
|
||||
labels:
|
||||
{{- include "falco-talon.labels" . | nindent 4 }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- policy
|
||||
resources:
|
||||
- podsecuritypolicies
|
||||
resourceNames:
|
||||
- {{ template "falco-talon.name" . }}
|
||||
verbs:
|
||||
- use
|
||||
{{- end }}
|
|
@ -0,0 +1,22 @@
|
|||
{{- if .Values.grafana.dashboards.enabled -}}
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ .Values.grafana.dashboards.configMaps.talon.name }}
|
||||
{{ if .Values.grafana.dashboards.configMaps.talon.namespace }}
|
||||
namespace: {{ .Values.grafana.dashboards.configMaps.talon.namespace }}
|
||||
{{- else -}}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "falco-talon.labels" . | nindent 4 }}
|
||||
grafana_dashboard: "1"
|
||||
annotations:
|
||||
{{- if .Values.grafana.dashboards.configMaps.talon.folder }}
|
||||
k8s-sidecar-target-directory: /tmp/dashboards/{{ .Values.grafana.dashboards.configMaps.talon.folder}}
|
||||
grafana_dashboard_folder: {{ .Values.grafana.dashboards.configMaps.talon.folder }}
|
||||
{{- end }}
|
||||
data:
|
||||
falco-talon-grafana-dashboard.json: |-
|
||||
{{- .Files.Get "dashboards/falco-talon-grafana-dashboard.json" | nindent 4 }}
|
||||
{{- end -}}
|
|
@ -0,0 +1,13 @@
|
|||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ include "falco-talon.name" . }}-rules
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "falco-talon.labels" . | nindent 4 }}
|
||||
data:
|
||||
rules.yaml: |-
|
||||
{{ $.Files.Get "rules.yaml" | nindent 4 }}
|
||||
{{- if .Values.config.rulesOverride }}
|
||||
{{ .Values.config.rulesOverride | nindent 4 }}
|
||||
{{- end }}
|
|
@ -0,0 +1,101 @@
|
|||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ include "falco-talon.name" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "falco-talon.labels" . | nindent 4 }}
|
||||
spec:
|
||||
replicas: {{ .Values.replicaCount }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: {{ include "falco-talon.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "falco-talon.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
annotations:
secret-checksum: {{ (lookup "v1" "Secret" .Release.Namespace (printf "%s-config" (include "falco-talon.name" .))).data | toJson | sha256sum }}
{{- if .Values.podAnnotations }}
{{ toYaml .Values.podAnnotations | indent 8 }}
{{- end }}
|
||||
spec:
|
||||
serviceAccountName: {{ include "falco-talon.name" . }}
|
||||
{{- if .Values.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- range .Values.imagePullSecrets }}
|
||||
- name: {{ . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.priorityClassName }}
|
||||
priorityClassName: "{{ .Values.priorityClassName }}"
|
||||
{{- end }}
|
||||
securityContext:
|
||||
runAsUser: {{ .Values.podSecurityContext.runAsUser }}
|
||||
fsGroup: {{ .Values.podSecurityContext.fsGroup }}
|
||||
restartPolicy: Always
|
||||
containers:
|
||||
- name: {{ .Chart.Name }}
|
||||
{{- if .Values.image.registry }}
|
||||
image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}"
|
||||
{{- else }}
|
||||
image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}"
|
||||
{{- end }}
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
args: ["server", "-c", "/etc/falco-talon/config.yaml", "-r", "/etc/falco-talon/rules.yaml"]
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 2803
|
||||
protocol: TCP
|
||||
- name: nats
|
||||
containerPort: 4222
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: http
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 5
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: http
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 5
|
||||
{{- if .Values.extraEnv }}
|
||||
env:
|
||||
{{- toYaml .Values.extraEnv | nindent 12 }}
|
||||
{{- end }}
|
||||
resources:
|
||||
{{- toYaml .Values.resources | nindent 12 }}
|
||||
volumeMounts:
|
||||
- name: "config"
|
||||
mountPath: "/etc/falco-talon/config.yaml"
|
||||
subPath: config.yaml
|
||||
readOnly: true
|
||||
- name: "rules"
|
||||
mountPath: "/etc/falco-talon/rules.yaml"
|
||||
subPath: rules.yaml
|
||||
readOnly: true
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
- name: "rules"
|
||||
configMap:
|
||||
name: "{{ include "falco-talon.name" . }}-rules"
|
||||
- name: "config"
|
||||
secret:
|
||||
secretName: "{{ include "falco-talon.name" . }}-config"
|
|
@ -0,0 +1,50 @@
|
|||
{{- if .Values.ingress.enabled -}}
|
||||
{{- $name := include "falco-talon.name" . -}}
|
||||
{{- $ingressApiIsStable := eq (include "falco-talon.ingress.isStable" .) "true" -}}
|
||||
{{- $ingressSupportsPathType := eq (include "falco-talon.ingress.supportsPathType" .) "true" -}}
|
||||
---
|
||||
apiVersion: {{ include "falco-talon.ingress.apiVersion" . }}
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: {{ $name }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "falco-talon.labels" . | nindent 4 }}
|
||||
{{- with .Values.ingress.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if .Values.ingress.tls }}
|
||||
tls:
|
||||
{{- range .Values.ingress.tls }}
|
||||
- hosts:
|
||||
{{- range .hosts }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
secretName: {{ .secretName }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
rules:
|
||||
{{- range .Values.ingress.hosts }}
|
||||
- host: {{ .host | quote }}
|
||||
http:
|
||||
paths:
|
||||
{{- range .paths }}
|
||||
- path: {{ .path }}
|
||||
{{- if $ingressSupportsPathType }}
|
||||
pathType: {{ default "ImplementationSpecific" .pathType }}
|
||||
{{- end }}
|
||||
backend:
|
||||
{{- if $ingressApiIsStable }}
|
||||
service:
|
||||
name: {{ $name }}
|
||||
port:
|
||||
name: http
|
||||
{{- else }}
|
||||
serviceName: {{ $name }}
|
||||
servicePort: http
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
|
@ -0,0 +1,32 @@
|
|||
{{- if .Values.podSecurityPolicy.create}}
|
||||
apiVersion: policy/v1beta1
|
||||
kind: PodSecurityPolicy
|
||||
metadata:
|
||||
name: {{ template "falco-talon.name" . }}
|
||||
labels:
|
||||
{{- include "falco-talon.labels" . | nindent 4 }}
|
||||
spec:
|
||||
privileged: false
|
||||
allowPrivilegeEscalation: false
|
||||
hostNetwork: false
|
||||
readOnlyRootFilesystem: true
|
||||
requiredDropCapabilities:
|
||||
- ALL
|
||||
fsGroup:
|
||||
ranges:
|
||||
- max: 65535
|
||||
min: 1
|
||||
rule: MustRunAs
|
||||
runAsUser:
|
||||
rule: MustRunAsNonRoot
|
||||
seLinux:
|
||||
rule: RunAsAny
|
||||
supplementalGroups:
|
||||
ranges:
|
||||
- max: 65535
|
||||
min: 1
|
||||
rule: MustRunAs
|
||||
volumes:
|
||||
- configMap
|
||||
- secret
|
||||
{{- end }}
|
|
@ -0,0 +1,216 @@
|
|||
{{- include "falco-talon.validateServiceAccount" . -}}
|
||||
---
|
||||
{{- if .Values.rbac.serviceAccount.create }}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ include "falco-talon.name" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "falco-talon.labels" . | nindent 4 }}
|
||||
{{- end }}
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: {{ include "falco-talon.name" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "falco-talon.name" . }}
|
||||
helm.sh/chart: {{ include "falco-talon.chart" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
rules:
|
||||
{{- if .Values.rbac.namespaces }}
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- namespaces
|
||||
verbs:
|
||||
{{ toYaml .Values.rbac.namespaces | indent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.rbac.pods }}
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
{{ toYaml .Values.rbac.pods | indent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.rbac.podsEphemeralcontainers }}
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods/ephemeralcontainers
|
||||
verbs:
|
||||
{{ toYaml .Values.rbac.podsEphemeralcontainers | indent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.rbac.nodes }}
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- nodes
|
||||
verbs:
|
||||
{{ toYaml .Values.rbac.nodes | indent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.rbac.podsLog }}
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods/log
|
||||
verbs:
|
||||
{{ toYaml .Values.rbac.podsLog | indent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.rbac.podsExec }}
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods/exec
|
||||
verbs:
|
||||
{{ toYaml .Values.rbac.podsExec | indent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.rbac.podsEviction }}
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods/eviction
|
||||
verbs:
|
||||
{{ toYaml .Values.rbac.podsEviction | indent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.rbac.events }}
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
{{ toYaml .Values.rbac.events | indent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.rbac.daemonsets }}
|
||||
- apiGroups:
|
||||
- "apps"
|
||||
resources:
|
||||
- daemonsets
|
||||
verbs:
|
||||
{{ toYaml .Values.rbac.daemonsets | indent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.rbac.deployments }}
|
||||
- apiGroups:
|
||||
- "apps"
|
||||
resources:
|
||||
- deployments
|
||||
verbs:
|
||||
{{ toYaml .Values.rbac.deployments | indent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.rbac.replicasets }}
|
||||
- apiGroups:
|
||||
- "apps"
|
||||
resources:
|
||||
- replicasets
|
||||
verbs:
|
||||
{{ toYaml .Values.rbac.replicasets | indent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.rbac.statefulsets }}
|
||||
- apiGroups:
|
||||
- "apps"
|
||||
resources:
|
||||
- statefulsets
|
||||
verbs:
|
||||
{{ toYaml .Values.rbac.statefulsets | indent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.rbac.networkpolicies }}
|
||||
- apiGroups:
|
||||
- "networking.k8s.io"
|
||||
resources:
|
||||
- networkpolicies
|
||||
verbs:
|
||||
{{ toYaml .Values.rbac.networkpolicies | indent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.rbac.caliconetworkpolicies }}
|
||||
- apiGroups:
|
||||
- "projectcalico.org"
|
||||
resources:
|
||||
- caliconetworkpolicies
|
||||
verbs:
|
||||
{{ toYaml .Values.rbac.caliconetworkpolicies | indent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.rbac.ciliumnetworkpolicies }}
|
||||
- apiGroups:
|
||||
- "cilium.io"
|
||||
resources:
|
||||
- ciliumnetworkpolicies
|
||||
verbs:
|
||||
{{ toYaml .Values.rbac.ciliumnetworkpolicies | indent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.rbac.roles }}
|
||||
- apiGroups:
|
||||
- "rbac.authorization.k8s.io"
|
||||
resources:
|
||||
- roles
|
||||
verbs:
|
||||
{{ toYaml .Values.rbac.roles | indent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.rbac.clusterroles }}
|
||||
- apiGroups:
|
||||
- "rbac.authorization.k8s.io"
|
||||
resources:
|
||||
- clusterroles
|
||||
verbs:
|
||||
{{ toYaml .Values.rbac.clusterroles | indent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.rbac.configmaps }}
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- configmaps
|
||||
verbs:
|
||||
{{ toYaml .Values.rbac.configmaps | indent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.rbac.secrets }}
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
{{ toYaml .Values.rbac.secrets | indent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.rbac.leases }}
|
||||
- apiGroups:
|
||||
- "coordination.k8s.io"
|
||||
resources:
|
||||
- leases
|
||||
verbs:
|
||||
{{ toYaml .Values.rbac.leases | indent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.podSecurityPolicy.create }}
|
||||
- apiGroups:
|
||||
- policy
|
||||
resources:
|
||||
- podsecuritypolicies
|
||||
resourceNames:
|
||||
- {{ template "falco-talon.name" . }}
|
||||
verbs:
|
||||
- use
|
||||
{{- end }}
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: {{ include "falco-talon.name" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "falco-talon.name" . }}
|
||||
helm.sh/chart: {{ include "falco-talon.chart" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: {{ include "falco-talon.name" . }}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
{{- if .Values.rbac.serviceAccount.create }}
|
||||
name: {{ include "falco-talon.name" . }}
|
||||
{{- else }}
|
||||
name: {{ .Values.rbac.serviceAccount.name }}
|
||||
{{- end }}
|
||||
namespace: {{ .Release.Namespace }}
|
|
@ -0,0 +1,71 @@
|
|||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ include "falco-talon.name" . }}-config
|
||||
labels:
|
||||
{{- include "falco-talon.labels" . | nindent 4 }}
|
||||
stringData:
|
||||
config.yaml: |
|
||||
listen_address: {{ default "0.0.0.0" .Values.config.listenAddress }}
|
||||
listen_port: {{ default 2803 .Values.config.listenPort }}
|
||||
watch_rules: {{ .Values.config.watchRules }}
|
||||
print_all_events: {{ default false .Values.config.printAllEvents }}
|
||||
deduplication:
|
||||
leader_election: {{ .Values.config.deduplication.leaderElection }}
|
||||
time_window_seconds: {{ default 5 .Values.config.deduplication.timeWindowSeconds }}
|
||||
|
||||
default_notifiers:
|
||||
{{- range .Values.config.defaultNotifiers }}
|
||||
- {{ . -}}
|
||||
{{ end }}
|
||||
|
||||
otel:
|
||||
traces_enabled: {{ default false .Values.config.otel.tracesEnabled }}
|
||||
metrics_enabled: {{ default false .Values.config.otel.metricsEnabled }}
|
||||
collector_port: {{ default 4317 .Values.config.otel.collectorPort }}
|
||||
collector_endpoint: {{ .Values.config.otel.collectorEndpoint }}
|
||||
collector_use_insecure_grpc: {{ default false .Values.config.otel.collectorUseInsecureGrpc }}
|
||||
|
||||
notifiers:
|
||||
slack:
|
||||
webhook_url: {{ .Values.config.notifiers.slack.webhookUrl }}
|
||||
icon: {{ .Values.config.notifiers.slack.icon }}
|
||||
username: {{ .Values.config.notifiers.slack.username }}
|
||||
footer: {{ .Values.config.notifiers.slack.footer }}
|
||||
format: {{ .Values.config.notifiers.slack.format }}
|
||||
webhook:
|
||||
url: {{ .Values.config.notifiers.webhook.url }}
|
||||
smtp:
|
||||
host_port: {{ .Values.config.notifiers.smtp.hostPort }}
|
||||
from: {{ .Values.config.notifiers.smtp.from }}
|
||||
to: {{ .Values.config.notifiers.smtp.to }}
|
||||
user: {{ .Values.config.notifiers.smtp.user }}
|
||||
password: {{ .Values.config.notifiers.smtp.password }}
|
||||
format: {{ .Values.config.notifiers.smtp.format }}
|
||||
tls: {{ .Values.config.notifiers.smtp.tls }}
|
||||
loki:
|
||||
host_port: {{ .Values.config.notifiers.loki.hostPort }}
|
||||
user: {{ .Values.config.notifiers.loki.user }}
|
||||
api_key: {{ .Values.config.notifiers.loki.apiKey }}
|
||||
tenant: {{ .Values.config.notifiers.loki.tenant }}
|
||||
custom_headers:
|
||||
{{- range .Values.config.notifiers.loki.customHeaders }}
|
||||
- {{ . -}}
|
||||
{{ end }}
|
||||
elasticsearch:
|
||||
url: {{ .Values.config.notifiers.elasticsearch.url }}
|
||||
create_index_template: {{ .Values.config.notifiers.elasticsearch.createIndexTemplate }}
|
||||
number_of_shards: {{ .Values.config.notifiers.elasticsearch.numberOfShards }}
|
||||
number_of_replicas: {{ .Values.config.notifiers.elasticsearch.numberOfReplicas }}
|
||||
|
||||
aws:
|
||||
role_arn: {{ .Values.config.aws.roleArn }}
|
||||
external_id: {{ .Values.config.aws.externalId }}
|
||||
region: {{ .Values.config.aws.region }}
|
||||
access_key: {{ .Values.config.aws.accesKey }}
|
||||
secret_key: {{ .Values.config.aws.secretKey }}
|
||||
minio:
|
||||
endpoint: {{ .Values.config.minio.endpoint }}
|
||||
access_key: {{ .Values.config.minio.accessKey }}
|
||||
secret_key: {{ .Values.config.minio.secretKey }}
|
||||
use_ssl: {{ .Values.config.minio.useSsl }}
|
|
@ -0,0 +1,44 @@
|
|||
{{- if .Values.serviceMonitor.enabled }}
|
||||
---
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "falco-talon.labels" . | nindent 4 }}
|
||||
{{- with .Values.serviceMonitor.additionalLabels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "falco-talon.name" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
endpoints:
|
||||
- port: {{ .Values.serviceMonitor.port }}
|
||||
{{- with .Values.serviceMonitor.interval }}
|
||||
interval: {{ . }}
|
||||
{{- end }}
|
||||
{{- with .Values.serviceMonitor.scrapeTimeout }}
|
||||
scrapeTimeout: {{ . }}
|
||||
{{- end }}
|
||||
honorLabels: true
|
||||
path: {{ .Values.serviceMonitor.path }}
|
||||
scheme: {{ .Values.serviceMonitor.scheme }}
|
||||
{{- with .Values.serviceMonitor.tlsConfig }}
|
||||
tlsConfig:
|
||||
{{- toYaml . | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- with .Values.serviceMonitor.relabelings }}
|
||||
relabelings:
|
||||
{{- toYaml . | nindent 6 }}
|
||||
{{- end }}
|
||||
jobLabel: "{{ .Release.Name }}"
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "falco-talon.selectorLabels" . | nindent 6 }}
|
||||
namespaceSelector:
|
||||
matchNames:
|
||||
- {{ .Release.Namespace }}
|
||||
{{- with .Values.serviceMonitor.targetLabels }}
|
||||
targetLabels:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
|
@ -0,0 +1,21 @@
|
|||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ include "falco-talon.name" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "falco-talon.labels" . | nindent 4 }}
|
||||
{{- with .Values.service.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
type: {{ .Values.service.type }}
|
||||
ports:
|
||||
- port: {{ .Values.service.port }}
|
||||
targetPort: http
|
||||
protocol: TCP
|
||||
name: http
|
||||
selector:
|
||||
{{- include "falco-talon.selectorLabels" . | nindent 4 }}
|
|
@ -0,0 +1,309 @@
|
|||
# Default values for falco-talon.
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
# -- number of running pods
|
||||
replicaCount: 2
|
||||
|
||||
# -- image parameters
|
||||
image:
|
||||
# -- The image registry to pull from
|
||||
registry: falco.docker.scarf.sh
|
||||
# -- The image repository to pull from
|
||||
repository: falcosecurity/falco-talon
|
||||
# -- Override the image tag to pull
|
||||
tag: ""
|
||||
# -- The image pull policy
|
||||
pullPolicy: Always
|
||||
|
||||
# -- pod security policy
|
||||
podSecurityPolicy:
|
||||
# -- enable the creation of the PSP
|
||||
create: false
|
||||
|
||||
# -- pod security context
|
||||
podSecurityContext:
|
||||
# -- user id
|
||||
runAsUser: 1234
|
||||
# -- group
|
||||
fsGroup: 1234
|
||||
|
||||
# -- one or more secrets to be used when pulling images
|
||||
imagePullSecrets: []
|
||||
# - registrySecretName
|
||||
|
||||
# -- override name
|
||||
nameOverride: ""
|
||||
|
||||
# -- extra env
|
||||
extraEnv:
|
||||
- name: LOG_LEVEL
|
||||
value: warning
|
||||
# - name: AWS_REGION # Specify if running on EKS, ECS or EC2
|
||||
# value: us-east-1
|
||||
|
||||
# -- priority class name
|
||||
priorityClassName: ""
|
||||
|
||||
# -- pod annotations
|
||||
podAnnotations: {}
|
||||
|
||||
# -- service parameters
|
||||
service:
|
||||
# -- type of service
|
||||
type: ClusterIP
|
||||
# -- port of the service
|
||||
port: 2803
|
||||
# -- annotations of the service
|
||||
annotations: {}
|
||||
# networking.gke.io/load-balancer-type: Internal
|
||||
|
||||
# -- ingress parameters
|
||||
ingress:
|
||||
# -- enable the ingress
|
||||
enabled: false
|
||||
# -- annotations of the ingress
|
||||
annotations: {}
|
||||
# kubernetes.io/ingress.class: nginx
|
||||
# kubernetes.io/tls-acme: "true"
|
||||
# -- hosts
|
||||
hosts:
|
||||
- host: falco-talon.local
|
||||
paths:
|
||||
- path: /
|
||||
# -- pathType (e.g. ImplementationSpecific, Prefix, .. etc.)
|
||||
# pathType: Prefix
|
||||
# -- tls
|
||||
tls: []
|
||||
# - secretName: chart-example-tls
|
||||
# hosts:
|
||||
# - chart-example.local
|
||||
|
||||
# -- resources
|
||||
resources: {}
|
||||
# -- limits
|
||||
# limits:
|
||||
# # -- cpu limit
|
||||
# cpu: 100m
|
||||
# # -- memory limit
|
||||
# memory: 128Mi
|
||||
# -- requests
|
||||
# requests:
|
||||
# # -- cpu request
|
||||
# cpu: 100m
|
||||
# # -- memory request
|
||||
# memory: 128Mi
|
||||
|
||||
# -- node selector
|
||||
nodeSelector: {}
|
||||
|
||||
# -- tolerations
|
||||
tolerations: []
|
||||
|
||||
# -- affinity
|
||||
affinity: {}
|
||||
|
||||
# -- rbac
|
||||
rbac:
|
||||
serviceAccount:
|
||||
# -- create the service account. If create is false, name is required
|
||||
create: true
|
||||
# -- name of the service account
|
||||
name: ""
|
||||
namespaces: ["get", "delete"]
|
||||
pods: ["get", "update", "patch", "delete", "list"]
|
||||
podsEphemeralcontainers: ["patch", "create"]
|
||||
nodes: ["get", "update", "patch", "watch", "create"]
|
||||
podsLog: ["get"]
|
||||
podsExec: ["get", "create"]
|
||||
podsEviction: ["get", "create"]
|
||||
events: ["get", "update", "patch", "create"]
|
||||
daemonsets: ["get", "delete"]
|
||||
deployments: ["get", "delete"]
|
||||
replicasets: ["get", "delete"]
|
||||
statefulsets: ["get", "delete"]
|
||||
networkpolicies: ["get", "update", "patch", "create"]
|
||||
caliconetworkpolicies: ["get", "update", "patch", "create"]
|
||||
ciliumnetworkpolicies: ["get", "update", "patch", "create"]
|
||||
roles: ["get", "delete"]
|
||||
clusterroles: ["get", "delete"]
|
||||
configmaps: ["get", "delete"]
|
||||
secrets: ["get", "delete"]
|
||||
leases: ["get", "update", "patch", "watch", "create"]
|
||||
|
||||
# -- config of Falco Talon (See https://docs.falco-talon.org/docs/configuration/)
|
||||
config:
|
||||
# -- listen address
|
||||
listenAddress: 0.0.0.0
|
||||
# -- listen port
|
||||
listenPort: 2803
|
||||
|
||||
# -- default notifiers for all rules
|
||||
defaultNotifiers:
|
||||
# - slack
|
||||
- k8sevents
|
||||
|
||||
# -- auto reload the rules when the files change
|
||||
watchRules: true
|
||||
|
||||
# -- deduplication of the Falco events
|
||||
deduplication:
|
||||
# -- enable the leader election for cluster mode
|
||||
leaderElection: true
|
||||
# -- duration in seconds for the deduplication time window
|
||||
timeWindowSeconds: 5
|
||||
|
||||
# -- print in stdout all received events, not only those which match a rule
|
||||
printAllEvents: false
|
||||
|
||||
# User-defined additional rules for rules_override.yaml
|
||||
rulesOverride: |
|
||||
- action: Terminate Pod
|
||||
actionner: kubernetes:terminate
|
||||
parameters:
|
||||
ignore_daemonsets: true
|
||||
ignore_statefulsets: true
|
||||
grace_period_seconds: 20
|
||||
|
||||
# -- open telemetry parameters
|
||||
otel:
|
||||
# -- enable otel traces
|
||||
tracesEnabled: false
|
||||
# -- enable otel metrics
|
||||
metricsEnabled: false
|
||||
# -- collector port
|
||||
collectorPort: 4317
|
||||
# -- collector endpoint
|
||||
collectorEndpoint: ""
|
||||
# -- use insecure grpc
|
||||
collectorUseInsecureGrpc: false
|
||||
|
||||
# -- notifiers (See https://docs.falco-talon.org/docs/notifiers/list/ for the settings)
|
||||
notifiers:
|
||||
# -- slack
|
||||
slack:
|
||||
# -- webhook url
|
||||
webhookUrl: ""
|
||||
# -- icon
|
||||
icon: "https://upload.wikimedia.org/wikipedia/commons/2/26/Circaetus_gallicus_claw.jpg"
|
||||
# -- username
|
||||
username: "Falco Talon"
|
||||
# -- footer
|
||||
footer: "https://github.com/falcosecurity/falco-talon"
|
||||
# -- format
|
||||
format: "long"
|
||||
# -- webhook
|
||||
webhook:
|
||||
# -- url
|
||||
url: ""
|
||||
# -- smtp
|
||||
smtp:
|
||||
# -- host:port
|
||||
hostPort: ""
|
||||
# -- from
|
||||
from: ""
|
||||
# -- to
|
||||
to: ""
|
||||
# -- user
|
||||
user: ""
|
||||
# -- password
|
||||
password: ""
|
||||
# -- format
|
||||
format: "html"
|
||||
# -- enable tls
|
||||
tls: false
|
||||
# -- loki
|
||||
loki:
|
||||
# -- host:port
|
||||
hostPort: ""
|
||||
# -- user
|
||||
user: ""
|
||||
# -- api key
|
||||
apiKey: ""
|
||||
# -- tenant
|
||||
tenant: ""
|
||||
# -- custom headers
|
||||
customHeaders: []
|
||||
# -- elasticsearch
|
||||
elasticsearch:
|
||||
# -- url
|
||||
url: ""
|
||||
# -- create the index template
|
||||
createIndexTemplate: true
|
||||
# -- number of shards
|
||||
numberOfShards: 1
|
||||
# -- number of replicas
|
||||
numberOfReplicas: 1
|
||||
|
||||
# -- aws
|
||||
aws:
|
||||
# -- role arn
|
||||
roleArn: ""
|
||||
# -- external id
|
||||
externalId: ""
|
||||
# -- region (if not specified, default region from provider credential chain will be used)
|
||||
region: ""
|
||||
# -- access key (if not specified, default access_key from provider credential chain will be used)
|
||||
accesKey: ""
|
||||
# -- secret key (if not specified, default secret_key from provider credential chain will be used)
|
||||
secretKey: ""
|
||||
|
||||
# -- minio
|
||||
minio:
|
||||
# -- endpoint
|
||||
endpoint: ""
|
||||
# -- access key
|
||||
accessKey: ""
|
||||
# -- secret key
|
||||
secretKey: ""
|
||||
# -- use ssl
|
||||
useSsl: false
|
||||
|
||||
# -- serviceMonitor holds the configuration for the ServiceMonitor CRD.
|
||||
serviceMonitor:
|
||||
# -- enable the deployment of a Service Monitor for the Prometheus Operator.
|
||||
enabled: false
|
||||
# -- portname at which the metrics are exposed
|
||||
port: http
|
||||
# -- path at which the metrics are exposed
|
||||
path: /metrics
|
||||
# -- additionalLabels specifies labels to be added on the Service Monitor.
|
||||
additionalLabels: {}
|
||||
# -- interval specifies the time interval at which Prometheus should scrape metrics from the service.
|
||||
interval: "30s"
|
||||
# -- scheme specifies network protocol used by the metrics endpoint. In this case HTTP.
|
||||
scheme: http
|
||||
# -- scrapeTimeout determines the maximum time Prometheus should wait for a target to respond to a scrape request.
|
||||
# If the target does not respond within the specified timeout, Prometheus considers the scrape as failed for
|
||||
# that target.
|
||||
scrapeTimeout: "10s"
|
||||
# -- relabelings configures the relabeling rules to apply to the target’s metadata labels.
|
||||
relabelings: []
|
||||
# -- targetLabels defines the labels which are transferred from the associated Kubernetes service object onto the ingested metrics.
|
||||
targetLabels: []
|
||||
# -- tlsConfig specifies TLS (Transport Layer Security) configuration for secure communication when
|
||||
# scraping metrics from a service. It allows you to define the details of the TLS connection, such as
|
||||
# CA certificate, client certificate, and client key. Currently, the k8s-metacollector does not support
|
||||
# TLS configuration for the metrics endpoint.
|
||||
tlsConfig: {}
|
||||
# insecureSkipVerify: false
|
||||
# caFile: /path/to/ca.crt
|
||||
# certFile: /path/to/client.crt
|
||||
# keyFile: /path/to/client.key
|
||||
|
||||
# -- grafana contains the configuration related to grafana.
|
||||
grafana:
|
||||
# -- dashboards contains configuration for grafana dashboards.
|
||||
dashboards:
|
||||
# -- enabled specifies whether the dashboards should be deployed.
|
||||
enabled: false
|
||||
# -- configMaps to be deployed that contain a grafana dashboard.
|
||||
configMaps:
|
||||
# -- falco-talon contains the configuration for falco talon's dashboard.
|
||||
talon:
|
||||
# -- name specifies the name for the configmap.
|
||||
name: falco-talon-grafana-dashboard
|
||||
# -- namespace specifies the namespace for the configmap.
|
||||
namespace: ""
|
||||
# -- folder where the dashboard is stored by grafana.
|
||||
folder: ""
|
|
@ -1,4 +1,6 @@
|
|||
# Helm chart Breaking Changes
|
||||
- [5.0.0](#500)
|
||||
- [Default Falco Image](#default-falco-image)
|
||||
- [4.0.0](#400)
|
||||
- [Drivers](#drivers)
|
||||
- [K8s Collector](#k8s-collector)
|
||||
|
@ -9,6 +11,21 @@
|
|||
- [Falco Images](#drop-support-for-falcosecurityfalco-image)
|
||||
- [Driver Loader Init Container](#driver-loader-simplified-logic)
|
||||
|
||||
## 6.0.0
|
||||
|
||||
### Falco Talon configuration changes
|
||||
|
||||
The following backward-incompatible changes have been made to `values.yaml`:
|
||||
- `falcotalon` configuration has been renamed to `falco-talon`
|
||||
- `falcotalon.enabled` has been renamed to `responseActions.enabled`
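
For clarity, a minimal before/after sketch of a `values.yaml` using the renamed keys (only the keys listed above come from this chart; the nested value shown under `falco-talon` is a placeholder):

```yaml
# chart < 6.0.0 (old keys)
falcotalon:
  enabled: true

# chart >= 6.0.0 (new keys)
responseActions:
  enabled: true
falco-talon:
  # values for the falco-talon subchart go here, e.g.:
  replicaCount: 2
```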
|
||||
|
||||
## 5.0.0
|
||||
### Default Falco Image
|
||||
**Starting with version 5.0.0, the Helm chart now uses the default Falco container image, which is a distroless image without any additional tools installed.**
|
||||
Previously, the chart used the `debian` image with several tools included to avoid breaking changes during upgrades. The new image is more secure and lightweight, but it does not include these tools.
|
||||
|
||||
If you rely on some tool—for example, when using the `program_output` feature—you can manually override the `image.tag` value to use a different image flavor. For instance, setting `image.tag` to `0.41.0-debian` will restore access to the tools available in the Debian-based image.
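
For example, a values override selecting the Debian flavor could look like the sketch below (the tag must match the Falco version you are deploying; `0.41.0-debian` is the example given above):

```yaml
image:
  # use the -debian flavor to get the extra tools back
  tag: "0.41.0-debian"
```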
|
||||
|
||||
## 4.0.0
|
||||
### Drivers
|
||||
The `driver` section has been reworked based on the following PR: https://github.com/falcosecurity/falco/pull/2413.
|
||||
|
@ -59,7 +76,7 @@ This way you will upgrade Falco to `v0.34.0`.
|
|||
|
||||
### Falcoctl support
|
||||
|
||||
[Falcoctl](https://https://github.com/falcosecurity/falcoctl) is a new tool born to automatize operations when deploying Falco.
|
||||
[Falcoctl](https://github.com/falcosecurity/falcoctl) is a new tool born to automatize operations when deploying Falco.
|
||||
|
||||
Before `v3.0.0` of the charts, *rulesfiles* and *plugins* were shipped bundled in the Falco docker image. This made it impossible to update the *rulesfiles* and *plugins* until a new version of Falco was released. Operators had to manually update the *rulesfiles* or add new *plugins* to Falco. The process was cumbersome and error-prone: operators had to create their own Falco docker images with the new plugins baked into them or wait for a new Falco release.
|
||||
|
||||
|
@ -212,11 +229,15 @@ Starting from `v0.3.0`, the chart drops the bundled **rulesfiles**. The previous
|
|||
|
||||
The reason we are dropping them is pretty simple: the files are already shipped within the Falco image, so bundling them brings no benefit. On the other hand, we had to manually update those files for each Falco release.
|
||||
|
||||
For users out there, do not worry, we have you covered. As said before the **rulesfiles** are already shipped inside the Falco image. Still, this solution has some drawbacks such as users having to wait for the next releases of Falco to get the latest version of those **rulesfiles**. Or they could manually update them by using the [custom rules](https://https://github.com/falcosecurity/charts/tree/master/falco#loading-custom-rules).
|
||||
For users out there, do not worry, we have you covered. As said before the **rulesfiles** are already shipped inside
|
||||
the Falco image. Still, this solution has some drawbacks such as users having to wait for the next releases of Falco
|
||||
to get the latest version of those **rulesfiles**. Or they could manually update them by using the [custom rules](.
|
||||
/README.md#loading-custom-rules).
|
||||
|
||||
We came up with a better solution and that is **falcoctl**. Users can configure the **falcoctl** tool to fetch and install the latest **rulesfiles** as provided by the *falcosecurity* organization. For more info, please check the **falcoctl** section.
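
As a rough sketch, the chart's falcoctl configuration can be pointed at the upstream rulesfiles; the ref name below is indicative only, check the falcoctl section of the chart for the exact defaults:

```yaml
falcoctl:
  artifact:
    install:
      enabled: true   # init container installs the artifacts at startup
    follow:
      enabled: true   # sidecar keeps them up to date
  config:
    artifact:
      install:
        refs: [falco-rules:latest]   # indicative ref
      follow:
        refs: [falco-rules:latest]
```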
|
||||
|
||||
**NOTE**: if any user (wrongly) used to customize those files before deploying Falco please switch to using the [custom rules](https://https://github.com/falcosecurity/charts/tree/master/falco#loading-custom-rules).
|
||||
**NOTE**: if any user (wrongly) used to customize those files before deploying Falco please switch to using the
|
||||
[custom rules](./README.md#loading-custom-rules).
|
||||
|
||||
### Drop support for `falcosecurity/falco` image
|
||||
|
||||
|
|
|
@ -3,6 +3,333 @@
|
|||
This file documents all notable changes to Falco Helm Chart. The release
|
||||
numbering uses [semantic versioning](http://semver.org).
|
||||
|
||||
## v6.2.2
|
||||
|
||||
* Bump container plugin to 0.3.5
|
||||
* Bump k8smeta plugin to 0.3.1
|
||||
|
||||
## v6.2.1
|
||||
|
||||
* Bump container plugin to 0.3.3
|
||||
|
||||
## v6.2.0
|
||||
|
||||
* Switch to `collectors.containerEngine` configuration by default
|
||||
* Update `collectors.containerEngine.engines` default values
|
||||
* Fix containerd socket path configuration
|
||||
* Address "container.name shows container.id" issue
|
||||
* Address "Missing k8s.pod name, container.name, other metadata with k3s" issue
|
||||
* Bump container plugin to 0.3.2
|
||||
|
||||
## v6.1.0
|
||||
|
||||
* feat(falco): Add possibility to customize the falco pods hostname
|
||||
|
||||
## v6.0.2
|
||||
|
||||
* Bump Falco to 0.41.3
|
||||
* Bump container plugin to 0.3.1
|
||||
|
||||
## v6.0.1
|
||||
|
||||
* Bump Falco to 0.41.2
|
||||
* Bump container plugin to 0.3.0
|
||||
|
||||
## v6.0.0
|
||||
|
||||
* Rename Falco Talon configuration keys
|
||||
|
||||
## v5.0.3
|
||||
|
||||
* Bump container plugin to 0.2.6
|
||||
|
||||
## v5.0.2
|
||||
|
||||
* Bump container plugin to 0.2.5
|
||||
* Bump Falco to 0.41.1
|
||||
|
||||
## v5.0.1
|
||||
|
||||
* Correct installation issue when both artifact installation and follow are enabled
|
||||
|
||||
## v5.0.0
|
||||
* Bump falcoctl to 0.11.2
|
||||
* Use default falco image flavor (wolfi) by default
|
||||
|
||||
## v4.22.0
|
||||
* Bump Falco to 0.41.0;
|
||||
* Bump falco rules to 4.0.0;
|
||||
* Deprecate old container engines in favor of the new container plugin;
|
||||
* Add support for the new container plugin;
|
||||
* Update k8smeta plugin to 0.3.0;
|
||||
* Update falco configuration;
|
||||
|
||||
## v4.21.2
|
||||
|
||||
* add falco-talon as falco subchart
|
||||
|
||||
## v4.21.1
|
||||
|
||||
* removed falco-exporter (now deprecated) references from the readme
|
||||
|
||||
## v4.21.0
|
||||
|
||||
* feat(falco): adding imagePullSecrets at the service account level
|
||||
|
||||
## v4.20.1
|
||||
|
||||
* correctly mount the volumes based on socket path
|
||||
* unit tests for container engines socket paths
|
||||
|
||||
## v4.20.0
|
||||
|
||||
* bump falcoctl to 0.11.0
|
||||
|
||||
## v4.19.0
|
||||
|
||||
* fix falco version to 0.40.0
|
||||
|
||||
## v4.18.0
|
||||
|
||||
* update the chart for falco 0.40;
|
||||
* remove deprecated cli flag `--cri` and use instead the configuration file. More info here: https://github.com/falcosecurity/falco/pull/3329
|
||||
* use new falco images, for more info see: https://github.com/falcosecurity/falco/issues/3165
|
||||
|
||||
## v4.17.2
|
||||
|
||||
* update(falco): add ports definition in falco container spec
|
||||
|
||||
## v4.17.1
|
||||
|
||||
* docs(falco): update README.md to reflect latest driver configuration and correct broken links
|
||||
|
||||
## v4.17.0
|
||||
|
||||
* update(falco): bump k8saudit version to 0.11
|
||||
|
||||
## v4.16.2
|
||||
|
||||
* fix(falco): set dnsPolicy to ClusterFirstWithHostNet when gvisor driver is enabled to prevent DNS lookup failures for cluster-internal services
|
||||
|
||||
## v4.16.1
|
||||
|
||||
* fix(falco/serviceMonitor): set service label selector
|
||||
* new(falco/tests): add unit tests for serviceMonitor label selector
|
||||
|
||||
## v4.16.0
|
||||
|
||||
* bump falcosidekick dependency to v0.9.* to match with future versions
|
||||
|
||||
## v4.15.1
|
||||
|
||||
* fix: change the url for the concurrent queue classes docs
|
||||
|
||||
## v4.15.0
|
||||
|
||||
* update(falco): bump falco version to 0.39.2 and falcoctl to 0.10.1
|
||||
|
||||
## v4.14.2
|
||||
|
||||
* fix(falco/readme): use `rules_files` instead of deprecated `rules_file` in README config snippet
|
||||
|
||||
## v4.14.1
|
||||
|
||||
* fix(falco/dashboard): make pod variable independent of triggered rules. CPU and memory are now visible for each
|
||||
pod, even when no rules have been triggered for that falco instance.
|
||||
|
||||
## v4.14.0
|
||||
|
||||
* Bump k8smeta plugin to 0.2.1, see: https://github.com/falcosecurity/plugins/releases/tag/plugins%2Fk8smeta%2Fv0.2.1
|
||||
|
||||
## v4.13.0
|
||||
|
||||
* Expose new config entries for k8smeta plugin:`verbosity` and `hostProc`.
|
||||
|
||||
## v4.12.0
|
||||
|
||||
* Set apparmor to `unconfined` (disabled) when `leastPrivileged: true` and (`kind: modern_ebpf` or `kind: ebpf`)
|
||||
|
||||
## v4.11.2
|
||||
|
||||
* only prints env key if there are env values to be passed on `falcoctl.initContainer` and `falcoctl.sidecar`
|
||||
|
||||
## v4.11.1
|
||||
|
||||
* add details for the scap drops buffer charts with the dir and drops labels
|
||||
|
||||
## v4.11.0
|
||||
|
||||
* new(falco): add grafana dashboard for falco
|
||||
|
||||
## v4.10.0
|
||||
|
||||
* Bump Falco to v0.39.1
|
||||
|
||||
## v4.9.1
|
||||
|
||||
* feat(falco): add labels and annotations to the metrics service
|
||||
|
||||
## v4.9.0
|
||||
|
||||
* Bump Falco to v0.39.0
|
||||
* update(falco): add new configuration entries for Falco
|
||||
This commit adds new config keys introduced in Falco 0.39.0.
|
||||
Furthermore, updates the unit tests for the latest changes
|
||||
in the values.yaml.
|
||||
* cleanup(falco): remove deprecated falco configuration
|
||||
This commit removes the "output" config key that has
|
||||
been deprecated in falco.
|
||||
* update(falco): mount proc filesystem for plugins
|
||||
The following PR in libs https://github.com/falcosecurity/libs/pull/1969
|
||||
introduces a new platform for plugins that requires access to the
|
||||
proc filesystem.
|
||||
* fix(falco): update broken link pointing to Falco docs
|
||||
After the changes made by the following PR to the Falco docs https://github.com/falcosecurity/falco-website/pull/1362
|
||||
this commit updates a broken link.
|
||||
|
||||
## v4.8.3
|
||||
|
||||
* The init container, when driver.kind=auto, automatically generates
|
||||
a new Falco configuration file and selects the appropriate engine
|
||||
kind based on the environment where Falco is deployed.
|
||||
|
||||
With this commit, along with falcoctl PR #630, the Helm charts now
|
||||
support different driver kinds for Falco instances based on the
|
||||
specific node they are running on. When driver.kind=auto is set,
|
||||
each Falco instance dynamically selects the most suitable
|
||||
driver (e.g., ebpf, kmod, modern_ebpf) for the node.
|
||||
+-------------------------------------------------------+
|
||||
| Kubernetes Cluster |
|
||||
| |
|
||||
| +-------------------+ +-------------------+ |
|
||||
| | Node 1 | | Node 2 | |
|
||||
| | | | | |
|
||||
| | Falco (ebpf) | | Falco (kmod) | |
|
||||
| +-------------------+ +-------------------+ |
|
||||
| |
|
||||
| +-------------------+ |
|
||||
| | Node 3 | |
|
||||
| | | |
|
||||
| | Falco (modern_ebpf)| |
|
||||
| +-------------------+ |
|
||||
+-------------------------------------------------------+
|
||||
|
||||
## v4.8.2
|
||||
|
||||
* fix(falco): correctly mount host filesystems when driver.kind is auto
|
||||
|
||||
When falco runs with kmod/module driver it needs special filesystems
|
||||
to be mounted from the host, such as /dev and /sys/module/falco.
|
||||
This commit ensures that we mount them in the falco container.
|
||||
|
||||
Note that /sys/module/falco is now mounted as /sys/module since
|
||||
we do not know which kind of driver will be used. The falco folder
|
||||
exists under /sys/module only when the kernel module is loaded,
|
||||
hence it's not possible to use the /sys/module/falco hostpath when driver.kind
|
||||
is set to auto.
|
||||
|
||||
## v4.8.1
|
||||
|
||||
* fix(falcosidekick): add support for custom service type for webui redis
|
||||
|
||||
## v4.8.0
|
||||
|
||||
* Upgrade Falco version to 0.38.2
|
||||
|
||||
## v4.7.2
|
||||
|
||||
* use rules_files key in the preset values files
|
||||
|
||||
## v4.7.1
|
||||
|
||||
* fix(falco/config): use rules_files instead of deprecated key rules_file
|
||||
|
||||
## v4.7.0
|
||||
|
||||
* bump k8smeta plugin to version 0.2.0. The new version resolves a bug that prevented the plugin
|
||||
from populating the k8smeta fields. For more info see:
|
||||
* https://github.com/falcosecurity/plugins/issues/514
|
||||
* https://github.com/falcosecurity/plugins/pull/517
|
||||
|
||||
## v4.6.3
|
||||
|
||||
* fix(falco): mount client-certs-volume only if certs.existingClientSecret is defined
|
||||
|
||||
## v4.6.2
|
||||
|
||||
* bump falcosidekick dependency to v0.8.* to match with future versions
|
||||
|
||||
## v4.6.1
|
||||
|
||||
* bump falcosidekick dependency to v0.8.2 (fixes bug when using externalRedis in UI)
|
||||
|
||||
## v4.6.0
|
||||
|
||||
* feat(falco): add support for Falco metrics
|
||||
|
||||
## v4.5.2
|
||||
|
||||
* bump falcosidekick dependency version to v0.8.0, for falcosidekick 2.29.0
|
||||
|
||||
## v4.5.2
|
||||
|
||||
* reordering scc configuration, making it more robust to plain yaml comparison
|
||||
|
||||
## v4.5.1
|
||||
|
||||
* falco is now able to reconnect to containerd.socket
|
||||
|
||||
## v4.5.0
|
||||
|
||||
* bump Falco version to 0.38.1
|
||||
|
||||
## v4.4.3
|
||||
|
||||
* Added a `labels` field in the controller to provide extra labeling for the daemonset/deployment
|
||||
|
||||
## v4.4.2
|
||||
|
||||
* fix wrong check in pod template where `existingSecret` was used instead of `existingClientSecret`
|
||||
|
||||
## v4.4.1
|
||||
|
||||
* bump k8s-metacollector dependency version to v0.1.1. See: https://github.com/falcosecurity/k8s-metacollector/releases
|
||||
|
||||
## v4.3.1
|
||||
|
||||
* bump falcosidekick dependency version to v0.7.19 to install the latest version through the falco chart
|
||||
|
||||
## v4.3.0
|
||||
|
||||
* `FALCO_HOSTNAME` and `HOST_ROOT` are now set by default in pods configuration.
|
||||
|
||||
## v4.2.6
|
||||
|
||||
* bump falcosidekick dependency version to v0.7.17 to install the latest version through the falco chart
|
||||
|
||||
## v4.2.5
|
||||
|
||||
* fix docs
|
||||
|
||||
## v4.2.4
|
||||
|
||||
* bump falcosidekick dependency version to v0.7.15 to install the latest version through the falco chart
|
||||
|
||||
## v4.2.3
|
||||
|
||||
* fix(falco/helpers): adjust formatting to be compatible with older helm versions
|
||||
|
||||
## v4.2.2
|
||||
|
||||
* fix(falco/README): dead link
|
||||
|
||||
## v4.2.1
|
||||
* fix(falco/README): typos, formatting and broken links
|
||||
|
||||
## v4.2.0
|
||||
|
||||
* Bump falco to v0.37.1 and falcoctl to v0.7.2
|
||||
|
||||
## v4.1.2
|
||||
* Fix links in output after falco install without sidekick
|
||||
|
||||
|
@ -101,7 +428,7 @@ The new chart introduces some breaking changes. For folks upgrading Falco please
|
|||
## v3.3.0
|
||||
* Upgrade Falco to 0.35.1. For more info see the release notes: https://github.com/falcosecurity/falco/releases/tag/0.35.1
|
||||
* Upgrade falcoctl to 0.5.1. For more info see the release notes: https://github.com/falcosecurity/falcoctl/releases/tag/v0.5.1
|
||||
* Introduce least privileged mode in modern ebpf. For more info see: https://falco.org/docs/event-sources/kernel/#least-privileged-mode-2
|
||||
* Introduce least privileged mode in modern ebpf. For more info see: https://falco.org/docs/setup/container/#docker-least-privileged-modern-ebpf
|
||||
|
||||
## v3.2.1
|
||||
* Set falco.http_output.url to empty string in values.yaml file
|
||||
|
@ -591,7 +918,7 @@ Remove whitespace around `falco.httpOutput.url` to fix the error `libcurl error:
|
|||
|
||||
### Minor Changes
|
||||
|
||||
* Upgrade to Falco 0.26.2, `DRIVERS_REPO` now defaults to https://download.falco.org/driver (see the [Falco changelog](https://github.com/falcosecurity/falco/blob/0.26.2/CHANGELOG.md))
|
||||
* Upgrade to Falco 0.26.2, `DRIVERS_REPO` now defaults to https://download.falco.org/?prefix=driver/ (see the [Falco changelog](https://github.com/falcosecurity/falco/blob/0.26.2/CHANGELOG.md))
|
||||
|
||||
## v1.5.3
|
||||
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
apiVersion: v2
|
||||
name: falco
|
||||
version: 4.1.2
|
||||
appVersion: "0.37.0"
|
||||
version: 6.2.2
|
||||
appVersion: "0.41.3"
|
||||
description: Falco
|
||||
keywords:
|
||||
- monitoring
|
||||
|
@ -19,10 +19,14 @@ maintainers:
|
|||
email: cncf-falco-dev@lists.cncf.io
|
||||
dependencies:
|
||||
- name: falcosidekick
|
||||
version: "0.7.11"
|
||||
version: "0.9.*"
|
||||
condition: falcosidekick.enabled
|
||||
repository: https://falcosecurity.github.io/charts
|
||||
- name: k8s-metacollector
|
||||
version: 0.1.*
|
||||
repository: https://falcosecurity.github.io/charts
|
||||
condition: collectors.kubernetes.enabled
|
||||
- name: falco-talon
|
||||
version: 0.3.*
|
||||
repository: https://falcosecurity.github.io/charts
|
||||
condition: responseActions.enabled
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
|
||||
## Introduction
|
||||
|
||||
The deployment of Falco in a Kubernetes cluster is managed through a **Helm chart**. This chart manages the lifecycle of Falco in a cluster by handling all the k8s objects needed by Falco to be seamlessly integrated in your environment. Based on the configuration in `values.yaml` file, the chart will render and install the required k8s objects. Keep in mind that Falco could be deployed in your cluster using a `daemonset` or a `deployment`. See next sections for more info.
|
||||
The deployment of Falco in a Kubernetes cluster is managed through a **Helm chart**. This chart manages the lifecycle of Falco in a cluster by handling all the k8s objects needed by Falco to be seamlessly integrated in your environment. Based on the configuration in [values.yaml](./values.yaml) file, the chart will render and install the required k8s objects. Keep in mind that Falco could be deployed in your cluster using a `daemonset` or a `deployment`. See next sections for more info.
|
||||
|
||||
## Attention
|
||||
|
||||
|
@ -24,7 +24,9 @@ helm repo update
|
|||
To install the chart with the release name `falco` in namespace `falco` run:
|
||||
|
||||
```bash
|
||||
helm install falco falcosecurity/falco --namespace falco --create-namespace
|
||||
helm install falco falcosecurity/falco \
|
||||
--create-namespace \
|
||||
--namespace falco
|
||||
```
|
||||
|
||||
After a few minutes Falco instances should be running on all your nodes. The status of Falco pods can be inspected through *kubectl*:
|
||||
|
@ -39,23 +41,24 @@ falco-57w7q 1/1 Running 0 3m12s 10.244.0.1 control-plane
|
|||
falco-h4596 1/1 Running 0 3m12s 10.244.1.2 worker-node-1 <none> <none>
|
||||
falco-kb55h 1/1 Running 0 3m12s 10.244.2.3 worker-node-2 <none> <none>
|
||||
```
|
||||
The cluster in our example has three nodes, one *control-plane* node and two *worker* nodes. The default configuration in `values.yaml` of our helm chart deploys Falco using a `daemonset`. That's the reason why we have one Falco pod in each node.
|
||||
> **Tip**: List Falco release using `helm list -n falco`, a release is a name used to track a specific deployment
|
||||
The cluster in our example has three nodes, one *control-plane* node and two *worker* nodes. The default configuration in [values.yaml](./values.yaml) of our helm chart deploys Falco using a `daemonset`. That's the reason why we have one Falco pod in each node.
|
||||
> **Tip**: List Falco release using `helm list -n falco`, a release is a name used to track a specific deployment.
|
||||
|
||||
### Falco, Event Sources and Kubernetes
|
||||
Starting from Falco 0.31.0 the [new plugin system](https://falco.org/docs/plugins/) is stable and production ready. The **plugin system** can be seen as the next step in the evolution of Falco. Historically, Falco monitored system events from the **kernel** trying to detect malicious behaviors on Linux systems. It also had the capability to process k8s Audit Logs to detect suspicious activities in Kubernetes clusters. Since Falco 0.32.0 all the related code to the k8s Audit Logs in Falco was removed and ported in a [plugin](https://github.com/falcosecurity/plugins/tree/master/plugins/k8saudit). At the time being Falco supports different event sources coming from **plugins** or **drivers** (system events).
|
||||
|
||||
Note that **a Falco instance can handle multiple event sources in parallel**. you can deploy Falco leveraging **drivers** for syscalls events and at the same time loading **plugins**. A step by step guide on how to deploy Falco with multiple sources can be found [here](https://falco.org/docs/getting-started/third-party/learning/#falco-with-multiple-sources).
|
||||
Note that **a Falco instance can handle multiple event sources in parallel**. You can deploy Falco leveraging **drivers** for syscall events and at the same time loading **plugins**. A step by step guide on how to deploy Falco with multiple sources can be found [here](https://falco.org/docs/getting-started/learning-environments/#falco-with-multiple-sources).
|
||||
|
||||
#### About Drivers
|
||||
|
||||
Falco needs a **driver** to analyze the system workload and pass security events to userspace. The supported drivers are:
|
||||
|
||||
* [Kernel module](https://falco.org/docs/event-sources/drivers/#kernel-module)
|
||||
* [eBPF probe](https://falco.org/docs/event-sources/drivers/#ebpf-probe)
|
||||
* [Modern eBPF probe](https://falco.org/docs/event-sources/drivers/#modern-ebpf-probe)
|
||||
* [Modern eBPF probe](https://falco.org/docs/concepts/event-sources/kernel/#modern-ebpf-probe)
|
||||
* [Kernel module](https://falco.org/docs/concepts/event-sources/kernel/#kernel-module)
|
||||
* [Legacy eBPF probe](https://falco.org/docs/concepts/event-sources/kernel/#legacy-ebpf-probe)
|
||||
|
||||
The driver must be loaded on the node where Falco is running. Falco now prefers the **Modern eBPF probe** by default. When using **falcoctl** with `driver.kind=auto`, it will automatically choose the best driver for your system. Specifically, it first attempts to use the Modern eBPF probe (which is shipped directly within the Falco binary) and will fall back to the _kernel module_ or the _original eBPF probe_ if the necessary BPF features are not available.
|
||||
|
||||
The driver should be installed on the node where Falco is running. The _kernel module_ (default option) and the _eBPF probe_ are installed on the node through an *init container* (i.e. `falco-driver-loader`) that tries to download a prebuilt driver or, as a fallback, build it on the fly. The _Modern eBPF probe_ doesn't require an init container because it is shipped directly into the Falco binary. However, the _Modern eBPF probe_ requires [recent BPF features](https://falco.org/docs/event-sources/kernel/#modern-ebpf-probe).
|
||||
|
||||
##### Pre-built drivers
|
||||
|
||||
|
@ -65,11 +68,11 @@ The discovery of a kernel version by the [kernel-crawler](https://falcosecurity.
|
|||
|
||||
##### Building the driver on the fly (fallback)
|
||||
|
||||
If a prebuilt driver is not available for your distribution/kernel, users can build the modules by them self or install the kernel headers on the nodes, and the init container (falco-driver-loader) will try and build the module on the fly.
|
||||
If a prebuilt driver is not available for your distribution/kernel, users can build the driver by themselves or install the kernel headers on the nodes, and the init container (falco-driver-loader) will try to build the driver on the fly.
|
||||
|
||||
Falco needs **kernel headers** installed on the host as a prerequisite to build the driver on the fly correctly. You can find instructions for installing the kernel headers for your system under the [Install section](https://falco.org/docs/getting-started/installation/) of the official documentation.
|
||||
|
||||
##### Selecting an different driver loader image
|
||||
##### Selecting a different driver loader image
|
||||
|
||||
Note that since Falco 0.36.0 and Helm chart version 3.7.0 the driver loader image has been updated to be compatible with newer kernels (5.x and above) meaning that if you have an older kernel version and you are trying to build the kernel module you may experience issues. In that case you can use the `falco-driver-loader-legacy` image to use the previous version of the toolchain. To do so you can set the appropriate value, i.e. `--set driver.loader.initContainer.image.repository=falcosecurity/falco-driver-loader-legacy`.
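
The same override expressed as a values snippet (equivalent to the `--set` flag above):

```yaml
driver:
  loader:
    initContainer:
      image:
        # previous toolchain, for older kernels that fail with the default loader image
        repository: falcosecurity/falco-driver-loader-legacy
```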
|
||||
|
||||
|
@ -85,7 +88,7 @@ Note that **the driver is not required when using plugins**.
|
|||
|
||||
#### About gVisor
|
||||
gVisor is an application kernel, written in Go, that implements a substantial portion of the Linux system call interface. It provides an additional layer of isolation between running applications and the host operating system. For more information please consult the [official docs](https://gvisor.dev/docs/). In version `0.32.1`, Falco first introduced support for gVisor by leveraging the stream of system call information coming from gVisor.
|
||||
Falco requires the version of [runsc](https://gvisor.dev/docs/user_guide/install/) to be equal to or above `20220704.0`. The following snippet shows the gVisor configuration variables found in `values.yaml`:
|
||||
Falco requires the version of [runsc](https://gvisor.dev/docs/user_guide/install/) to be equal to or above `20220704.0`. The following snippet shows the gVisor configuration variables found in [values.yaml](./values.yaml):
|
||||
```yaml
|
||||
driver:
|
||||
gvisor:
|
||||
|
@ -108,13 +111,19 @@ A preset `values.yaml` file [values-gvisor-gke.yaml](./values-gvisor-gke.yaml) i
|
|||
If you use GKE with k8s version at least `1.24.4-gke.1800` or `1.25.0-gke.200` with gVisor sandboxed pods, you can install a Falco instance to monitor them with, e.g.:
|
||||
|
||||
```
|
||||
helm install falco-gvisor falcosecurity/falco -f https://raw.githubusercontent.com/falcosecurity/charts/master/falco/values-gvisor-gke.yaml --namespace falco-gvisor --create-namespace
|
||||
helm install falco-gvisor falcosecurity/falco \
|
||||
--create-namespace \
|
||||
--namespace falco-gvisor \
|
||||
-f https://raw.githubusercontent.com/falcosecurity/charts/master/charts/falco/values-gvisor-gke.yaml
|
||||
```
|
||||
|
||||
Note that the instance of Falco above will only monitor gVisor sandboxed workloads on gVisor-enabled node pools. If you also need to monitor regular workloads on regular node pools you can use the eBPF driver as usual:
|
||||
|
||||
```
|
||||
helm install falco falcosecurity/falco --set driver.kind=ebpf --namespace falco --create-namespace
|
||||
helm install falco falcosecurity/falco \
|
||||
--create-namespace \
|
||||
--namespace falco \
|
||||
--set driver.kind=ebpf
|
||||
```
|
||||
|
||||
The two instances of Falco will operate independently and can be installed, uninstalled or configured as needed. If you were already monitoring your regular node pools with eBPF you don't need to reinstall it.
|
||||
|
@ -130,28 +139,32 @@ The default configuration of the chart for new installations is to use the **fal
|
|||
* `falcoctl-artifact-install` an init container that makes sure to install the configured **artifacts** before the Falco container starts;
|
||||
* `falcoctl-artifact-follow` a sidecar container that periodically checks for new artifacts (currently only *falco-rules*) and downloads them;
|
||||
|
||||
For more info on how to enable/disable and configure the **falcoctl** tool checkout the config values [here](./generated/helm-values.md) and the [upgrading notes](./BREAKING-CHANGES.md#300)
|
||||
For more info on how to enable/disable and configure the **falcoctl** tool, check out the config values [here](./README.md#Configuration) and the [upgrading notes](./BREAKING-CHANGES.md#300).
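
As a minimal sketch, the two falcoctl containers can be toggled from the values file using the keys referenced above:

```yaml
falcoctl:
  artifact:
    install:
      # init container that installs the configured artifacts before Falco starts
      enabled: true
    follow:
      # sidecar that periodically checks for and downloads new artifacts
      enabled: false
```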
|
||||
|
||||
### Deploying Falco in Kubernetes
|
||||
After the clarification of the different [**event sources**](#falco-event-sources-and-kubernetes) and how they are consumed by Falco using the **drivers** and the **plugins**, now let us discuss how Falco is deployed in Kubernetes.
|
||||
|
||||
The chart deploys Falco using a `daemonset` or a `deployment` depending on the **event sources**.
|
||||
|
||||
#### Daemonset
|
||||
When using the [drivers](#about-the-driver), Falco is deployed as `daemonset`. By using a `daemonset`, k8s assures that a Falco instance will be running in each of our nodes even when we add new nodes to our cluster. So it is the perfect match when we need to monitor all the nodes in our cluster.
|
||||
When using the [drivers](#about-the-driver), Falco is typically deployed as a `DaemonSet`. By using a DaemonSet, Kubernetes ensures that a Falco instance is running on each node even as new nodes are added to your cluster. This makes it a perfect fit for monitoring across the entire cluster.
|
||||
|
||||
By default, with `driver.kind=auto`, the correct driver will be automatically selected for each node. This is accomplished through the **driver loader** (implemented by `falcoctl`), which generates a new Falco configuration file and picks the right engine driver (Modern eBPF, kmod, or legacy eBPF) based on the underlying environment. If you prefer to manually force a specific driver, see the other available options below.
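
Expressed in the values file, the default behaviour looks like the sketch below; set `driver.kind` explicitly only when you want to pin one driver on every node:

```yaml
driver:
  enabled: true
  # auto lets the driver loader pick modern_ebpf, kmod or ebpf per node
  kind: auto
```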
|
||||
|
||||
**Kernel module**
|
||||
|
||||
To run Falco with the [kernel module](https://falco.org/docs/event-sources/drivers/#kernel-module) you can use the default values of the helm chart:
|
||||
To run Falco with the [kernel module](https://falco.org/docs/concepts/event-sources/kernel/#kernel-module) you just need to set `driver.kind=kmod` as shown in the following snippet:
|
||||
|
||||
```bash
|
||||
helm install falco falcosecurity/falco \
|
||||
--create-namespace \
|
||||
--namespace falco \
|
||||
--set driver.kind=kmod
|
||||
```
|
||||
|
||||
**eBPF probe**
|
||||
**Legacy eBPF probe**
|
||||
|
||||
To run Falco with the [eBPF probe](https://falco.org/docs/event-sources/drivers/#ebpf-probe) you just need to set `driver.kind=ebpf` as shown in the following snippet:
|
||||
To run Falco with the [legacy eBPF probe](https://falco.org/docs/concepts/event-sources/kernel/#legacy-ebpf-probe) you just need to set `driver.kind=ebpf` as shown in the following snippet:
|
||||
|
||||
```bash
|
||||
helm install falco falcosecurity/falco \
|
||||
|
@ -160,15 +173,18 @@ helm install falco falcosecurity/falco \
|
|||
--set driver.kind=ebpf
|
||||
```
|
||||
|
||||
There are other configurations related to the eBPF probe, for more info please check the `values.yaml` file. After you have made your changes to the configuration file you just need to run:
|
||||
There are other configurations related to the eBPF probe, for more info please check the [values.yaml](./values.yaml) file. After you have made your changes to the configuration file you just need to run:
|
||||
|
||||
```bash
|
||||
helm install falco falcosecurity/falco --namespace "your-custom-name-space" --create-namespace
|
||||
helm install falco falcosecurity/falco \
|
||||
--create-namespace \
|
||||
--namespace "your-custom-name-space" \
|
||||
-f "path-to-custom-values.yaml-file"
|
||||
```
|
||||
|
||||
**modern eBPF probe**
|
||||
**Modern eBPF probe**
|
||||
|
||||
To run Falco with the [modern eBPF probe](https://falco.org/docs/event-sources/drivers/#modern-ebpf-probe-experimental) you just need to set `driver.kind=modern-bpf` as shown in the following snippet:
|
||||
To run Falco with the [modern eBPF probe](https://falco.org/docs/concepts/event-sources/kernel/#modern-ebpf-probe) you just need to set `driver.kind=modern_ebpf` as shown in the following snippet:
|
||||
|
||||
```bash
|
||||
helm install falco falcosecurity/falco \
|
||||
|
@ -213,7 +229,7 @@ A scenario when we need the `-p (--previous)` flag is when we have a restart of
|
|||
|
||||
### Enabling real time logs
|
||||
By default the Falco output is buffered. When live streaming logs you will notice delays between the log output (rules triggering) and the event happening.
|
||||
In order to enable the logs to be emitted without delays you need to set `.Values.tty=true` in `values.yaml` file.
|
||||
In order to enable the logs to be emitted without delays, you need to set `.Values.tty=true` in the [values.yaml](./values.yaml) file.
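
A minimal values override for that:

```yaml
# emit logs unbuffered so rule output appears in real time
tty: true
```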
|
||||
|
||||
## K8s-metacollector
|
||||
Starting from Falco `0.37` the old [k8s-client](https://github.com/falcosecurity/falco/issues/2973) has been removed.
|
||||
|
@ -328,7 +344,7 @@ The Kubernetes Audit Log is now supported via the built-in [k8saudit](https://gi
|
|||
|
||||
The following snippet shows how to deploy Falco with the [k8saudit](https://github.com/falcosecurity/plugins/tree/master/plugins/k8saudit) plugin:
|
||||
```yaml
|
||||
# -- Disable the drivers since we want to deplouy only the k8saudit plugin.
|
||||
# -- Disable the drivers since we want to deploy only the k8saudit plugin.
|
||||
driver:
|
||||
enabled: false
|
||||
|
||||
|
@ -336,7 +352,7 @@ driver:
|
|||
collectors:
|
||||
enabled: false
|
||||
|
||||
# -- Deploy Falco as a deployment. One instance of Falco is enough. Anyway the number of replicas is configurabale.
|
||||
# -- Deploy Falco as a deployment. One instance of Falco is enough. Anyway the number of replicas is configurable.
|
||||
controller:
|
||||
kind: deployment
|
||||
deployment:
|
||||
|
@ -356,14 +372,13 @@ falcoctl:
|
|||
config:
|
||||
artifact:
|
||||
install:
|
||||
# -- Do not resolve the depenencies for artifacts. By default is true, but for our use case we disable it.
|
||||
resolveDeps: false
|
||||
# -- Resolve the dependencies for artifacts.
|
||||
resolveDeps: true
|
||||
# -- List of artifacts to be installed by the falcoctl init container.
|
||||
# Only rulesfiles, we do no recommend plugins for security reasonts since they are executable objects.
|
||||
# Only rulesfile, the plugin will be installed as a dependency.
|
||||
refs: [k8saudit-rules:0.5]
|
||||
follow:
|
||||
# -- List of artifacts to be followed by the falcoctl sidecar container.
|
||||
# Only rulesfiles, we do not recommend plugins for security reasons since they are executable objects.
|
||||
refs: [k8saudit-rules:0.5]
|
||||
|
||||
services:
|
||||
|
@ -375,7 +390,7 @@ services:
|
|||
protocol: TCP
|
||||
|
||||
falco:
|
||||
rules_file:
|
||||
rules_files:
|
||||
- /etc/falco/k8s_audit_rules.yaml
|
||||
- /etc/falco/rules.d
|
||||
plugins:
|
||||
|
@ -396,8 +411,8 @@ falco:
|
|||
Here is the explanation of the above configuration:
|
||||
* disable the drivers by setting `driver.enabled=false`;
|
||||
* disable the collectors by setting `collectors.enabled=false`;
|
||||
* deploy the Falco using a k8s *deploment* by setting `controller.kind=deployment`;
|
||||
* makes our Falco instance reachable by the `k8s api-server` by configuring a service for it in `services`;
|
||||
* deploy the Falco using a k8s *deployment* by setting `controller.kind=deployment`;
|
||||
* make our Falco instance reachable by the `k8s api-server` by configuring a service for it in `services`;
|
||||
* enable the `falcoctl-artifact-install` init container;
|
||||
* configure `falcoctl-artifact-install` to install the required plugins;
|
||||
* disable the `falcoctl-artifact-follow` sidecar container;
|
||||
|
@ -405,12 +420,15 @@ Here is the explanation of the above configuration:
|
|||
* configure the plugins to be loaded, in this case, the `k8saudit` and `json`;
|
||||
* and finally we add our plugins in the `load_plugins` to be loaded by Falco.
|
||||
|
||||
The configuration can be found in the `values-k8saudit.yaml` file ready to be used:
|
||||
The configuration can be found in the [values-k8saudit.yaml](./values-k8saudit.yaml) file, ready to be used:
|
||||
|
||||
|
||||
```bash
|
||||
#make sure the falco namespace exists
|
||||
helm install falco falcosecurity/falco --namespace falco -f ./values-k8saudit.yaml --create-namespace
|
||||
helm install falco falcosecurity/falco \
|
||||
--create-namespace \
|
||||
--namespace falco \
|
||||
-f ./values-k8saudit.yaml
|
||||
```
|
||||
After a few minutes a Falco instance should be running on your cluster. The status of Falco pod can be inspected through *kubectl*:
|
||||
```bash
|
||||
|
@ -428,7 +446,7 @@ Furthermore you can check that Falco logs through *kubectl logs*
|
|||
```bash
|
||||
kubectl logs -n falco falco-64484d9579-qckms
|
||||
```
|
||||
In the logs you should have something similar to the following, indcating that Falco has loaded the required plugins:
|
||||
In the logs you should have something similar to the following, indicating that Falco has loaded the required plugins:
|
||||
```bash
|
||||
Fri Jul 8 16:07:24 2022: Falco version 0.32.0 (driver version 39ae7d40496793cf3d3e7890c9bbdc202263836b)
|
||||
Fri Jul 8 16:07:24 2022: Falco initialized with configuration file /etc/falco/falco.yaml
|
||||
|
@ -480,7 +498,7 @@ spec:
|
|||
- Master
|
||||
- content: |
|
||||
# ... paste audit-policy.yaml here ...
|
||||
# https://raw.githubusercontent.com/falcosecurity/evolution/master/examples/k8s_audit_config/audit-policy.yaml
|
||||
# https://raw.githubusercontent.com/falcosecurity/plugins/master/plugins/k8saudit/configs/audit-policy.yaml
|
||||
name: audit-policy.yaml
|
||||
roles:
|
||||
- Master
|
||||
|
@ -492,8 +510,6 @@ Moreover, Falco supports running a gRPC server with two main binding types:
|
|||
- Over a local **Unix socket** with no authentication
|
||||
- Over the **network** with mandatory mutual TLS authentication (mTLS)
|
||||
|
||||
> **Tip**: Once gRPC is enabled, you can deploy [falco-exporter](https://github.com/falcosecurity/falco-exporter) to export metrics to Prometheus.
|
||||
|
||||
### gRPC over unix socket (default)
|
||||
|
||||
The preferred way to use the gRPC is over a Unix socket.
|
||||
|
@ -501,10 +517,11 @@ The preferred way to use the gRPC is over a Unix socket.
|
|||
To install Falco with gRPC enabled over a **unix socket**, you have to:
|
||||
|
||||
```shell
|
||||
helm install falco \
|
||||
--set falco.grpc.enabled=true \
|
||||
--set falco.grpc_output.enabled=true \
|
||||
falcosecurity/falco
|
||||
helm install falco falcosecurity/falco \
|
||||
--create-namespace \
|
||||
--namespace falco \
|
||||
--set falco.grpc.enabled=true \
|
||||
--set falco.grpc_output.enabled=true
|
||||
```
|
||||
|
||||
### gRPC over network
|
||||
|
@ -515,14 +532,16 @@ How to generate the certificates is [documented here](https://falco.org/docs/grp
|
|||
To install Falco with gRPC enabled over the **network**, you have to:
|
||||
|
||||
```shell
|
||||
helm install falco \
|
||||
--set falco.grpc.enabled=true \
|
||||
--set falco.grpc_output.enabled=true \
|
||||
--set falco.grpc.unixSocketPath="" \
|
||||
--set-file certs.server.key=/path/to/server.key \
|
||||
--set-file certs.server.crt=/path/to/server.crt \
|
||||
--set-file certs.ca.crt=/path/to/ca.crt \
|
||||
falcosecurity/falco
|
||||
helm install falco falcosecurity/falco \
|
||||
--create-namespace \
|
||||
--namespace falco \
|
||||
--set falco.grpc.enabled=true \
|
||||
--set falco.grpc_output.enabled=true \
|
||||
--set falco.grpc.unixSocketPath="" \
|
||||
--set-file certs.server.key=/path/to/server.key \
|
||||
--set-file certs.server.crt=/path/to/server.crt \
|
||||
--set-file certs.ca.crt=/path/to/ca.crt
|
||||
|
||||
```
|
||||
|
||||
## Enable http_output
|
||||
|
@ -530,28 +549,30 @@ helm install falco \
|
|||
HTTP output enables Falco to send events through HTTP(S) via the following configuration:
|
||||
|
||||
```shell
|
||||
helm install falco \
|
||||
--set falco.http_output.enabled=true \
|
||||
--set falco.http_output.url="http://some.url/some/path/" \
|
||||
--set falco.json_output=true \
|
||||
--set json_include_output_property=true
|
||||
falcosecurity/falco
|
||||
helm install falco falcosecurity/falco \
|
||||
--create-namespace \
|
||||
--namespace falco \
|
||||
--set falco.http_output.enabled=true \
|
||||
--set falco.http_output.url="http://some.url/some/path/" \
|
||||
--set falco.json_output=true \
|
||||
--set json_include_output_property=true
|
||||
```
|
||||
|
||||
Additionaly, you can enable mTLS communication and load HTTP client cryptographic material via:
|
||||
Additionally, you can enable mTLS communication and load HTTP client cryptographic material via:
|
||||
|
||||
```shell
|
||||
helm install falco \
|
||||
--set falco.http_output.enabled=true \
|
||||
--set falco.http_output.url="https://some.url/some/path/" \
|
||||
--set falco.json_output=true \
|
||||
--set json_include_output_property=true \
|
||||
--set falco.http_output.mtls=true \
|
||||
--set falco.http_output.client_cert="/etc/falco/certs/client/client.crt" \
|
||||
--set falco.http_output.client_key="/etc/falco/certs/client/client.key" \
|
||||
--set falco.http_output.ca_cert="/etc/falco/certs/client/ca.crt" \
|
||||
--set-file certs.client.key="/path/to/client.key",certs.client.crt="/path/to/client.crt",certs.ca.crt="/path/to/cacert.crt" \
|
||||
falcosecurity/falco
|
||||
helm install falco falcosecurity/falco \
|
||||
--create-namespace \
|
||||
--namespace falco \
|
||||
--set falco.http_output.enabled=true \
|
||||
--set falco.http_output.url="https://some.url/some/path/" \
|
||||
--set falco.json_output=true \
|
||||
--set json_include_output_property=true \
|
||||
--set falco.http_output.mtls=true \
|
||||
--set falco.http_output.client_cert="/etc/falco/certs/client/client.crt" \
|
||||
--set falco.http_output.client_key="/etc/falco/certs/client/client.key" \
|
||||
--set falco.http_output.ca_cert="/etc/falco/certs/client/ca.crt" \
|
||||
--set-file certs.client.key="/path/to/client.key",certs.client.crt="/path/to/client.crt",certs.ca.crt="/path/to/cacert.crt"
|
||||
```
|
||||
|
||||
Alternatively, instead of directly setting the files via `--set-file`, you can mount an existing secret with the `certs.existingClientSecret` value.
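
A sketch of that approach, assuming a pre-created Secret (the name `falco-client-certs` is just an example) holding the client key, client certificate and CA certificate:

```yaml
certs:
  # example Secret name; it must already exist in the release namespace
  existingClientSecret: falco-client-certs
falco:
  http_output:
    enabled: true
    url: "https://some.url/some/path/"
    mtls: true
    client_cert: /etc/falco/certs/client/client.crt
    client_key: /etc/falco/certs/client/client.key
    ca_cert: /etc/falco/certs/client/ca.crt
```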
|
||||
|
@ -559,13 +580,13 @@ Or instead of directly setting the files via `--set-file`, mounting an existing
|
|||
## Deploy Falcosidekick with Falco
|
||||
|
||||
[`Falcosidekick`](https://github.com/falcosecurity/falcosidekick) can be installed with `Falco` by setting `--set falcosidekick.enabled=true`. This setting automatically configures all options of `Falco` for working with `Falcosidekick`.
|
||||
All values for the configuration of `Falcosidekick` are available by prefixing them with `falcosidekick.`. The full list of available values is [here](https://github.com/falcosecurity/charts/tree/master/falcosidekick#configuration).
|
||||
All values for the configuration of `Falcosidekick` are available by prefixing them with `falcosidekick.`. The full list of available values is [here](https://github.com/falcosecurity/charts/tree/master/charts/falcosidekick#configuration).
|
||||
For example, to enable the deployment of [`Falcosidekick-UI`](https://github.com/falcosecurity/falcosidekick-ui), add `--set falcosidekick.enabled=true --set falcosidekick.webui.enabled=true`.
|
||||
|
||||
If you use a proxy in your cluster, the requests between `Falco` and `Falcosidekick` might be intercepted; to avoid that, use the full FQDN of `Falcosidekick` by setting `--set falcosidekick.fullfqdn=true`.
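
Put together, a sketch of the values needed to ship events to Falcosidekick and its UI:

```yaml
falcosidekick:
  enabled: true
  webui:
    enabled: true
  # fullfqdn: true   # uncomment when a proxy would otherwise intercept in-cluster requests
```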
|
||||
|
||||
## Configuration
|
||||
|
||||
The following table lists the main configurable parameters of the {{ template "chart.name" . }} chart v{{ template "chart.version" . }} and their default values. See `values.yaml` for full list.
|
||||
The following table lists the main configurable parameters of the {{ template "chart.name" . }} chart v{{ template "chart.version" . }} and their default values. See [values.yaml](./values.yaml) for full list.
|
||||
|
||||
{{ template "chart.valuesSection" . }}
|
||||
|
|
File diff suppressed because one or more lines are too long
|
@ -1,16 +0,0 @@
|
|||
# CI values for Falco.
|
||||
# The following values will bypass the installation of the kernel module
|
||||
# and disable the kernel space driver.
|
||||
|
||||
# disable the kernel space driver
|
||||
driver:
|
||||
enabled: false
|
||||
|
||||
# make Falco run in userspace only mode
|
||||
extra:
|
||||
args:
|
||||
- --userspace
|
||||
|
||||
# enforce /proc mounting since Falco still tries to scan it
|
||||
mounts:
|
||||
enforceProcMount: true
|
File diff suppressed because it is too large
|
@ -41,6 +41,7 @@ WARNING(drivers):
|
|||
|
||||
{{- if and (not (empty .Values.falco.load_plugins)) (or .Values.falcoctl.artifact.follow.enabled .Values.falcoctl.artifact.install.enabled) }}
|
||||
|
||||
WARNING:
|
||||
{{ printf "It seems you are loading the following plugins %v, please make sure to install them by adding the correct reference to falcoctl.config.artifact.install.refs: %v" .Values.falco.load_plugins .Values.falcoctl.config.artifact.install.refs -}}
|
||||
NOTICE:
|
||||
{{ printf "It seems you are loading the following plugins %v, please make sure to install them by specifying the correct reference to falcoctl.config.artifact.install.refs: %v" .Values.falco.load_plugins .Values.falcoctl.config.artifact.install.refs -}}
|
||||
{{ printf "Ignore this notice if the value of falcoctl.config.artifact.install.refs is correct already." -}}
|
||||
{{- end }}
|
|
@ -89,7 +89,7 @@ Return the proper Falco image name
|
|||
{{- . }}/
|
||||
{{- end -}}
|
||||
{{- .Values.image.repository }}:
|
||||
{{- .Values.image.tag | default .Chart.AppVersion -}}
|
||||
{{- .Values.image.tag | default (printf "%s" .Chart.AppVersion) -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
|
@ -280,8 +280,8 @@ be temporary and will stay here until we move this logic to the falcoctl tool.
|
|||
{{- with .Values.falcoctl.artifact.install.mounts.volumeMounts }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
env:
|
||||
{{- if .Values.falcoctl.artifact.install.env }}
|
||||
env:
|
||||
{{- include "falco.renderTemplate" ( dict "value" .Values.falcoctl.artifact.install.env "context" $) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
|
@ -314,8 +314,8 @@ be temporary and will stay here until we move this logic to the falcoctl tool.
|
|||
{{- with .Values.falcoctl.artifact.follow.mounts.volumeMounts }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
env:
|
||||
{{- if .Values.falcoctl.artifact.follow.env }}
|
||||
env:
|
||||
{{- include "falco.renderTemplate" ( dict "value" .Values.falcoctl.artifact.follow.env "context" $) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
|
@ -361,7 +361,7 @@ be temporary and will stay here until we move this logic to the falcoctl tool.
|
|||
{{- if not $hasConfig -}}
|
||||
{{- $listenPort := default (index .Values "k8s-metacollector" "service" "ports" "broker-grpc" "port") .Values.collectors.kubernetes.collectorPort -}}
|
||||
{{- $listenPort = int $listenPort -}}
|
||||
{{- $pluginConfig := dict "name" "k8smeta" "library_path" "libk8smeta.so" "init_config" (dict "collectorHostname" $hostname "collectorPort" $listenPort "nodeName" "${FALCO_K8S_NODE_NAME}") -}}
|
||||
{{- $pluginConfig := dict "name" "k8smeta" "library_path" "libk8smeta.so" "init_config" (dict "collectorHostname" $hostname "collectorPort" $listenPort "nodeName" "${FALCO_K8S_NODE_NAME}" "verbosity" .Values.collectors.kubernetes.verbosity "hostProc" .Values.collectors.kubernetes.hostProc) -}}
|
||||
{{- $newConfig := append .Values.falco.plugins $pluginConfig -}}
|
||||
{{- $_ := set .Values.falco "plugins" ($newConfig | uniq) -}}
|
||||
{{- $loadedPlugins := append .Values.falco.load_plugins "k8smeta" -}}
|
||||
|
@ -377,7 +377,7 @@ Based on the user input it populates the driver configuration in the falco confi
|
|||
*/}}
|
||||
{{- define "falco.engineConfiguration" -}}
|
||||
{{- if .Values.driver.enabled -}}
|
||||
{{- $supportedDrivers := list "kmod" "ebpf" "modern_ebpf" "gvisor" -}}
|
||||
{{- $supportedDrivers := list "kmod" "ebpf" "modern_ebpf" "gvisor" "auto" -}}
|
||||
{{- $aliasDrivers := list "module" "modern-bpf" -}}
|
||||
{{- if and (not (has .Values.driver.kind $supportedDrivers)) (not (has .Values.driver.kind $aliasDrivers)) -}}
|
||||
{{- fail (printf "unsupported driver kind: \"%s\". Supported drivers %s, alias %s" .Values.driver.kind $supportedDrivers $aliasDrivers) -}}
|
||||
|
@ -395,6 +395,9 @@ Based on the user input it populates the driver configuration in the falco confi
|
|||
{{- $root := printf "/host%s/k8s.io" .Values.driver.gvisor.runsc.root -}}
|
||||
{{- $gvisorConfig := dict "kind" "gvisor" "gvisor" (dict "config" "/gvisor-config/pod-init.json" "root" $root) -}}
|
||||
{{- $_ := set .Values.falco "engine" $gvisorConfig -}}
|
||||
{{- else if eq .Values.driver.kind "auto" -}}
|
||||
{{- $engineConfig := dict "kind" "modern_ebpf" "kmod" (dict "buf_size_preset" .Values.driver.kmod.bufSizePreset "drop_failed_exit" .Values.driver.kmod.dropFailedExit) "ebpf" (dict "buf_size_preset" .Values.driver.ebpf.bufSizePreset "drop_failed_exit" .Values.driver.ebpf.dropFailedExit "probe" .Values.driver.ebpf.path) "modern_ebpf" (dict "buf_size_preset" .Values.driver.modernEbpf.bufSizePreset "drop_failed_exit" .Values.driver.modernEbpf.dropFailedExit "cpus_for_each_buffer" .Values.driver.modernEbpf.cpusForEachBuffer) -}}
|
||||
{{- $_ := set .Values.falco "engine" $engineConfig -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
@ -403,15 +406,156 @@ Based on the user input it populates the driver configuration in the falco confi
|
|||
It returns "true" if the driver loader has to be enabled, otherwise false.
|
||||
*/}}
|
||||
{{- define "driverLoader.enabled" -}}
|
||||
{{- if or
|
||||
(eq .Values.driver.kind "modern_ebpf")
|
||||
(eq .Values.driver.kind "modern-bpf")
|
||||
(eq .Values.driver.kind "gvisor")
|
||||
(not .Values.driver.enabled)
|
||||
(not .Values.driver.loader.enabled)
|
||||
-}}
|
||||
{{- if or (eq .Values.driver.kind "modern_ebpf") (eq .Values.driver.kind "modern-bpf") (eq .Values.driver.kind "gvisor") (not .Values.driver.enabled) (not .Values.driver.loader.enabled) -}}
|
||||
false
|
||||
{{- else -}}
|
||||
true
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Based on the user input it populates the metrics configuration in the falco config map.
|
||||
*/}}
|
||||
{{- define "falco.metricsConfiguration" -}}
|
||||
{{- if .Values.metrics.enabled -}}
|
||||
{{- $_ := set .Values.falco.webserver "prometheus_metrics_enabled" true -}}
|
||||
{{- $_ = set .Values.falco.webserver "enabled" true -}}
|
||||
{{- $_ = set .Values.falco.metrics "enabled" .Values.metrics.enabled -}}
|
||||
{{- $_ = set .Values.falco.metrics "interval" .Values.metrics.interval -}}
|
||||
{{- $_ = set .Values.falco.metrics "output_rule" .Values.metrics.outputRule -}}
|
||||
{{- $_ = set .Values.falco.metrics "rules_counters_enabled" .Values.metrics.rulesCountersEnabled -}}
|
||||
{{- $_ = set .Values.falco.metrics "resource_utilization_enabled" .Values.metrics.resourceUtilizationEnabled -}}
|
||||
{{- $_ = set .Values.falco.metrics "state_counters_enabled" .Values.metrics.stateCountersEnabled -}}
|
||||
{{- $_ = set .Values.falco.metrics "kernel_event_counters_enabled" .Values.metrics.kernelEventCountersEnabled -}}
|
||||
{{- $_ = set .Values.falco.metrics "kernel_event_counters_per_cpu_enabled" .Values.metrics.kernelEventCountersPerCPUEnabled -}}
|
||||
{{- $_ = set .Values.falco.metrics "libbpf_stats_enabled" .Values.metrics.libbpfStatsEnabled -}}
|
||||
{{- $_ = set .Values.falco.metrics "convert_memory_to_mb" .Values.metrics.convertMemoryToMB -}}
|
||||
{{- $_ = set .Values.falco.metrics "include_empty_values" .Values.metrics.includeEmptyValues -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
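The helper above only copies values through, so the camelCase keys under `metrics` map one-to-one onto the snake_case keys in `falco.metrics`; note it also forces `falco.webserver.enabled` and `prometheus_metrics_enabled` to true, so enabling metrics implicitly opens the web server. A hedged sketch (only the key names are taken from the template, the interval and flags are illustrative):

```yaml
metrics:
  enabled: true
  interval: 1h                      # -> falco.metrics.interval
  outputRule: false                 # -> falco.metrics.output_rule
  rulesCountersEnabled: true        # -> falco.metrics.rules_counters_enabled
  kernelEventCountersEnabled: true  # -> falco.metrics.kernel_event_counters_enabled
  libbpfStatsEnabled: true          # -> falco.metrics.libbpf_stats_enabled
  convertMemoryToMB: true           # -> falco.metrics.convert_memory_to_mb
  includeEmptyValues: false         # -> falco.metrics.include_empty_values
```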
|
||||
|
||||
{{/*
|
||||
This helper is used to add the container plugin to the falco configuration.
|
||||
*/}}
|
||||
{{ define "falco.containerPlugin" -}}
|
||||
{{ if and .Values.driver.enabled .Values.collectors.enabled -}}
|
||||
{{ if and (or .Values.collectors.docker.enabled .Values.collectors.crio.enabled .Values.collectors.containerd.enabled) .Values.collectors.containerEngine.enabled -}}
|
||||
{{ fail "You can not enable any of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time. Please use the containerEngine configuration since the old configurations are deprecated." }}
|
||||
{{ else if or .Values.collectors.docker.enabled .Values.collectors.crio.enabled .Values.collectors.containerd.enabled .Values.collectors.containerEngine.enabled -}}
|
||||
{{ if or .Values.collectors.docker.enabled .Values.collectors.crio.enabled .Values.collectors.containerd.enabled -}}
|
||||
{{ $_ := set .Values.collectors.containerEngine.engines.docker "enabled" .Values.collectors.docker.enabled -}}
|
||||
{{ $_ = set .Values.collectors.containerEngine.engines.docker "sockets" (list .Values.collectors.docker.socket) -}}
|
||||
{{ $_ = set .Values.collectors.containerEngine.engines.containerd "enabled" .Values.collectors.containerd.enabled -}}
|
||||
{{ $_ = set .Values.collectors.containerEngine.engines.containerd "sockets" (list .Values.collectors.containerd.socket) -}}
|
||||
{{ $_ = set .Values.collectors.containerEngine.engines.cri "enabled" .Values.collectors.crio.enabled -}}
|
||||
{{ $_ = set .Values.collectors.containerEngine.engines.cri "sockets" (list .Values.collectors.crio.socket) -}}
|
||||
{{ $_ = set .Values.collectors.containerEngine.engines.podman "enabled" false -}}
|
||||
{{ $_ = set .Values.collectors.containerEngine.engines.lxc "enabled" false -}}
|
||||
{{ $_ = set .Values.collectors.containerEngine.engines.libvirt_lxc "enabled" false -}}
|
||||
{{ $_ = set .Values.collectors.containerEngine.engines.bpm "enabled" false -}}
|
||||
{{ end -}}
|
||||
{{ $hasConfig := false -}}
|
||||
{{ range .Values.falco.plugins -}}
|
||||
{{ if eq (get . "name") "container" -}}
|
||||
{{ $hasConfig = true -}}
|
||||
{{ end -}}
|
||||
{{ end -}}
|
||||
{{ if not $hasConfig -}}
|
||||
{{ $pluginConfig := dict -}}
|
||||
{{ with .Values.collectors.containerEngine -}}
|
||||
{{ $pluginConfig = dict "name" "container" "library_path" "libcontainer.so" "init_config" (dict "label_max_len" .labelMaxLen "with_size" .withSize "hooks" .hooks "engines" .engines) -}}
|
||||
{{ end -}}
|
||||
{{ $newConfig := append .Values.falco.plugins $pluginConfig -}}
|
||||
{{ $_ := set .Values.falco "plugins" ($newConfig | uniq) -}}
|
||||
{{ $loadedPlugins := append .Values.falco.load_plugins "container" -}}
|
||||
{{ $_ = set .Values.falco "load_plugins" ($loadedPlugins | uniq) -}}
|
||||
{{ end -}}
|
||||
{{ $_ := set .Values.falcoctl.config.artifact.install "refs" ((append .Values.falcoctl.config.artifact.install.refs .Values.collectors.containerEngine.pluginRef) | uniq) -}}
|
||||
{{ $_ = set .Values.falcoctl.config.artifact "allowedTypes" ((append .Values.falcoctl.config.artifact.allowedTypes "plugin") | uniq) -}}
|
||||
{{ end -}}
|
||||
{{ end -}}
|
||||
{{ end -}}
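Because the helper fails when any of the legacy docker/containerd/crio collectors is enabled together with `containerEngine`, the new-style configuration has to carry the socket list itself. A hedged values sketch (key names from the template; socket paths mirror the defaults asserted in the unit tests later in this diff, everything else is illustrative):

```yaml
collectors:
  enabled: true
  docker:     { enabled: false }   # legacy collectors must stay off
  containerd: { enabled: false }
  crio:       { enabled: false }
  containerEngine:
    enabled: true
    labelMaxLen: 100
    withSize: false
    hooks: ["create"]
    engines:
      docker:
        enabled: true
        sockets: ["/var/run/docker.sock"]
      containerd:
        enabled: true
        sockets: ["/run/host-containerd/containerd.sock"]
      # podman, cri, lxc, libvirt_lxc and bpm follow the same enabled/sockets shape
```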
|
||||
|
||||
{{/*
|
||||
This helper is used to add container plugin volumes to the falco pod.
|
||||
*/}}
|
||||
{{- define "falco.containerPluginVolumes" -}}
|
||||
{{- if and .Values.driver.enabled .Values.collectors.enabled -}}
|
||||
{{- if and (or .Values.collectors.docker.enabled .Values.collectors.crio.enabled .Values.collectors.containerd.enabled) .Values.collectors.containerEngine.enabled -}}
|
||||
{{ fail "You can not enable any of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time. Please use the containerEngine configuration since the old configurations are deprecated." }}
|
||||
{{- end -}}
|
||||
{{ $volumes := list -}}
|
||||
{{- if .Values.collectors.docker.enabled -}}
|
||||
{{ $volumes = append $volumes (dict "name" "docker-socket" "hostPath" (dict "path" .Values.collectors.docker.socket)) -}}
|
||||
{{- end -}}
|
||||
{{- if .Values.collectors.crio.enabled -}}
|
||||
{{ $volumes = append $volumes (dict "name" "crio-socket" "hostPath" (dict "path" .Values.collectors.crio.socket)) -}}
|
||||
{{- end -}}
|
||||
{{- if .Values.collectors.containerd.enabled -}}
|
||||
{{ $volumes = append $volumes (dict "name" "containerd-socket" "hostPath" (dict "path" .Values.collectors.containerd.socket)) -}}
|
||||
{{- end -}}
|
||||
{{- if .Values.collectors.containerEngine.enabled -}}
|
||||
{{- $seenPaths := dict -}}
|
||||
{{- $idx := 0 -}}
|
||||
{{- $engineOrder := list "docker" "podman" "containerd" "cri" "lxc" "libvirt_lxc" "bpm" -}}
|
||||
{{- range $engineName := $engineOrder -}}
|
||||
{{- $val := index $.Values.collectors.containerEngine.engines $engineName -}}
|
||||
{{- if and $val $val.enabled -}}
|
||||
{{- range $index, $socket := $val.sockets -}}
|
||||
{{- $mountPath := print "/host" $socket -}}
|
||||
{{- if not (hasKey $seenPaths $mountPath) -}}
|
||||
{{ $volumes = append $volumes (dict "name" (printf "container-engine-socket-%d" $idx) "hostPath" (dict "path" $socket)) -}}
|
||||
{{- $idx = add $idx 1 -}}
|
||||
{{- $_ := set $seenPaths $mountPath true -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- if gt (len $volumes) 0 -}}
|
||||
{{ toYaml $volumes -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
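For illustration, with only the docker engine enabled on its default socket the helper above would emit a single deduplicated hostPath volume, named after the `container-engine-socket-<n>` scheme in the template:

```yaml
# Illustrative render, not a literal chart output.
- name: container-engine-socket-0
  hostPath:
    path: /var/run/docker.sock
```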
|
||||
|
||||
{{/*
|
||||
This helper is used to add container plugin volumeMounts to the falco pod.
|
||||
*/}}
|
||||
{{- define "falco.containerPluginVolumeMounts" -}}
|
||||
{{- if and .Values.driver.enabled .Values.collectors.enabled -}}
|
||||
{{- if and (or .Values.collectors.docker.enabled .Values.collectors.crio.enabled .Values.collectors.containerd.enabled) .Values.collectors.containerEngine.enabled -}}
|
||||
{{ fail "You can not enable any of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time. Please use the containerEngine configuration since the old configurations are deprecated." }}
|
||||
{{- end -}}
|
||||
{{ $volumeMounts := list -}}
|
||||
{{- if .Values.collectors.docker.enabled -}}
|
||||
{{ $volumeMounts = append $volumeMounts (dict "name" "docker-socket" "mountPath" (print "/host" .Values.collectors.docker.socket)) -}}
|
||||
{{- end -}}
|
||||
{{- if .Values.collectors.crio.enabled -}}
|
||||
{{ $volumeMounts = append $volumeMounts (dict "name" "crio-socket" "mountPath" (print "/host" .Values.collectors.crio.socket)) -}}
|
||||
{{- end -}}
|
||||
{{- if .Values.collectors.containerd.enabled -}}
|
||||
{{ $volumeMounts = append $volumeMounts (dict "name" "containerd-socket" "mountPath" (print "/host" .Values.collectors.containerd.socket)) -}}
|
||||
{{- end -}}
|
||||
{{- if .Values.collectors.containerEngine.enabled -}}
|
||||
{{- $seenPaths := dict -}}
|
||||
{{- $idx := 0 -}}
|
||||
{{- $engineOrder := list "docker" "podman" "containerd" "cri" "lxc" "libvirt_lxc" "bpm" -}}
|
||||
{{- range $engineName := $engineOrder -}}
|
||||
{{- $val := index $.Values.collectors.containerEngine.engines $engineName -}}
|
||||
{{- if and $val $val.enabled -}}
|
||||
{{- range $index, $socket := $val.sockets -}}
|
||||
{{- $mountPath := print "/host" $socket -}}
|
||||
{{- if not (hasKey $seenPaths $mountPath) -}}
|
||||
{{ $volumeMounts = append $volumeMounts (dict "name" (printf "container-engine-socket-%d" $idx) "mountPath" $mountPath) -}}
|
||||
{{- $idx = add $idx 1 -}}
|
||||
{{- $_ := set $seenPaths $mountPath true -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- if gt (len $volumeMounts) 0 -}}
|
||||
{{ toYaml ($volumeMounts) }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
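The companion mount for the same example: every socket is mounted under `/host` inside the Falco container, and the `$seenPaths` map keeps the volume and mount indices aligned by de-duplicating on the prefixed path:

```yaml
# Illustrative render matching the volume sketch above.
- name: container-engine-socket-0
  mountPath: /host/var/run/docker.sock
```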
|
||||
|
|
|
@ -10,4 +10,6 @@ data:
|
|||
{{- include "falco.falcosidekickConfig" . }}
|
||||
{{- include "k8smeta.configuration" . -}}
|
||||
{{- include "falco.engineConfiguration" . -}}
|
||||
{{- include "falco.metricsConfiguration" . -}}
|
||||
{{- include "falco.containerPlugin" . -}}
|
||||
{{- toYaml .Values.falco | nindent 4 }}
|
||||
|
|
|
@ -6,6 +6,9 @@ metadata:
|
|||
namespace: {{ include "falco.namespace" . }}
|
||||
labels:
|
||||
{{- include "falco.labels" . | nindent 4 }}
|
||||
{{- if .Values.controller.labels }}
|
||||
{{- toYaml .Values.controller.labels | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.controller.annotations }}
|
||||
annotations:
|
||||
{{ toYaml .Values.controller.annotations | nindent 4 }}
|
||||
|
@ -20,4 +23,4 @@ spec:
|
|||
updateStrategy:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
|
|
@ -6,6 +6,9 @@ metadata:
|
|||
namespace: {{ include "falco.namespace" . }}
|
||||
labels:
|
||||
{{- include "falco.labels" . | nindent 4 }}
|
||||
{{- if .Values.controller.labels }}
|
||||
{{- toYaml .Values.controller.labels | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.controller.annotations }}
|
||||
annotations:
|
||||
{{ toYaml .Values.controller.annotations | nindent 4 }}
|
||||
|
@ -20,4 +23,4 @@ spec:
|
|||
{{- include "falco.selectorLabels" . | nindent 6 }}
|
||||
template:
|
||||
{{- include "falco.podTemplate" . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
|
|
@ -0,0 +1,22 @@
|
|||
{{- if .Values.grafana.dashboards.enabled -}}
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ .Values.grafana.dashboards.configMaps.falco.name }}
|
||||
{{ if .Values.grafana.dashboards.configMaps.falco.namespace }}
|
||||
namespace: {{ .Values.grafana.dashboards.configMaps.falco.namespace }}
|
||||
{{- else -}}
|
||||
namespace: {{ include "falco.namespace" . }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "falco.labels" . | nindent 4 }}
|
||||
grafana_dashboard: "1"
|
||||
{{- if .Values.grafana.dashboards.configMaps.falco.folder }}
|
||||
annotations:
|
||||
k8s-sidecar-target-directory: /tmp/dashboards/{{ .Values.grafana.dashboards.configMaps.falco.folder}}
|
||||
grafana_dashboard_folder: {{ .Values.grafana.dashboards.configMaps.falco.folder }}
|
||||
{{- end }}
|
||||
data:
|
||||
falco-dashboard.json: |-
|
||||
{{- .Files.Get "dashboards/falco-dashboard.json" | nindent 4 }}
|
||||
{{- end -}}
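A hedged values sketch for this new dashboard ConfigMap; the key names are the ones referenced in the template, while the name, namespace, and folder values are placeholders:

```yaml
grafana:
  dashboards:
    enabled: true
    configMaps:
      falco:
        name: falco-grafana-dashboard   # placeholder name
        namespace: ""                   # empty -> falls back to the release namespace
        folder: falco                   # optional; adds the sidecar folder annotations
```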
|
|
@ -9,5 +9,6 @@ metadata:
|
|||
data:
|
||||
falcoctl.yaml: |-
|
||||
{{- include "k8smeta.configuration" . -}}
|
||||
{{- include "falco.containerPlugin" . -}}
|
||||
{{- toYaml .Values.falcoctl.config | nindent 4 }}
|
||||
{{- end }}
|
||||
|
|
|
@ -12,10 +12,24 @@ metadata:
|
|||
{{- if and .Values.certs (not .Values.certs.existingSecret) }}
|
||||
checksum/certs: {{ include (print $.Template.BasePath "/certs-secret.yaml") . | sha256sum }}
|
||||
{{- end }}
|
||||
{{- if .Values.driver.enabled }}
|
||||
{{- if (or (eq .Values.driver.kind "modern_ebpf") (eq .Values.driver.kind "modern-bpf")) }}
|
||||
{{- if .Values.driver.modernEbpf.leastPrivileged }}
|
||||
container.apparmor.security.beta.kubernetes.io/{{ .Chart.Name }}: unconfined
|
||||
{{- end }}
|
||||
{{- else if eq .Values.driver.kind "ebpf" }}
|
||||
{{- if .Values.driver.ebpf.leastPrivileged }}
|
||||
container.apparmor.security.beta.kubernetes.io/{{ .Chart.Name }}: unconfined
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- with .Values.podAnnotations }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if .Values.falco.podHostname }}
|
||||
hostname: {{ .Values.falco.podHostname }}
|
||||
{{- end }}
|
||||
serviceAccountName: {{ include "falco.serviceAccountName" . }}
|
||||
{{- with .Values.podSecurityContext }}
|
||||
securityContext:
|
||||
|
@ -49,6 +63,7 @@ spec:
|
|||
{{- if eq .Values.driver.kind "gvisor" }}
|
||||
hostNetwork: true
|
||||
hostPID: true
|
||||
dnsPolicy: ClusterFirstWithHostNet
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: {{ .Chart.Name }}
|
||||
|
@ -61,23 +76,16 @@ spec:
|
|||
args:
|
||||
- /usr/bin/falco
|
||||
{{- include "falco.configSyscallSource" . | indent 8 }}
|
||||
{{- with .Values.collectors }}
|
||||
{{- if .enabled }}
|
||||
{{- if .containerd.enabled }}
|
||||
- --cri
|
||||
- /run/containerd/containerd.sock
|
||||
{{- end }}
|
||||
{{- if .crio.enabled }}
|
||||
- --cri
|
||||
- /run/crio/crio.sock
|
||||
{{- end }}
|
||||
- -pk
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- with .Values.extra.args }}
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
env:
|
||||
- name: HOST_ROOT
|
||||
value: /host
|
||||
- name: FALCO_HOSTNAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
- name: FALCO_K8S_NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
|
@ -87,6 +95,10 @@ spec:
|
|||
{{- end }}
|
||||
tty: {{ .Values.tty }}
|
||||
{{- if .Values.falco.webserver.enabled }}
|
||||
ports:
|
||||
- containerPort: {{ .Values.falco.webserver.listen_port }}
|
||||
name: web
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
initialDelaySeconds: {{ .Values.healthChecks.livenessProbe.initialDelaySeconds }}
|
||||
timeoutSeconds: {{ .Values.healthChecks.livenessProbe.timeoutSeconds }}
|
||||
|
@ -109,6 +121,7 @@ spec:
|
|||
{{- end }}
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
{{- include "falco.containerPluginVolumeMounts" . | nindent 8 -}}
|
||||
{{- if or .Values.falcoctl.artifact.install.enabled .Values.falcoctl.artifact.follow.enabled }}
|
||||
{{- if has "rulesfile" .Values.falcoctl.config.artifact.allowedTypes }}
|
||||
- mountPath: /etc/falco
|
||||
|
@ -118,13 +131,15 @@ spec:
|
|||
- mountPath: /usr/share/falco/plugins
|
||||
name: plugins-install-dir
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if eq (include "driverLoader.enabled" .) "true" }}
|
||||
- mountPath: /etc/falco/config.d
|
||||
name: specialized-falco-configs
|
||||
{{- end }}
|
||||
- mountPath: /root/.falco
|
||||
name: root-falco-fs
|
||||
{{- if or .Values.driver.enabled .Values.mounts.enforceProcMount }}
|
||||
- mountPath: /host/proc
|
||||
name: proc-fs
|
||||
{{- end }}
|
||||
{{- if and .Values.driver.enabled (not .Values.driver.loader.enabled) }}
|
||||
readOnly: true
|
||||
- mountPath: /host/boot
|
||||
|
@ -141,33 +156,17 @@ spec:
|
|||
name: etc-fs
|
||||
readOnly: true
|
||||
{{- end -}}
|
||||
{{- if and .Values.driver.enabled (or (eq .Values.driver.kind "kmod") (eq .Values.driver.kind "module")) }}
|
||||
{{- if and .Values.driver.enabled (or (eq .Values.driver.kind "kmod") (eq .Values.driver.kind "module") (eq .Values.driver.kind "auto")) }}
|
||||
- mountPath: /host/dev
|
||||
name: dev-fs
|
||||
readOnly: true
|
||||
- name: sys-fs
|
||||
mountPath: /sys/module/falco
|
||||
mountPath: /sys/module
|
||||
{{- end }}
|
||||
{{- if and .Values.driver.enabled (and (eq .Values.driver.kind "ebpf") (contains "falco-no-driver" .Values.image.repository)) }}
|
||||
- name: debugfs
|
||||
mountPath: /sys/kernel/debug
|
||||
{{- end }}
|
||||
{{- with .Values.collectors }}
|
||||
{{- if .enabled }}
|
||||
{{- if .docker.enabled }}
|
||||
- mountPath: /host/var/run/docker.sock
|
||||
name: docker-socket
|
||||
{{- end }}
|
||||
{{- if .containerd.enabled }}
|
||||
- mountPath: /host/run/containerd/containerd.sock
|
||||
name: containerd-socket
|
||||
{{- end }}
|
||||
{{- if .crio.enabled }}
|
||||
- mountPath: /host/run/crio/crio.sock
|
||||
name: crio-socket
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
- mountPath: /etc/falco/falco.yaml
|
||||
name: falco-yaml
|
||||
subPath: falco.yaml
|
||||
|
@ -180,7 +179,7 @@ spec:
|
|||
name: certs-volume
|
||||
readOnly: true
|
||||
{{- end }}
|
||||
{{- if or .Values.certs.existingSecret (and .Values.certs.client.key .Values.certs.client.crt .Values.certs.ca.crt) }}
|
||||
{{- if or .Values.certs.existingClientSecret (and .Values.certs.client.key .Values.certs.client.crt .Values.certs.ca.crt) }}
|
||||
- mountPath: /etc/falco/certs/client
|
||||
name: client-certs-volume
|
||||
readOnly: true
|
||||
|
@ -217,6 +216,11 @@ spec:
|
|||
{{- include "falcoctl.initContainer" . | nindent 4 }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
{{- include "falco.containerPluginVolumes" . | nindent 4 -}}
|
||||
{{- if eq (include "driverLoader.enabled" .) "true" }}
|
||||
- name: specialized-falco-configs
|
||||
emptyDir: {}
|
||||
{{- end }}
|
||||
{{- if or .Values.falcoctl.artifact.install.enabled .Values.falcoctl.artifact.follow.enabled }}
|
||||
- name: plugins-install-dir
|
||||
emptyDir: {}
|
||||
|
@ -239,43 +243,22 @@ spec:
|
|||
hostPath:
|
||||
path: /etc
|
||||
{{- end }}
|
||||
{{- if and .Values.driver.enabled (or (eq .Values.driver.kind "kmod") (eq .Values.driver.kind "module")) }}
|
||||
{{- if and .Values.driver.enabled (or (eq .Values.driver.kind "kmod") (eq .Values.driver.kind "module") (eq .Values.driver.kind "auto")) }}
|
||||
- name: dev-fs
|
||||
hostPath:
|
||||
path: /dev
|
||||
- name: sys-fs
|
||||
hostPath:
|
||||
path: /sys/module/falco
|
||||
path: /sys/module
|
||||
{{- end }}
|
||||
{{- if and .Values.driver.enabled (and (eq .Values.driver.kind "ebpf") (contains "falco-no-driver" .Values.image.repository)) }}
|
||||
- name: debugfs
|
||||
hostPath:
|
||||
path: /sys/kernel/debug
|
||||
{{- end }}
|
||||
{{- with .Values.collectors }}
|
||||
{{- if .enabled }}
|
||||
{{- if .docker.enabled }}
|
||||
- name: docker-socket
|
||||
hostPath:
|
||||
path: {{ .docker.socket }}
|
||||
{{- end }}
|
||||
{{- if .containerd.enabled }}
|
||||
- name: containerd-socket
|
||||
hostPath:
|
||||
path: {{ .containerd.socket }}
|
||||
{{- end }}
|
||||
{{- if .crio.enabled }}
|
||||
- name: crio-socket
|
||||
hostPath:
|
||||
path: {{ .crio.socket }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if or .Values.driver.enabled .Values.mounts.enforceProcMount }}
|
||||
- name: proc-fs
|
||||
hostPath:
|
||||
path: /proc
|
||||
{{- end }}
|
||||
{{- if eq .Values.driver.kind "gvisor" }}
|
||||
- name: runsc-path
|
||||
hostPath:
|
||||
|
@ -317,7 +300,7 @@ spec:
|
|||
secretName: {{ include "falco.fullname" . }}-certs
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if or .Values.certs.existingSecret (and .Values.certs.client.key .Values.certs.client.crt .Values.certs.ca.crt) }}
|
||||
{{- if or .Values.certs.existingClientSecret (and .Values.certs.client.key .Values.certs.client.crt .Values.certs.ca.crt) }}
|
||||
- name: client-certs-volume
|
||||
secret:
|
||||
{{- if .Values.certs.existingClientSecret }}
|
||||
|
@ -340,9 +323,13 @@ spec:
|
|||
{{- with .Values.driver.loader.initContainer.args }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if eq .Values.driver.kind "ebpf" }}
|
||||
- ebpf
|
||||
{{- end }}
|
||||
{{- if eq .Values.driver.kind "module" }}
|
||||
- kmod
|
||||
{{- else if eq .Values.driver.kind "modern-bpf"}}
|
||||
- modern_ebpf
|
||||
{{- else }}
|
||||
- {{ .Values.driver.kind }}
|
||||
{{- end }}
|
||||
{{- with .Values.driver.loader.initContainer.resources }}
|
||||
resources:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
|
@ -350,7 +337,7 @@ spec:
|
|||
securityContext:
|
||||
{{- if .Values.driver.loader.initContainer.securityContext }}
|
||||
{{- toYaml .Values.driver.loader.initContainer.securityContext | nindent 4 }}
|
||||
{{- else if (or (eq .Values.driver.kind "kmod") (eq .Values.driver.kind "module")) }}
|
||||
{{- else if (or (eq .Values.driver.kind "kmod") (eq .Values.driver.kind "module") (eq .Values.driver.kind "auto")) }}
|
||||
privileged: true
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
|
@ -370,16 +357,31 @@ spec:
|
|||
- mountPath: /host/etc
|
||||
name: etc-fs
|
||||
readOnly: true
|
||||
- mountPath: /etc/falco/config.d
|
||||
name: specialized-falco-configs
|
||||
env:
|
||||
- name: HOST_ROOT
|
||||
value: /host
|
||||
{{- if .Values.driver.loader.initContainer.env }}
|
||||
{{- include "falco.renderTemplate" ( dict "value" .Values.driver.loader.initContainer.env "context" $) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if eq .Values.driver.kind "auto" }}
|
||||
- name: FALCOCTL_DRIVER_CONFIG_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: FALCOCTL_DRIVER_CONFIG_CONFIGMAP
|
||||
value: {{ include "falco.fullname" . }}
|
||||
{{- else }}
|
||||
- name: FALCOCTL_DRIVER_CONFIG_UPDATE_FALCO
|
||||
value: "false"
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
|
||||
{{- define "falco.securityContext" -}}
|
||||
{{- $securityContext := dict -}}
|
||||
{{- if .Values.driver.enabled -}}
|
||||
{{- if (or (eq .Values.driver.kind "kmod") (eq .Values.driver.kind "module")) -}}
|
||||
{{- if (or (eq .Values.driver.kind "kmod") (eq .Values.driver.kind "module") (eq .Values.driver.kind "auto")) -}}
|
||||
{{- $securityContext := set $securityContext "privileged" true -}}
|
||||
{{- end -}}
|
||||
{{- if eq .Values.driver.kind "ebpf" -}}
|
||||
|
|
|
@ -0,0 +1,17 @@
|
|||
{{- if and .Values.rbac.create (eq .Values.driver.kind "auto")}}
|
||||
kind: Role
|
||||
apiVersion: {{ include "rbac.apiVersion" . }}
|
||||
metadata:
|
||||
name: {{ include "falco.fullname" . }}
|
||||
labels:
|
||||
{{- include "falco.labels" . | nindent 4 }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- configmaps
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- update
|
||||
{{- end }}
|
|
@ -0,0 +1,16 @@
|
|||
{{- if and .Values.rbac.create (eq .Values.driver.kind "auto")}}
|
||||
kind: RoleBinding
|
||||
apiVersion: {{ include "rbac.apiVersion" . }}
|
||||
metadata:
|
||||
name: {{ include "falco.fullname" . }}
|
||||
labels:
|
||||
{{- include "falco.labels" . | nindent 4 }}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ include "falco.serviceAccountName" . }}
|
||||
namespace: {{ include "falco.namespace" . }}
|
||||
roleRef:
|
||||
kind: Role
|
||||
name: {{ include "falco.fullname" . }}
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
{{- end }}
|
|
@ -36,8 +36,8 @@ supplementalGroups:
|
|||
users:
|
||||
- system:serviceaccount:{{ include "falco.namespace" . }}:{{ include "falco.serviceAccountName" . }}
|
||||
volumes:
|
||||
- hostPath
|
||||
- emptyDir
|
||||
- secret
|
||||
- configMap
|
||||
{{- end }}
|
||||
- emptyDir
|
||||
- hostPath
|
||||
- secret
|
||||
{{- end }}
|
||||
|
|
|
@ -0,0 +1,26 @@
|
|||
{{- if and .Values.metrics.enabled .Values.metrics.service.create }}
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ include "falco.fullname" . }}-metrics
|
||||
namespace: {{ include "falco.namespace" . }}
|
||||
labels:
|
||||
{{- include "falco.labels" . | nindent 4 }}
|
||||
{{- with .Values.metrics.service.labels }}
|
||||
{{ toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
type: "falco-metrics"
|
||||
{{- with .Values.metrics.service.annotations }}
|
||||
annotations:
|
||||
{{ toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
type: {{ .Values.metrics.service.type }}
|
||||
ports:
|
||||
- port: {{ .Values.metrics.service.ports.metrics.port }}
|
||||
targetPort: {{ .Values.metrics.service.ports.metrics.targetPort }}
|
||||
protocol: {{ .Values.metrics.service.ports.metrics.protocol }}
|
||||
name: "metrics"
|
||||
selector:
|
||||
{{- include "falco.selectorLabels" . | nindent 4 }}
|
||||
{{- end }}
|
|
@ -0,0 +1,51 @@
|
|||
{{- if .Values.serviceMonitor.create }}
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
metadata:
|
||||
name: {{ include "falco.fullname" . }}
|
||||
{{- if .Values.serviceMonitor.namespace }}
|
||||
namespace: {{ tpl .Values.serviceMonitor.namespace . }}
|
||||
{{- else }}
|
||||
namespace: {{ include "falco.namespace" . }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "falco.labels" . | nindent 4 }}
|
||||
{{- with .Values.serviceMonitor.labels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
endpoints:
|
||||
- port: "{{ .Values.serviceMonitor.endpointPort }}"
|
||||
{{- with .Values.serviceMonitor.interval }}
|
||||
interval: {{ . }}
|
||||
{{- end }}
|
||||
{{- with .Values.serviceMonitor.scrapeTimeout }}
|
||||
scrapeTimeout: {{ . }}
|
||||
{{- end }}
|
||||
honorLabels: true
|
||||
path: {{ .Values.serviceMonitor.path }}
|
||||
scheme: {{ .Values.serviceMonitor.scheme }}
|
||||
{{- with .Values.serviceMonitor.tlsConfig }}
|
||||
tlsConfig:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.serviceMonitor.relabelings }}
|
||||
relabelings:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
jobLabel: "{{ .Release.Name }}"
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "falco.selectorLabels" . | nindent 6 }}
|
||||
{{- with .Values.serviceMonitor.selector }}
|
||||
{{- toYaml . | nindent 6 }}
|
||||
{{- end }}
|
||||
type: "falco-metrics"
|
||||
namespaceSelector:
|
||||
matchNames:
|
||||
- {{ include "falco.namespace" . }}
|
||||
{{- with .Values.serviceMonitor.targetLabels }}
|
||||
targetLabels:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
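A hedged values sketch for enabling the ServiceMonitor above; only the key names come from the template, the interval, timeout, and label values are assumptions:

```yaml
serviceMonitor:
  create: true
  endpointPort: metrics    # must match the port name on the falco-metrics Service
  interval: 15s
  scrapeTimeout: 10s
  path: /metrics
  scheme: http
  labels:
    release: kube-prometheus-stack   # placeholder; match your Prometheus selector
```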
|
|
@ -1,6 +1,10 @@
|
|||
|
||||
{{- if .Values.serviceAccount.create -}}
|
||||
apiVersion: v1
|
||||
{{- with .Values.serviceAccount.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml . | nindent 2 }}
|
||||
{{- end }}
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ include "falco.serviceAccountName" . }}
|
||||
|
|
|
@ -0,0 +1,35 @@
|
|||
// SPDX-License-Identifier: Apache-2.0
|
||||
// Copyright 2024 The Falco Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package unit
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/gruntwork-io/terratest/modules/helm"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
// ChartInfo returns the chart's information.
|
||||
func ChartInfo(t *testing.T, chartPath string) (map[string]interface{}, error) {
|
||||
// Get chart info.
|
||||
output, err := helm.RunHelmCommandAndGetOutputE(t, &helm.Options{}, "show", "chart", chartPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
chartInfo := map[string]interface{}{}
|
||||
err = yaml.Unmarshal([]byte(output), &chartInfo)
|
||||
return chartInfo, err
|
||||
}
|
|
@ -16,7 +16,14 @@
|
|||
package unit
|
||||
|
||||
const (
|
||||
releaseName = "rendered-resources"
|
||||
patternK8sMetacollectorFiles = `# Source: falco/charts/k8s-metacollector/templates/([^\n]+)`
|
||||
k8sMetaPluginName = "k8smeta"
|
||||
// ReleaseName is the name of the release we expect in the rendered resources.
|
||||
ReleaseName = "rendered-resources"
|
||||
// PatternK8sMetacollectorFiles is the regex pattern we expect to find in the rendered resources.
|
||||
PatternK8sMetacollectorFiles = `# Source: falco/charts/k8s-metacollector/templates/([^\n]+)`
|
||||
// K8sMetaPluginName is the name of the k8smeta plugin we expect in the falco configuration.
|
||||
K8sMetaPluginName = "k8smeta"
|
||||
// ContainerPluginName is the name of the container plugin we expect in the falco configuration.
|
||||
ContainerPluginName = "container"
|
||||
// ChartPath is the path to the chart.
|
||||
ChartPath = "../../.."
|
||||
)
|
||||
|
|
|
@ -0,0 +1,13 @@
|
|||
package containerPlugin
|
||||
|
||||
var volumeNames = []string{
|
||||
"docker-socket",
|
||||
"containerd-socket",
|
||||
"crio-socket",
|
||||
"container-engine-socket-0",
|
||||
"container-engine-socket-1",
|
||||
"container-engine-socket-2",
|
||||
"container-engine-socket-3",
|
||||
"container-engine-socket-4",
|
||||
"container-engine-socket-5",
|
||||
}
|
|
@ -0,0 +1,767 @@
|
|||
package containerPlugin
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
|
||||
"github.com/falcosecurity/charts/charts/falco/tests/unit"
|
||||
"github.com/gruntwork-io/terratest/modules/helm"
|
||||
)
|
||||
|
||||
func TestContainerPluginConfiguration(t *testing.T) {
|
||||
t.Parallel()
|
||||
helmChartPath, err := filepath.Abs(unit.ChartPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
values map[string]string
|
||||
expected func(t *testing.T, config any)
|
||||
}{
|
||||
{
|
||||
"defaultValues",
|
||||
nil,
|
||||
func(t *testing.T, config any) {
|
||||
plugin := config.(map[string]interface{})
|
||||
initConfig, ok := plugin["init_config"]
|
||||
require.True(t, ok)
|
||||
|
||||
initConfigMap := initConfig.(map[string]interface{})
|
||||
|
||||
// Check engines configurations.
|
||||
engines, ok := initConfigMap["engines"].(map[string]interface{})
|
||||
require.True(t, ok, "checking if engines section exists")
|
||||
require.Len(t, engines, 7, "checking number of engines")
|
||||
var engineConfig ContainerEngineConfig
|
||||
// Unmarshal the engines configuration.
|
||||
data, err := yaml.Marshal(engines)
|
||||
require.NoError(t, err)
|
||||
err = yaml.Unmarshal(data, &engineConfig)
|
||||
require.NoError(t, err)
|
||||
// Check the default values for each engine.
|
||||
require.True(t, engineConfig.Docker.Enabled)
|
||||
require.Equal(t, []string{"/var/run/docker.sock"}, engineConfig.Docker.Sockets)
|
||||
|
||||
require.True(t, engineConfig.Podman.Enabled)
|
||||
require.Equal(t, []string{"/run/podman/podman.sock"}, engineConfig.Podman.Sockets)
|
||||
|
||||
require.True(t, engineConfig.Containerd.Enabled)
|
||||
require.Equal(t, []string{"/run/host-containerd/containerd.sock"}, engineConfig.Containerd.Sockets)
|
||||
|
||||
require.True(t, engineConfig.CRI.Enabled)
|
||||
require.Equal(t, []string{"/run/containerd/containerd.sock", "/run/crio/crio.sock", "/run/k3s/containerd/containerd.sock", "/run/host-containerd/containerd.sock"}, engineConfig.CRI.Sockets)
|
||||
|
||||
require.True(t, engineConfig.LXC.Enabled)
|
||||
require.True(t, engineConfig.LibvirtLXC.Enabled)
|
||||
require.True(t, engineConfig.BPM.Enabled)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "changeDockerSocket",
|
||||
values: map[string]string{
|
||||
"collectors.containerEngine.engines.docker.enabled": "true",
|
||||
"collectors.containerEngine.engines.docker.sockets[0]": "/custom/docker.sock",
|
||||
},
|
||||
expected: func(t *testing.T, config any) {
|
||||
plugin := config.(map[string]interface{})
|
||||
initConfig, ok := plugin["init_config"]
|
||||
require.True(t, ok)
|
||||
|
||||
initConfigMap := initConfig.(map[string]interface{})
|
||||
engines, ok := initConfigMap["engines"].(map[string]interface{})
|
||||
require.True(t, ok)
|
||||
|
||||
var engineConfig ContainerEngineConfig
|
||||
data, err := yaml.Marshal(engines)
|
||||
require.NoError(t, err)
|
||||
err = yaml.Unmarshal(data, &engineConfig)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.True(t, engineConfig.Docker.Enabled)
|
||||
require.Equal(t, []string{"/custom/docker.sock"}, engineConfig.Docker.Sockets)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "changeCriSocket",
|
||||
values: map[string]string{
|
||||
"collectors.containerEngine.engines.cri.enabled": "true",
|
||||
"collectors.containerEngine.engines.cri.sockets[0]": "/custom/cri.sock",
|
||||
},
|
||||
expected: func(t *testing.T, config any) {
|
||||
plugin := config.(map[string]interface{})
|
||||
initConfig, ok := plugin["init_config"]
|
||||
require.True(t, ok)
|
||||
|
||||
initConfigMap := initConfig.(map[string]interface{})
|
||||
engines, ok := initConfigMap["engines"].(map[string]interface{})
|
||||
require.True(t, ok)
|
||||
|
||||
var engineConfig ContainerEngineConfig
|
||||
data, err := yaml.Marshal(engines)
|
||||
require.NoError(t, err)
|
||||
err = yaml.Unmarshal(data, &engineConfig)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.True(t, engineConfig.CRI.Enabled)
|
||||
require.Equal(t, []string{"/custom/cri.sock"}, engineConfig.CRI.Sockets)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "disableDockerSocket",
|
||||
values: map[string]string{
|
||||
"collectors.containerEngine.engines.docker.enabled": "false",
|
||||
},
|
||||
expected: func(t *testing.T, config any) {
|
||||
plugin := config.(map[string]interface{})
|
||||
initConfig, ok := plugin["init_config"]
|
||||
require.True(t, ok)
|
||||
|
||||
initConfigMap := initConfig.(map[string]interface{})
|
||||
engines, ok := initConfigMap["engines"].(map[string]interface{})
|
||||
require.True(t, ok)
|
||||
|
||||
var engineConfig ContainerEngineConfig
|
||||
data, err := yaml.Marshal(engines)
|
||||
require.NoError(t, err)
|
||||
err = yaml.Unmarshal(data, &engineConfig)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.False(t, engineConfig.Docker.Enabled)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "disableCriSocket",
|
||||
values: map[string]string{
|
||||
"collectors.containerEngine.engines.cri.enabled": "false",
|
||||
},
|
||||
expected: func(t *testing.T, config any) {
|
||||
plugin := config.(map[string]interface{})
|
||||
initConfig, ok := plugin["init_config"]
|
||||
require.True(t, ok)
|
||||
|
||||
initConfigMap := initConfig.(map[string]interface{})
|
||||
engines, ok := initConfigMap["engines"].(map[string]interface{})
|
||||
require.True(t, ok)
|
||||
|
||||
var engineConfig ContainerEngineConfig
|
||||
data, err := yaml.Marshal(engines)
|
||||
require.NoError(t, err)
|
||||
err = yaml.Unmarshal(data, &engineConfig)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.False(t, engineConfig.CRI.Enabled)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "changeContainerdSocket",
|
||||
values: map[string]string{
|
||||
"collectors.containerEngine.engines.containerd.enabled": "true",
|
||||
"collectors.containerEngine.engines.containerd.sockets[0]": "/custom/containerd.sock",
|
||||
},
|
||||
expected: func(t *testing.T, config any) {
|
||||
plugin := config.(map[string]interface{})
|
||||
initConfig, ok := plugin["init_config"]
|
||||
require.True(t, ok)
|
||||
|
||||
initConfigMap := initConfig.(map[string]interface{})
|
||||
engines, ok := initConfigMap["engines"].(map[string]interface{})
|
||||
require.True(t, ok)
|
||||
|
||||
var engineConfig ContainerEngineConfig
|
||||
data, err := yaml.Marshal(engines)
|
||||
require.NoError(t, err)
|
||||
err = yaml.Unmarshal(data, &engineConfig)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.True(t, engineConfig.Containerd.Enabled)
|
||||
require.Equal(t, []string{"/custom/containerd.sock"}, engineConfig.Containerd.Sockets)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "disableContainerdSocket",
|
||||
values: map[string]string{
|
||||
"collectors.containerEngine.engines.containerd.enabled": "false",
|
||||
},
|
||||
expected: func(t *testing.T, config any) {
|
||||
plugin := config.(map[string]interface{})
|
||||
initConfig, ok := plugin["init_config"]
|
||||
require.True(t, ok)
|
||||
|
||||
initConfigMap := initConfig.(map[string]interface{})
|
||||
engines, ok := initConfigMap["engines"].(map[string]interface{})
|
||||
require.True(t, ok)
|
||||
|
||||
var engineConfig ContainerEngineConfig
|
||||
data, err := yaml.Marshal(engines)
|
||||
require.NoError(t, err)
|
||||
err = yaml.Unmarshal(data, &engineConfig)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.False(t, engineConfig.Containerd.Enabled)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "defaultContainerEngineConfig",
|
||||
values: map[string]string{},
|
||||
expected: func(t *testing.T, config any) {
|
||||
plugin := config.(map[string]interface{})
|
||||
initConfig, ok := plugin["init_config"]
|
||||
require.True(t, ok)
|
||||
|
||||
initConfigMap := initConfig.(map[string]interface{})
|
||||
require.Equal(t, float64(100), initConfigMap["label_max_len"])
|
||||
require.False(t, initConfigMap["with_size"].(bool))
|
||||
|
||||
hooks := initConfigMap["hooks"].([]interface{})
|
||||
require.Len(t, hooks, 1)
|
||||
require.Contains(t, hooks, "create")
|
||||
|
||||
engines := initConfigMap["engines"].(map[string]interface{})
|
||||
var engineConfig ContainerEngineConfig
|
||||
data, err := yaml.Marshal(engines)
|
||||
require.NoError(t, err)
|
||||
err = yaml.Unmarshal(data, &engineConfig)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check default engine configurations
|
||||
require.True(t, engineConfig.Docker.Enabled)
|
||||
require.Equal(t, []string{"/var/run/docker.sock"}, engineConfig.Docker.Sockets)
|
||||
|
||||
require.True(t, engineConfig.Podman.Enabled)
|
||||
require.Equal(t, []string{"/run/podman/podman.sock"}, engineConfig.Podman.Sockets)
|
||||
|
||||
require.True(t, engineConfig.Containerd.Enabled)
|
||||
require.Equal(t, []string{"/run/host-containerd/containerd.sock"}, engineConfig.Containerd.Sockets)
|
||||
|
||||
require.True(t, engineConfig.CRI.Enabled)
|
||||
require.Equal(t, []string{"/run/containerd/containerd.sock", "/run/crio/crio.sock", "/run/k3s/containerd/containerd.sock", "/run/host-containerd/containerd.sock"}, engineConfig.CRI.Sockets)
|
||||
|
||||
require.True(t, engineConfig.LXC.Enabled)
|
||||
require.True(t, engineConfig.LibvirtLXC.Enabled)
|
||||
require.True(t, engineConfig.BPM.Enabled)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "customContainerEngineConfig",
|
||||
values: map[string]string{
|
||||
"collectors.docker.enabled": "false",
|
||||
"collectors.containerd.enabled": "false",
|
||||
"collectors.crio.enabled": "false",
|
||||
"collectors.containerEngine.enabled": "true",
|
||||
"collectors.containerEngine.labelMaxLen": "200",
|
||||
"collectors.containerEngine.withSize": "true",
|
||||
"collectors.containerEngine.hooks[0]": "create",
|
||||
"collectors.containerEngine.hooks[1]": "start",
|
||||
"collectors.containerEngine.engines.docker.enabled": "false",
|
||||
"collectors.containerEngine.engines.podman.enabled": "false",
|
||||
"collectors.containerEngine.engines.containerd.sockets[0]": "/custom/containerd.sock",
|
||||
"collectors.containerEngine.engines.cri.sockets[0]": "/custom/crio.sock",
|
||||
"collectors.containerEngine.engines.lxc.enabled": "false",
|
||||
"collectors.containerEngine.engines.libvirt_lxc.enabled": "false",
|
||||
"collectors.containerEngine.engines.bpm.enabled": "false",
|
||||
},
|
||||
expected: func(t *testing.T, config any) {
|
||||
plugin := config.(map[string]interface{})
|
||||
initConfig, ok := plugin["init_config"]
|
||||
require.True(t, ok)
|
||||
|
||||
initConfigMap := initConfig.(map[string]interface{})
|
||||
require.Equal(t, float64(200), initConfigMap["label_max_len"])
|
||||
require.True(t, initConfigMap["with_size"].(bool))
|
||||
|
||||
hooks := initConfigMap["hooks"].([]interface{})
|
||||
require.Len(t, hooks, 2)
|
||||
require.Contains(t, hooks, "create")
|
||||
require.Contains(t, hooks, "start")
|
||||
|
||||
engines := initConfigMap["engines"].(map[string]interface{})
|
||||
var engineConfig ContainerEngineConfig
|
||||
data, err := yaml.Marshal(engines)
|
||||
require.NoError(t, err)
|
||||
err = yaml.Unmarshal(data, &engineConfig)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check custom engine configurations
|
||||
require.False(t, engineConfig.Docker.Enabled)
|
||||
require.False(t, engineConfig.Podman.Enabled)
|
||||
|
||||
require.True(t, engineConfig.Containerd.Enabled)
|
||||
require.Equal(t, []string{"/custom/containerd.sock"}, engineConfig.Containerd.Sockets)
|
||||
|
||||
require.True(t, engineConfig.CRI.Enabled)
|
||||
require.Equal(t, []string{"/custom/crio.sock"}, engineConfig.CRI.Sockets)
|
||||
|
||||
require.False(t, engineConfig.LXC.Enabled)
|
||||
require.False(t, engineConfig.LibvirtLXC.Enabled)
|
||||
require.False(t, engineConfig.BPM.Enabled)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "customDockerEngineConfigInContainerEngine",
|
||||
values: map[string]string{
|
||||
"collectors.docker.enabled": "false",
|
||||
"collectors.containerd.enabled": "false",
|
||||
"collectors.crio.enabled": "false",
|
||||
"collectors.containerEngine.enabled": "true",
|
||||
"collectors.containerEngine.engines.docker.enabled": "false",
|
||||
"collectors.containerEngine.engines.docker.sockets[0]": "/custom/docker.sock",
|
||||
"collectors.containerEngine.engines.docker.sockets[1]": "/custom/docker.sock2",
|
||||
},
|
||||
expected: func(t *testing.T, config any) {
|
||||
plugin := config.(map[string]interface{})
|
||||
initConfig, ok := plugin["init_config"]
|
||||
require.True(t, ok)
|
||||
|
||||
initConfigMap := initConfig.(map[string]interface{})
|
||||
engines, ok := initConfigMap["engines"].(map[string]interface{})
|
||||
require.True(t, ok)
|
||||
|
||||
var engineConfig ContainerEngineConfig
|
||||
data, err := yaml.Marshal(engines)
|
||||
require.NoError(t, err)
|
||||
err = yaml.Unmarshal(data, &engineConfig)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check Docker engine configuration
|
||||
require.False(t, engineConfig.Docker.Enabled)
|
||||
require.Equal(t, []string{"/custom/docker.sock", "/custom/docker.sock2"}, engineConfig.Docker.Sockets)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "customContainerdEngineConfigInContainerEngine",
|
||||
values: map[string]string{
|
||||
"collectors.docker.enabled": "false",
|
||||
"collectors.containerd.enabled": "false",
|
||||
"collectors.crio.enabled": "false",
|
||||
"collectors.containerEngine.enabled": "true",
|
||||
"collectors.containerEngine.engines.containerd.enabled": "false",
|
||||
"collectors.containerEngine.engines.containerd.sockets[0]": "/custom/containerd.sock",
|
||||
"collectors.containerEngine.engines.containerd.sockets[1]": "/custom/containerd.sock2",
|
||||
},
|
||||
expected: func(t *testing.T, config any) {
|
||||
plugin := config.(map[string]interface{})
|
||||
initConfig, ok := plugin["init_config"]
|
||||
require.True(t, ok)
|
||||
|
||||
initConfigMap := initConfig.(map[string]interface{})
|
||||
engines, ok := initConfigMap["engines"].(map[string]interface{})
|
||||
require.True(t, ok)
|
||||
|
||||
var engineConfig ContainerEngineConfig
|
||||
data, err := yaml.Marshal(engines)
|
||||
require.NoError(t, err)
|
||||
err = yaml.Unmarshal(data, &engineConfig)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check Containerd engine configuration
|
||||
require.False(t, engineConfig.Containerd.Enabled)
|
||||
require.Equal(t, []string{"/custom/containerd.sock", "/custom/containerd.sock2"}, engineConfig.Containerd.Sockets)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "customPodmanEngineConfigInContainerEngine",
|
||||
values: map[string]string{
|
||||
"collectors.docker.enabled": "false",
|
||||
"collectors.containerd.enabled": "false",
|
||||
"collectors.crio.enabled": "false",
|
||||
"collectors.containerEngine.enabled": "true",
|
||||
"collectors.containerEngine.engines.podman.enabled": "true",
|
||||
"collectors.containerEngine.engines.podman.sockets[0]": "/custom/podman.sock",
|
||||
"collectors.containerEngine.engines.podman.sockets[1]": "/custom/podman.sock2",
|
||||
},
|
||||
expected: func(t *testing.T, config any) {
|
||||
plugin := config.(map[string]interface{})
|
||||
initConfig, ok := plugin["init_config"]
|
||||
require.True(t, ok)
|
||||
|
||||
initConfigMap := initConfig.(map[string]interface{})
|
||||
engines, ok := initConfigMap["engines"].(map[string]interface{})
|
||||
require.True(t, ok)
|
||||
|
||||
var engineConfig ContainerEngineConfig
|
||||
data, err := yaml.Marshal(engines)
|
||||
require.NoError(t, err)
|
||||
err = yaml.Unmarshal(data, &engineConfig)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check Podman engine configuration
|
||||
require.True(t, engineConfig.Podman.Enabled)
|
||||
require.Equal(t, []string{"/custom/podman.sock", "/custom/podman.sock2"}, engineConfig.Podman.Sockets)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "customCRIEngineConfigInContainerEngine",
|
||||
values: map[string]string{
|
||||
"collectors.docker.enabled": "false",
|
||||
"collectors.containerd.enabled": "false",
|
||||
"collectors.crio.enabled": "false",
|
||||
"collectors.containerEngine.enabled": "true",
|
||||
"collectors.containerEngine.engines.cri.enabled": "true",
|
||||
"collectors.containerEngine.engines.cri.sockets[0]": "/custom/cri.sock",
|
||||
"collectors.containerEngine.engines.cri.sockets[1]": "/custom/cri.sock2",
|
||||
},
|
||||
expected: func(t *testing.T, config any) {
|
||||
plugin := config.(map[string]interface{})
|
||||
initConfig, ok := plugin["init_config"]
|
||||
require.True(t, ok)
|
||||
|
||||
initConfigMap := initConfig.(map[string]interface{})
|
||||
engines, ok := initConfigMap["engines"].(map[string]interface{})
|
||||
require.True(t, ok)
|
||||
|
||||
var engineConfig ContainerEngineConfig
|
||||
data, err := yaml.Marshal(engines)
|
||||
require.NoError(t, err)
|
||||
err = yaml.Unmarshal(data, &engineConfig)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check CRI engine configuration
|
||||
require.True(t, engineConfig.CRI.Enabled)
|
||||
require.Equal(t, []string{"/custom/cri.sock", "/custom/cri.sock2"}, engineConfig.CRI.Sockets)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "customLXCEngineConfigInContainerEngine",
|
||||
values: map[string]string{
|
||||
"collectors.docker.enabled": "false",
|
||||
"collectors.containerd.enabled": "false",
|
||||
"collectors.crio.enabled": "false",
|
||||
"collectors.containerEngine.enabled": "true",
|
||||
"collectors.containerEngine.engines.lxc.enabled": "true",
|
||||
},
|
||||
expected: func(t *testing.T, config any) {
|
||||
plugin := config.(map[string]interface{})
|
||||
initConfig, ok := plugin["init_config"]
|
||||
require.True(t, ok)
|
||||
|
||||
initConfigMap := initConfig.(map[string]interface{})
|
||||
engines, ok := initConfigMap["engines"].(map[string]interface{})
|
||||
require.True(t, ok)
|
||||
|
||||
var engineConfig ContainerEngineConfig
|
||||
data, err := yaml.Marshal(engines)
|
||||
require.NoError(t, err)
|
||||
err = yaml.Unmarshal(data, &engineConfig)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check LXC engine configuration
|
||||
require.True(t, engineConfig.LXC.Enabled)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "customLibvirtLXCEngineConfigInContainerEngine",
|
||||
values: map[string]string{
|
||||
"collectors.docker.enabled": "false",
|
||||
"collectors.containerd.enabled": "false",
|
||||
"collectors.crio.enabled": "false",
|
||||
"collectors.containerEngine.enabled": "true",
|
||||
"collectors.containerEngine.engines.libvirt_lxc.enabled": "true",
|
||||
},
|
||||
expected: func(t *testing.T, config any) {
|
||||
plugin := config.(map[string]interface{})
|
||||
initConfig, ok := plugin["init_config"]
|
||||
require.True(t, ok)
|
||||
|
||||
initConfigMap := initConfig.(map[string]interface{})
|
||||
engines, ok := initConfigMap["engines"].(map[string]interface{})
|
||||
require.True(t, ok)
|
||||
|
||||
var engineConfig ContainerEngineConfig
|
||||
data, err := yaml.Marshal(engines)
|
||||
require.NoError(t, err)
|
||||
err = yaml.Unmarshal(data, &engineConfig)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check LibvirtLXC engine configuration
|
||||
require.True(t, engineConfig.LibvirtLXC.Enabled)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "customBPMEngineConfigInContainerEngine",
|
||||
values: map[string]string{
|
||||
"collectors.docker.enabled": "false",
|
||||
"collectors.containerd.enabled": "false",
|
||||
"collectors.crio.enabled": "false",
|
||||
"collectors.containerEngine.enabled": "true",
|
||||
"collectors.containerEngine.engines.bpm.enabled": "true",
|
||||
},
|
||||
expected: func(t *testing.T, config any) {
|
||||
plugin := config.(map[string]interface{})
|
||||
initConfig, ok := plugin["init_config"]
|
||||
require.True(t, ok)
|
||||
|
||||
initConfigMap := initConfig.(map[string]interface{})
|
||||
engines, ok := initConfigMap["engines"].(map[string]interface{})
|
||||
require.True(t, ok)
|
||||
|
||||
var engineConfig ContainerEngineConfig
|
||||
data, err := yaml.Marshal(engines)
|
||||
require.NoError(t, err)
|
||||
err = yaml.Unmarshal(data, &engineConfig)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check BPM engine configuration
|
||||
require.True(t, engineConfig.BPM.Enabled)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "allCollectorsDisabled",
|
||||
values: map[string]string{
|
||||
"collectors.docker.enabled": "false",
|
||||
"collectors.containerd.enabled": "false",
|
||||
"collectors.crio.enabled": "false",
|
||||
"collectors.containerEngine.enabled": "false",
|
||||
},
|
||||
expected: func(t *testing.T, config any) {
|
||||
// When config is nil, it means the plugin wasn't found in the configuration
|
||||
require.Nil(t, config, "container plugin should not be present in configuration when all collectors are disabled")
|
||||
|
||||
// If somehow the config exists (which it shouldn't), verify there are no engine configurations
|
||||
if config != nil {
|
||||
plugin := config.(map[string]interface{})
|
||||
initConfig, ok := plugin["init_config"]
|
||||
if ok {
|
||||
initConfigMap := initConfig.(map[string]interface{})
|
||||
engines, ok := initConfigMap["engines"]
|
||||
if ok {
|
||||
engineMap := engines.(map[string]interface{})
|
||||
require.Empty(t, engineMap, "engines configuration should be empty when all collectors are disabled")
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "allCollectorsDisabledTopLevel",
|
||||
values: map[string]string{
|
||||
"collectors.enabled": "false",
|
||||
},
|
||||
expected: func(t *testing.T, config any) {
|
||||
// When config is nil, it means the plugin wasn't found in the configuration
|
||||
require.Nil(t, config, "container plugin should not be present in configuration when all collectors are disabled")
|
||||
|
||||
// If somehow the config exists (which it shouldn't), verify there are no engine configurations
|
||||
if config != nil {
|
||||
plugin := config.(map[string]interface{})
|
||||
initConfig, ok := plugin["init_config"]
|
||||
if ok {
|
||||
initConfigMap := initConfig.(map[string]interface{})
|
||||
engines, ok := initConfigMap["engines"]
|
||||
if ok {
|
||||
engineMap := engines.(map[string]interface{})
|
||||
require.Empty(t, engineMap, "engines configuration should be empty when all collectors are disabled")
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
testCase := testCase
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
options := &helm.Options{SetValues: testCase.values}
|
||||
// Render the chart with the given options.
|
||||
output := helm.RenderTemplate(t, options, helmChartPath, unit.ReleaseName, []string{"templates/configmap.yaml"})
|
||||
|
||||
var cm corev1.ConfigMap
|
||||
// Unmarshal the output into a ConfigMap object.
|
||||
helm.UnmarshalK8SYaml(t, output, &cm)
|
||||
|
||||
// Unmarshal the data field of the ConfigMap into a map.
|
||||
var config map[string]interface{}
|
||||
helm.UnmarshalK8SYaml(t, cm.Data["falco.yaml"], &config)
|
||||
|
||||
// Extract the container plugin configuration.
|
||||
plugins, ok := config["plugins"]
|
||||
require.True(t, ok, "checking if plugins section exists")
|
||||
pluginsList := plugins.([]interface{})
|
||||
found := false
|
||||
|
||||
// Get the container plugin configuration.
|
||||
for _, plugin := range pluginsList {
|
||||
if name, ok := plugin.(map[string]interface{})["name"]; ok && name == unit.ContainerPluginName {
|
||||
testCase.expected(t, plugin)
|
||||
found = true
|
||||
}
|
||||
}
|
||||
|
||||
if found {
|
||||
// Check that the plugin has been added to the ones that are enabled.
|
||||
loadPlugins := config["load_plugins"]
|
||||
require.True(t, slices.Contains(loadPlugins.([]interface{}), unit.ContainerPluginName))
|
||||
} else {
|
||||
testCase.expected(t, nil)
|
||||
loadPlugins := config["load_plugins"]
|
||||
require.False(t, slices.Contains(loadPlugins.([]interface{}), unit.ContainerPluginName))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestInvalidCollectorConfiguration(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
helmChartPath, err := filepath.Abs(unit.ChartPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
values map[string]string
|
||||
expectedErr string
|
||||
}{
|
||||
{
|
||||
name: "dockerAndContainerEngine",
|
||||
values: map[string]string{
|
||||
"collectors.docker.enabled": "true",
|
||||
"collectoars.containerd.enabled": "false",
|
||||
"collectors.crio.enabled": "false",
|
||||
"collectors.containerEngine.enabled": "true",
|
||||
},
|
||||
expectedErr: "You can not enable any of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time. Please use the containerEngine configuration since the old configurations are deprecated.",
|
||||
},
|
||||
{
|
||||
name: "containerdAndContainerEngine",
|
||||
values: map[string]string{
|
||||
"collectors.docker.enabled": "false",
|
||||
"collectors.containerd.enabled": "true",
|
||||
"collectors.crio.enabled": "false",
|
||||
"collectors.containerEngine.enabled": "true",
|
||||
},
|
||||
expectedErr: "You can not enable any of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time. Please use the containerEngine configuration since the old configurations are deprecated.",
|
||||
},
|
||||
{
|
||||
name: "crioAndContainerEngine",
|
||||
values: map[string]string{
|
||||
"collectors.docker.enabled": "false",
|
||||
"collectoars.containerd.enabled": "false",
|
||||
"collectors.crio.enabled": "true",
|
||||
"collectors.containerEngine.enabled": "true",
|
||||
},
|
||||
expectedErr: "You can not enable any of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time. Please use the containerEngine configuration since the old configurations are deprecated.",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
options := &helm.Options{
|
||||
SetValues: tc.values,
|
||||
}
|
||||
|
||||
// Attempt to render the template, expect an error
|
||||
_, err := helm.RenderTemplateE(t, options, helmChartPath, unit.ReleaseName, []string{"templates/configmap.yaml"})
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), tc.expectedErr)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Test that the helper does not overwrite user's configuration.
|
||||
// And that the container reference is added to the configmap.
|
||||
func TestFalcoctlRefs(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
refShouldBeSet := func(t *testing.T, config any) {
|
||||
// Get artifact configuration map.
|
||||
configMap := config.(map[string]interface{})
|
||||
artifactConfig := (configMap["artifact"]).(map[string]interface{})
|
||||
// Test allowed types.
|
||||
allowedTypes := artifactConfig["allowedTypes"]
|
||||
require.Len(t, allowedTypes, 2)
|
||||
require.True(t, slices.Contains(allowedTypes.([]interface{}), "plugin"))
|
||||
require.True(t, slices.Contains(allowedTypes.([]interface{}), "rulesfile"))
|
||||
// Test plugin reference.
|
||||
refs := artifactConfig["install"].(map[string]interface{})["refs"].([]interface{})
|
||||
require.Len(t, refs, 2)
|
||||
require.True(t, slices.Contains(refs, "falco-rules:4"))
|
||||
require.True(t, slices.Contains(refs, "ghcr.io/falcosecurity/plugins/plugin/container:0.3.5"))
|
||||
}
|
||||
|
||||
refShouldNotBeSet := func(t *testing.T, config any) {
|
||||
// Get artifact configuration map.
|
||||
configMap := config.(map[string]interface{})
|
||||
artifactConfig := (configMap["artifact"]).(map[string]interface{})
|
||||
// Test allowed types.
|
||||
allowedTypes := artifactConfig["allowedTypes"]
|
||||
require.Len(t, allowedTypes, 2)
|
||||
require.True(t, slices.Contains(allowedTypes.([]interface{}), "plugin"))
|
||||
require.True(t, slices.Contains(allowedTypes.([]interface{}), "rulesfile"))
|
||||
// Test plugin reference.
|
||||
refs := artifactConfig["install"].(map[string]interface{})["refs"].([]interface{})
|
||||
require.Len(t, refs, 1)
|
||||
require.True(t, slices.Contains(refs, "falco-rules:4"))
|
||||
require.False(t, slices.Contains(refs, "ghcr.io/falcosecurity/plugins/plugin/container:0.3.5"))
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
values map[string]string
|
||||
expected func(t *testing.T, config any)
|
||||
}{
|
||||
{
|
||||
"defaultValues",
|
||||
nil,
|
||||
refShouldBeSet,
|
||||
},
|
||||
{
|
||||
"setPluginConfiguration",
|
||||
map[string]string{
|
||||
"collectors.enabled": "false",
|
||||
},
|
||||
refShouldNotBeSet,
|
||||
},
|
||||
{
|
||||
"driver disabled",
|
||||
map[string]string{
|
||||
"driver.enabled": "false",
|
||||
},
|
||||
refShouldNotBeSet,
|
||||
},
|
||||
}
|
||||
|
||||
helmChartPath, err := filepath.Abs(unit.ChartPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, testCase := range testCases {
|
||||
testCase := testCase
|
||||
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
options := &helm.Options{SetValues: testCase.values}
|
||||
output := helm.RenderTemplate(t, options, helmChartPath, unit.ReleaseName, []string{"templates/falcoctl-configmap.yaml"})
|
||||
|
||||
var cm corev1.ConfigMap
|
||||
helm.UnmarshalK8SYaml(t, output, &cm)
|
||||
var config map[string]interface{}
|
||||
helm.UnmarshalK8SYaml(t, cm.Data["falcoctl.yaml"], &config)
|
||||
testCase.expected(t, config)
|
||||
})
|
||||
}
|
||||
}
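
// A hedged sketch, not part of the chart's test suite: refShouldBeSet and
// refShouldNotBeSet above repeat the same chain of type assertions to reach the
// install refs. A small helper like the hypothetical one below (it assumes the
// same imports as this file: "testing" and "github.com/stretchr/testify/require")
// could be shared by both closures.
func getInstallRefs(t *testing.T, config any) []interface{} {
	configMap, ok := config.(map[string]interface{})
	require.True(t, ok, "falcoctl config should be a map")
	artifactConfig, ok := configMap["artifact"].(map[string]interface{})
	require.True(t, ok, "artifact section should be a map")
	install, ok := artifactConfig["install"].(map[string]interface{})
	require.True(t, ok, "install section should be a map")
	refs, ok := install["refs"].([]interface{})
	require.True(t, ok, "install.refs should be a list")
	return refs
}
// With such a helper, the closures above would reduce to length and
// slices.Contains checks on its return value.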
|
||||
|
||||
type ContainerEngineSocket struct {
|
||||
Enabled bool `yaml:"enabled"`
|
||||
Sockets []string `yaml:"sockets,omitempty"`
|
||||
}
|
||||
|
||||
type ContainerEngineConfig struct {
|
||||
Docker ContainerEngineSocket `yaml:"docker"`
|
||||
Podman ContainerEngineSocket `yaml:"podman"`
|
||||
Containerd ContainerEngineSocket `yaml:"containerd"`
|
||||
CRI ContainerEngineSocket `yaml:"cri"`
|
||||
LXC ContainerEngineSocket `yaml:"lxc"`
|
||||
LibvirtLXC ContainerEngineSocket `yaml:"libvirt_lxc"`
|
||||
BPM ContainerEngineSocket `yaml:"bpm"`
|
||||
}
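
// A minimal sketch, assuming gopkg.in/yaml.v3 (already imported elsewhere in these
// tests) and the field layout declared above: the ContainerEngineSocket and
// ContainerEngineConfig types can decode the container plugin's "engines" init
// configuration straight from YAML. The helper name is illustrative only.
func decodeContainerEngines(t *testing.T, raw []byte) ContainerEngineConfig {
	var cfg ContainerEngineConfig
	require.NoError(t, yaml.Unmarshal(raw, &cfg))
	return cfg
}
// Example: decodeContainerEngines(t, []byte(`docker: {enabled: true, sockets: ["/var/run/docker.sock"]}`))
// yields a config with Docker.Enabled == true and a single socket path.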
|
|
@ -0,0 +1,310 @@
|
|||
package containerPlugin
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"testing"
|
||||
|
||||
"github.com/falcosecurity/charts/charts/falco/tests/unit"
|
||||
"github.com/gruntwork-io/terratest/modules/helm"
|
||||
"github.com/stretchr/testify/require"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
func TestContainerPluginVolumeMounts(t *testing.T) {
|
||||
t.Parallel()
|
||||
helmChartPath, err := filepath.Abs(unit.ChartPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
values map[string]string
|
||||
expected func(t *testing.T, volumeMounts []corev1.VolumeMount)
|
||||
}{
|
||||
{
|
||||
name: "defaultValues",
|
||||
values: nil,
|
||||
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
|
||||
require.Len(t, volumeMounts, 6)
|
||||
require.Equal(t, "container-engine-socket-0", volumeMounts[0].Name)
|
||||
require.Equal(t, "/host/var/run/docker.sock", volumeMounts[0].MountPath)
|
||||
require.Equal(t, "container-engine-socket-1", volumeMounts[1].Name)
|
||||
require.Equal(t, "/host/run/podman/podman.sock", volumeMounts[1].MountPath)
|
||||
require.Equal(t, "container-engine-socket-2", volumeMounts[2].Name)
|
||||
require.Equal(t, "/host/run/host-containerd/containerd.sock", volumeMounts[2].MountPath)
|
||||
require.Equal(t, "container-engine-socket-3", volumeMounts[3].Name)
|
||||
require.Equal(t, "/host/run/containerd/containerd.sock", volumeMounts[3].MountPath)
|
||||
require.Equal(t, "container-engine-socket-4", volumeMounts[4].Name)
|
||||
require.Equal(t, "/host/run/crio/crio.sock", volumeMounts[4].MountPath)
|
||||
require.Equal(t, "container-engine-socket-5", volumeMounts[5].Name)
|
||||
require.Equal(t, "/host/run/k3s/containerd/containerd.sock", volumeMounts[5].MountPath)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "defaultDockerVolumeMount",
|
||||
values: map[string]string{
|
||||
"collectors.containerEngine.engines.docker.enabled": "true",
|
||||
"collectors.containerEngine.engines.containerd.enabled": "false",
|
||||
"collectors.containerEngine.engines.cri.enabled": "false",
|
||||
"collectors.containerEngine.engines.podman.enabled": "false",
|
||||
},
|
||||
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
|
||||
require.Len(t, volumeMounts, 1)
|
||||
require.Equal(t, "container-engine-socket-0", volumeMounts[0].Name)
|
||||
require.Equal(t, "/host/var/run/docker.sock", volumeMounts[0].MountPath)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "customDockerSocket",
|
||||
values: map[string]string{
|
||||
"collectors.containerEngine.engines.docker.enabled": "true",
|
||||
"collectors.containerEngine.engines.docker.sockets[0]": "/custom/docker.sock",
|
||||
"collectors.containerEngine.engines.containerd.enabled": "false",
|
||||
"collectors.containerEngine.engines.cri.enabled": "false",
|
||||
"collectors.containerEngine.engines.podman.enabled": "false",
|
||||
},
|
||||
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
|
||||
require.Len(t, volumeMounts, 1)
|
||||
require.Equal(t, "container-engine-socket-0", volumeMounts[0].Name)
|
||||
require.Equal(t, "/host/custom/docker.sock", volumeMounts[0].MountPath)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "defaultCriVolumeMount",
|
||||
values: map[string]string{
|
||||
"collectors.containerEngine.engines.docker.enabled": "false",
|
||||
"collectors.containerEngine.engines.containerd.enabled": "false",
|
||||
"collectors.containerEngine.engines.cri.enabled": "true",
|
||||
"collectors.containerEngine.engines.podman.enabled": "false",
|
||||
},
|
||||
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
|
||||
require.Len(t, volumeMounts, 4)
|
||||
require.Equal(t, "container-engine-socket-0", volumeMounts[0].Name)
|
||||
require.Equal(t, "/host/run/containerd/containerd.sock", volumeMounts[0].MountPath)
|
||||
require.Equal(t, "container-engine-socket-1", volumeMounts[1].Name)
|
||||
require.Equal(t, "/host/run/crio/crio.sock", volumeMounts[1].MountPath)
|
||||
require.Equal(t, "container-engine-socket-2", volumeMounts[2].Name)
|
||||
require.Equal(t, "/host/run/k3s/containerd/containerd.sock", volumeMounts[2].MountPath)
|
||||
require.Equal(t, "container-engine-socket-3", volumeMounts[3].Name)
|
||||
require.Equal(t, "/host/run/host-containerd/containerd.sock", volumeMounts[3].MountPath)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "customCriSocket",
|
||||
values: map[string]string{
|
||||
"collectors.containerEngine.engines.cri.enabled": "true",
|
||||
"collectors.containerEngine.engines.cri.sockets[0]": "/custom/crio.sock",
|
||||
"collectors.containerEngine.engines.containerd.enabled": "false",
|
||||
"collectors.containerEngine.engines.docker.enabled": "false",
|
||||
"collectors.containerEngine.engines.podman.enabled": "false",
|
||||
},
|
||||
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
|
||||
require.Len(t, volumeMounts, 1)
|
||||
require.Equal(t, "container-engine-socket-0", volumeMounts[0].Name)
|
||||
require.Equal(t, "/host/custom/crio.sock", volumeMounts[0].MountPath)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "defaultContainerdVolumeMount",
|
||||
values: map[string]string{
|
||||
"collectors.containerEngine.engines.docker.enabled": "false",
|
||||
"collectors.containerEngine.engines.containerd.enabled": "true",
|
||||
"collectors.containerEngine.engines.cri.enabled": "false",
|
||||
"collectors.containerEngine.engines.podman.enabled": "false",
|
||||
},
|
||||
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
|
||||
require.Len(t, volumeMounts, 1)
|
||||
require.Equal(t, "container-engine-socket-0", volumeMounts[0].Name)
|
||||
require.Equal(t, "/host/run/host-containerd/containerd.sock", volumeMounts[0].MountPath)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "customContainerdSocket",
|
||||
values: map[string]string{
|
||||
"collectors.containerEngine.engines.containerd.enabled": "true",
|
||||
"collectors.containerEngine.engines.containerd.sockets[0]": "/custom/containerd.sock",
|
||||
"collectors.containerEngine.engines.cri.enabled": "false",
|
||||
"collectors.containerEngine.engines.docker.enabled": "false",
|
||||
"collectors.containerEngine.engines.podman.enabled": "false",
|
||||
},
|
||||
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
|
||||
require.Len(t, volumeMounts, 1)
|
||||
require.Equal(t, "container-engine-socket-0", volumeMounts[0].Name)
|
||||
require.Equal(t, "/host/custom/containerd.sock", volumeMounts[0].MountPath)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ContainerEnginesDefaultValues",
|
||||
values: map[string]string{},
|
||||
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
|
||||
require.Len(t, volumeMounts, 6)
|
||||
|
||||
// dockerV := findVolumeMount("docker-socket-0", volumeMounts)
|
||||
// require.NotNil(t, dockerV)
|
||||
// require.Equal(t, "/host/var/run/docker.sock", dockerV.MountPath)
|
||||
|
||||
// podmanV := findVolumeMount("podman-socket-0", volumeMounts)
|
||||
// require.NotNil(t, podmanV)
|
||||
// require.Equal(t, "/host/run/podman/podman.sock", podmanV.MountPath)
|
||||
|
||||
// containerdV := findVolumeMount("containerd-socket-0", volumeMounts)
|
||||
// require.NotNil(t, containerdV)
|
||||
// require.Equal(t, "/host/run/host-containerd/containerd.sock", containerdV.MountPath)
|
||||
|
||||
// crioV0 := findVolumeMount("cri-socket-0", volumeMounts)
|
||||
// require.NotNil(t, crioV0)
|
||||
// require.Equal(t, "/host/run/containerd/containerd.sock", crioV0.MountPath)
|
||||
|
||||
// crioV1 := findVolumeMount("cri-socket-1", volumeMounts)
|
||||
// require.NotNil(t, crioV1)
|
||||
// require.Equal(t, "/host/run/crio/crio.sock", crioV1.MountPath)
|
||||
|
||||
// crioV2 := findVolumeMount("cri-socket-2", volumeMounts)
|
||||
// require.NotNil(t, crioV2)
|
||||
// require.Equal(t, "/host/run/k3s/containerd/containerd.sock", crioV2.MountPath)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ContainerEnginesDockerWithMultipleSockets",
|
||||
values: map[string]string{
|
||||
"collectors.containerEngine.engines.docker.enabled": "true",
|
||||
"collectors.containerEngine.engines.docker.sockets[0]": "/var/run/docker.sock",
|
||||
"collectors.containerEngine.engines.docker.sockets[1]": "/custom/docker.sock",
|
||||
"collectors.containerEngine.engines.containerd.enabled": "false",
|
||||
"collectors.containerEngine.engines.cri.enabled": "false",
|
||||
"collectors.containerEngine.engines.podman.enabled": "false",
|
||||
},
|
||||
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
|
||||
require.Len(t, volumeMounts, 2)
|
||||
|
||||
dockerV0 := findVolumeMount("container-engine-socket-0", volumeMounts)
|
||||
require.NotNil(t, dockerV0)
|
||||
require.Equal(t, "/host/var/run/docker.sock", dockerV0.MountPath)
|
||||
|
||||
dockerV1 := findVolumeMount("container-engine-socket-1", volumeMounts)
|
||||
require.NotNil(t, dockerV1)
|
||||
require.Equal(t, "/host/custom/docker.sock", dockerV1.MountPath)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ContainerEnginesCrioWithMultipleSockets",
|
||||
values: map[string]string{
|
||||
"collectors.containerEngine.engines.docker.enabled": "false",
|
||||
"collectors.containerEngine.engines.containerd.enabled": "false",
|
||||
"collectors.containerEngine.engines.cri.enabled": "true",
|
||||
"collectors.containerEngine.engines.cri.sockets[0]": "/run/crio/crio.sock",
|
||||
"collectors.containerEngine.engines.cri.sockets[1]": "/custom/crio.sock",
|
||||
"collectors.containerEngine.engines.podman.enabled": "false",
|
||||
},
|
||||
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
|
||||
require.Len(t, volumeMounts, 2)
|
||||
|
||||
crioV0 := findVolumeMount("container-engine-socket-0", volumeMounts)
|
||||
require.NotNil(t, crioV0)
|
||||
require.Equal(t, "/host/run/crio/crio.sock", crioV0.MountPath)
|
||||
|
||||
crioV1 := findVolumeMount("container-engine-socket-1", volumeMounts)
|
||||
require.NotNil(t, crioV1)
|
||||
require.Equal(t, "/host/custom/crio.sock", crioV1.MountPath)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "noVolumeMountsWhenCollectorsDisabled",
|
||||
values: map[string]string{
|
||||
"collectors.enabled": "false",
|
||||
},
|
||||
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
|
||||
require.Len(t, volumeMounts, 0)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "noVolumeMountsWhenDriverDisabled",
|
||||
values: map[string]string{
|
||||
"driver.enabled": "false",
|
||||
},
|
||||
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
|
||||
require.Len(t, volumeMounts, 0)
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
options := &helm.Options{
|
||||
SetValues: tc.values,
|
||||
}
|
||||
|
||||
// Render the template
|
||||
output := helm.RenderTemplate(t, options, helmChartPath, unit.ReleaseName, []string{"templates/daemonset.yaml"})
|
||||
|
||||
// Parse the YAML output
|
||||
var daemonset appsv1.DaemonSet
|
||||
helm.UnmarshalK8SYaml(t, output, &daemonset)
|
||||
|
||||
// Find volumeMounts in the falco container
|
||||
var pluginVolumeMounts []corev1.VolumeMount
|
||||
for _, container := range daemonset.Spec.Template.Spec.Containers {
|
||||
if container.Name == "falco" {
|
||||
for _, volumeMount := range container.VolumeMounts {
|
||||
if slices.Contains(volumeNames, volumeMount.Name) {
|
||||
pluginVolumeMounts = append(pluginVolumeMounts, volumeMount)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Run the test case's assertions
|
||||
tc.expected(t, pluginVolumeMounts)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestInvalidVolumeMountConfiguration(t *testing.T) {
|
||||
t.Parallel()
|
||||
helmChartPath, err := filepath.Abs(unit.ChartPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
values map[string]string
|
||||
expectedErr string
|
||||
}{
|
||||
{
|
||||
name: "bothOldAndNewConfigEnabled",
|
||||
values: map[string]string{
|
||||
"collectors.docker.enabled": "true",
|
||||
"collectors.containerEngine.enabled": "true",
|
||||
},
|
||||
expectedErr: "You can not enable any of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
options := &helm.Options{
|
||||
SetValues: tc.values,
|
||||
}
|
||||
|
||||
// Attempt to render the template, expect an error
|
||||
_, err := helm.RenderTemplateE(t, options, helmChartPath, unit.ReleaseName, []string{"templates/daemonset.yaml"})
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), tc.expectedErr)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func findVolumeMount(name string, volumeMounts []corev1.VolumeMount) *corev1.VolumeMount {
|
||||
for _, v := range volumeMounts {
|
||||
if v.Name == name {
|
||||
return &v
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
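
// Illustrative only, not present in the diff: this file and the volumes test below
// both repeat the loop that filters the "falco" container's mounts by volumeNames.
// A shared extraction helper could look like the sketch below (it takes the name
// list as a parameter instead of relying on the package-level variable).
func falcoPluginVolumeMounts(ds appsv1.DaemonSet, names []string) []corev1.VolumeMount {
	var mounts []corev1.VolumeMount
	for _, container := range ds.Spec.Template.Spec.Containers {
		if container.Name != "falco" {
			continue
		}
		for _, vm := range container.VolumeMounts {
			if slices.Contains(names, vm.Name) {
				mounts = append(mounts, vm)
			}
		}
	}
	return mounts
}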
|
|
@ -0,0 +1,373 @@
|
|||
package containerPlugin
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"testing"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
|
||||
"github.com/falcosecurity/charts/charts/falco/tests/unit"
|
||||
"github.com/gruntwork-io/terratest/modules/helm"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestContainerPluginVolumes(t *testing.T) {
|
||||
t.Parallel()
|
||||
helmChartPath, err := filepath.Abs(unit.ChartPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
values map[string]string
|
||||
expected func(t *testing.T, volumes []corev1.Volume)
|
||||
}{
|
||||
{
|
||||
name: "defaultValues",
|
||||
values: nil,
|
||||
expected: func(t *testing.T, volumes []corev1.Volume) {
|
||||
require.Len(t, volumes, 6)
|
||||
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
|
||||
require.Equal(t, "/var/run/docker.sock", volumes[0].HostPath.Path)
|
||||
require.Equal(t, "container-engine-socket-1", volumes[1].Name)
|
||||
require.Equal(t, "/run/podman/podman.sock", volumes[1].HostPath.Path)
|
||||
require.Equal(t, "container-engine-socket-2", volumes[2].Name)
|
||||
require.Equal(t, "/run/host-containerd/containerd.sock", volumes[2].HostPath.Path)
|
||||
require.Equal(t, "container-engine-socket-3", volumes[3].Name)
|
||||
require.Equal(t, "/run/containerd/containerd.sock", volumes[3].HostPath.Path)
|
||||
require.Equal(t, "container-engine-socket-4", volumes[4].Name)
|
||||
require.Equal(t, "/run/crio/crio.sock", volumes[4].HostPath.Path)
|
||||
require.Equal(t, "container-engine-socket-5", volumes[5].Name)
|
||||
require.Equal(t, "/run/k3s/containerd/containerd.sock", volumes[5].HostPath.Path)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "defaultDockerVolume",
|
||||
values: map[string]string{
|
||||
"collectors.containerEngine.engines.docker.enabled": "true",
|
||||
"collectors.containerEngine.engines.containerd.enabled": "false",
|
||||
"collectors.containerEngine.engines.cri.enabled": "false",
|
||||
"collectors.containerEngine.engines.podman.enabled": "false",
|
||||
},
|
||||
expected: func(t *testing.T, volumes []corev1.Volume) {
|
||||
require.Len(t, volumes, 1)
|
||||
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
|
||||
require.Equal(t, "/var/run/docker.sock", volumes[0].HostPath.Path)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "customDockerSocket",
|
||||
values: map[string]string{
|
||||
"collectors.containerEngine.engines.docker.enabled": "true",
|
||||
"collectors.containerEngine.engines.docker.sockets[0]": "/custom/docker.sock",
|
||||
"collectors.containerEngine.engines.containerd.enabled": "false",
|
||||
"collectors.containerEngine.engines.cri.enabled": "false",
|
||||
"collectors.containerEngine.engines.podman.enabled": "false",
|
||||
},
|
||||
expected: func(t *testing.T, volumes []corev1.Volume) {
|
||||
require.Len(t, volumes, 1)
|
||||
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
|
||||
require.Equal(t, "/custom/docker.sock", volumes[0].HostPath.Path)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "defaultCriVolume",
|
||||
values: map[string]string{
|
||||
"collectors.containerEngine.engines.docker.enabled": "false",
|
||||
"collectors.containerEngine.engines.containerd.enabled": "false",
|
||||
"collectors.containerEngine.engines.cri.enabled": "true",
|
||||
"collectors.containerEngine.engines.podman.enabled": "false",
|
||||
},
|
||||
expected: func(t *testing.T, volumes []corev1.Volume) {
|
||||
require.Len(t, volumes, 4)
|
||||
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
|
||||
require.Equal(t, "/run/containerd/containerd.sock", volumes[0].HostPath.Path)
|
||||
require.Equal(t, "container-engine-socket-1", volumes[1].Name)
|
||||
require.Equal(t, "/run/crio/crio.sock", volumes[1].HostPath.Path)
|
||||
require.Equal(t, "container-engine-socket-2", volumes[2].Name)
|
||||
require.Equal(t, "/run/k3s/containerd/containerd.sock", volumes[2].HostPath.Path)
|
||||
require.Equal(t, "container-engine-socket-3", volumes[3].Name)
|
||||
require.Equal(t, "/run/host-containerd/containerd.sock", volumes[3].HostPath.Path)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "customCrioSocket",
|
||||
values: map[string]string{
|
||||
"collectors.containerEngine.engines.docker.enabled": "false",
|
||||
"collectors.containerEngine.engines.cri.enabled": "true",
|
||||
"collectors.containerEngine.engines.cri.sockets[0]": "/custom/crio.sock",
|
||||
"collectors.containerEngine.engines.containerd.enabled": "false",
|
||||
"collectors.containerEngine.engines.podman.enabled": "false",
|
||||
},
|
||||
expected: func(t *testing.T, volumes []corev1.Volume) {
|
||||
require.Len(t, volumes, 1)
|
||||
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
|
||||
require.Equal(t, "/custom/crio.sock", volumes[0].HostPath.Path)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "defaultContainerdVolume",
|
||||
values: map[string]string{
|
||||
"collectors.containerEngine.engines.docker.enabled": "false",
|
||||
"collectors.containerEngine.engines.containerd.enabled": "true",
|
||||
"collectors.containerEngine.engines.cri.enabled": "false",
|
||||
"collectors.containerEngine.engines.podman.enabled": "false",
|
||||
},
|
||||
expected: func(t *testing.T, volumes []corev1.Volume) {
|
||||
require.Len(t, volumes, 1)
|
||||
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
|
||||
require.Equal(t, "/run/host-containerd/containerd.sock", volumes[0].HostPath.Path)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "customContainerdSocket",
|
||||
values: map[string]string{
|
||||
"collectors.containerEngine.engines.docker.enabled": "false",
|
||||
"collectors.containerEngine.engines.containerd.enabled": "true",
|
||||
"collectors.containerEngine.engines.containerd.sockets[0]": "/custom/containerd.sock",
|
||||
"collectors.containerEngine.engines.cri.enabled": "false",
|
||||
"collectors.containerEngine.engines.podman.enabled": "false",
|
||||
},
|
||||
expected: func(t *testing.T, volumes []corev1.Volume) {
|
||||
require.Len(t, volumes, 1)
|
||||
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
|
||||
require.Equal(t, "/custom/containerd.sock", volumes[0].HostPath.Path)
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
name: "ContainerEnginesDefaultValues",
|
||||
values: map[string]string{
|
||||
"collectors.docker.enabled": "false",
|
||||
"collectors.containerd.enabled": "false",
|
||||
"collectors.crio.enabled": "false",
|
||||
"collectors.containerEngine.enabled": "true",
|
||||
},
|
||||
expected: func(t *testing.T, volumes []corev1.Volume) {
|
||||
require.Len(t, volumes, 6)
|
||||
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
|
||||
require.Equal(t, "/var/run/docker.sock", volumes[0].HostPath.Path)
|
||||
require.Equal(t, "container-engine-socket-1", volumes[1].Name)
|
||||
require.Equal(t, "/run/podman/podman.sock", volumes[1].HostPath.Path)
|
||||
require.Equal(t, "container-engine-socket-2", volumes[2].Name)
|
||||
require.Equal(t, "/run/host-containerd/containerd.sock", volumes[2].HostPath.Path)
|
||||
require.Equal(t, "container-engine-socket-3", volumes[3].Name)
|
||||
require.Equal(t, "/run/containerd/containerd.sock", volumes[3].HostPath.Path)
|
||||
require.Equal(t, "container-engine-socket-4", volumes[4].Name)
|
||||
require.Equal(t, "/run/crio/crio.sock", volumes[4].HostPath.Path)
|
||||
require.Equal(t, "container-engine-socket-5", volumes[5].Name)
|
||||
require.Equal(t, "/run/k3s/containerd/containerd.sock", volumes[5].HostPath.Path)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ContainerEnginesDockerWithMultipleSockets",
|
||||
values: map[string]string{
|
||||
"collectors.docker.enabled": "false",
|
||||
"collectors.containerd.enabled": "false",
|
||||
"collectors.crio.enabled": "false",
|
||||
"collectors.containerEngine.enabled": "true",
|
||||
"collectors.containerEngine.engines.docker.enabled": "true",
|
||||
"collectors.containerEngine.engines.docker.sockets[0]": "/var/run/docker.sock",
|
||||
"collectors.containerEngine.engines.docker.sockets[1]": "/custom/docker.sock",
|
||||
"collectors.containerEngine.engines.containerd.enabled": "false",
|
||||
"collectors.containerEngine.engines.cri.enabled": "false",
|
||||
"collectors.containerEngine.engines.podman.enabled": "false",
|
||||
},
|
||||
expected: func(t *testing.T, volumes []corev1.Volume) {
|
||||
require.Len(t, volumes, 2)
|
||||
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
|
||||
require.Equal(t, "/var/run/docker.sock", volumes[0].HostPath.Path)
|
||||
require.Equal(t, "container-engine-socket-1", volumes[1].Name)
|
||||
require.Equal(t, "/custom/docker.sock", volumes[1].HostPath.Path)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ContainerEnginesCrioWithMultipleSockets",
|
||||
values: map[string]string{
|
||||
"collectors.docker.enabled": "false",
|
||||
"collectors.containerd.enabled": "false",
|
||||
"collectors.crio.enabled": "false",
|
||||
"collectors.containerEngine.enabled": "true",
|
||||
"collectors.containerEngine.engines.docker.enabled": "false",
|
||||
"collectors.containerEngine.engines.containerd.enabled": "false",
|
||||
"collectors.containerEngine.engines.cri.enabled": "true",
|
||||
"collectors.containerEngine.engines.cri.sockets[0]": "/run/crio/crio.sock",
|
||||
"collectors.containerEngine.engines.cri.sockets[1]": "/custom/crio.sock",
|
||||
"collectors.containerEngine.engines.podman.enabled": "false",
|
||||
},
|
||||
expected: func(t *testing.T, volumes []corev1.Volume) {
|
||||
require.Len(t, volumes, 2)
|
||||
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
|
||||
require.Equal(t, "/run/crio/crio.sock", volumes[0].HostPath.Path)
|
||||
require.Equal(t, "container-engine-socket-1", volumes[1].Name)
|
||||
require.Equal(t, "/custom/crio.sock", volumes[1].HostPath.Path)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ContainerEnginesPodmanWithMultipleSockets",
|
||||
values: map[string]string{
|
||||
"collectors.docker.enabled": "false",
|
||||
"collectors.containerd.enabled": "false",
|
||||
"collectors.crio.enabled": "false",
|
||||
"collectors.containerEngine.enabled": "true",
|
||||
"collectors.containerEngine.engines.docker.enabled": "false",
|
||||
"collectors.containerEngine.engines.containerd.enabled": "false",
|
||||
"collectors.containerEngine.engines.cri.enabled": "false",
|
||||
"collectors.containerEngine.engines.podman.enabled": "true",
|
||||
"collectors.containerEngine.engines.podman.sockets[0]": "/run/podman/podman.sock",
|
||||
"collectors.containerEngine.engines.podman.sockets[1]": "/custom/podman.sock",
|
||||
},
|
||||
expected: func(t *testing.T, volumes []corev1.Volume) {
|
||||
require.Len(t, volumes, 2)
|
||||
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
|
||||
require.Equal(t, "/run/podman/podman.sock", volumes[0].HostPath.Path)
|
||||
require.Equal(t, "container-engine-socket-1", volumes[1].Name)
|
||||
require.Equal(t, "/custom/podman.sock", volumes[1].HostPath.Path)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ContainerEnginesContainerdWithMultipleSockets",
|
||||
values: map[string]string{
|
||||
"collectors.docker.enabled": "false",
|
||||
"collectors.containerd.enabled": "false",
|
||||
"collectors.crio.enabled": "false",
|
||||
"collectors.containerEngine.enabled": "true",
|
||||
"collectors.containerEngine.engines.docker.enabled": "false",
|
||||
"collectors.containerEngine.engines.containerd.enabled": "true",
|
||||
"collectors.containerEngine.engines.containerd.sockets[0]": "/run/containerd/containerd.sock",
|
||||
"collectors.containerEngine.engines.containerd.sockets[1]": "/custom/containerd.sock",
|
||||
"collectors.containerEngine.engines.cri.enabled": "false",
|
||||
"collectors.containerEngine.engines.podman.enabled": "false",
|
||||
},
|
||||
expected: func(t *testing.T, volumes []corev1.Volume) {
|
||||
require.Len(t, volumes, 2)
|
||||
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
|
||||
require.Equal(t, "/run/containerd/containerd.sock", volumes[0].HostPath.Path)
|
||||
require.Equal(t, "container-engine-socket-1", volumes[1].Name)
|
||||
require.Equal(t, "/custom/containerd.sock", volumes[1].HostPath.Path)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ContainerEnginesMultipleWithCustomSockets",
|
||||
values: map[string]string{
|
||||
"collectors.docker.enabled": "false",
|
||||
"collectors.containerd.enabled": "false",
|
||||
"collectors.crio.enabled": "false",
|
||||
"collectors.containerEngine.enabled": "true",
|
||||
"collectors.containerEngine.engines.docker.enabled": "true",
|
||||
"collectors.containerEngine.engines.docker.sockets[0]": "/custom/docker/socket.sock",
|
||||
"collectors.containerEngine.engines.containerd.enabled": "true",
|
||||
"collectors.containerEngine.engines.cri.enabled": "true",
|
||||
"collectors.containerEngine.engines.cri.sockets[0]": "/var/custom/crio.sock",
|
||||
"collectors.containerEngine.engines.podman.enabled": "true",
|
||||
"collectors.containerEngine.engines.podman.sockets[0]": "/run/podman/podman.sock",
|
||||
},
|
||||
expected: func(t *testing.T, volumes []corev1.Volume) {
|
||||
require.Len(t, volumes, 4)
|
||||
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
|
||||
require.Equal(t, "/custom/docker/socket.sock", volumes[0].HostPath.Path)
|
||||
require.Equal(t, "container-engine-socket-1", volumes[1].Name)
|
||||
require.Equal(t, "/run/podman/podman.sock", volumes[1].HostPath.Path)
|
||||
require.Equal(t, "container-engine-socket-2", volumes[2].Name)
|
||||
require.Equal(t, "/run/host-containerd/containerd.sock", volumes[2].HostPath.Path)
|
||||
require.Equal(t, "container-engine-socket-3", volumes[3].Name)
|
||||
require.Equal(t, "/var/custom/crio.sock", volumes[3].HostPath.Path)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "noVolumesWhenCollectorsDisabled",
|
||||
values: map[string]string{
|
||||
"collectors.enabled": "false",
|
||||
},
|
||||
expected: func(t *testing.T, volumes []corev1.Volume) {
|
||||
require.Len(t, volumes, 0)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "noVolumesWhenDriverDisabled",
|
||||
values: map[string]string{
|
||||
"driver.enabled": "false",
|
||||
},
|
||||
expected: func(t *testing.T, volumes []corev1.Volume) {
|
||||
require.Len(t, volumes, 0)
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
options := &helm.Options{
|
||||
SetValues: tc.values,
|
||||
}
|
||||
|
||||
// Render the template
|
||||
output := helm.RenderTemplate(t, options, helmChartPath, unit.ReleaseName, []string{"templates/daemonset.yaml"})
|
||||
|
||||
// Parse the YAML output
|
||||
var daemonset appsv1.DaemonSet
|
||||
helm.UnmarshalK8SYaml(t, output, &daemonset)
|
||||
|
||||
// Find volumes that match our container plugin pattern
|
||||
var pluginVolumes []corev1.Volume
|
||||
for _, volume := range daemonset.Spec.Template.Spec.Volumes {
|
||||
// Check if the volume is for container sockets
|
||||
if volume.HostPath != nil && slices.Contains(volumeNames, volume.Name) {
|
||||
pluginVolumes = append(pluginVolumes, volume)
|
||||
}
|
||||
}
|
||||
|
||||
// Run the test case's assertions
|
||||
tc.expected(t, pluginVolumes)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestInvalidVolumeConfiguration(t *testing.T) {
|
||||
t.Parallel()
|
||||
helmChartPath, err := filepath.Abs(unit.ChartPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
values map[string]string
|
||||
expectedErr string
|
||||
}{
|
||||
{
|
||||
name: "bothOldAndNewConfigEnabled",
|
||||
values: map[string]string{
|
||||
"collectors.docker.enabled": "true",
|
||||
"collectors.containerEngine.enabled": "true",
|
||||
},
|
||||
expectedErr: "You can not enable any of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
options := &helm.Options{
|
||||
SetValues: tc.values,
|
||||
}
|
||||
|
||||
// Attempt to render the template, expect an error
|
||||
_, err := helm.RenderTemplateE(t, options, helmChartPath, unit.ReleaseName, []string{"templates/daemonset.yaml"})
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), tc.expectedErr)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func findVolume(name string, volumes []corev1.Volume) *corev1.Volume {
|
||||
for _, v := range volumes {
|
||||
if v.Name == name {
|
||||
return &v
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
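
// The filters above rely on a package-level volumeNames slice whose declaration is
// outside the hunks shown here. The definition below is a plausible, purely
// hypothetical reconstruction based on the "container-engine-socket-<index>" names
// asserted throughout these tests; the real declaration in the repository may differ.
// It assumes "fmt" is available in this package.
var volumeNames = func() []string {
	names := make([]string, 0, 8)
	for i := 0; i < 8; i++ {
		names = append(names, fmt.Sprintf("container-engine-socket-%d", i))
	}
	return names
}()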
|
|
@ -1,131 +0,0 @@
|
|||
// SPDX-License-Identifier: Apache-2.0
|
||||
// Copyright 2024 The Falco Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package unit
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/gruntwork-io/terratest/modules/helm"
|
||||
"github.com/stretchr/testify/require"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
)
|
||||
|
||||
// TestDriverLoaderEnabled tests the helper that enables the driver loader based on the configuration.
|
||||
func TestDriverLoaderEnabled(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
helmChartPath, err := filepath.Abs(chartPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
values map[string]string
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
"defaultValues",
|
||||
nil,
|
||||
true,
|
||||
},
|
||||
{
|
||||
"driver.kind=modern-bpf",
|
||||
map[string]string{
|
||||
"driver.kind": "modern-bpf",
|
||||
},
|
||||
false,
|
||||
},
|
||||
{
|
||||
"driver.kind=modern_ebpf",
|
||||
map[string]string{
|
||||
"driver.kind": "modern_ebpf",
|
||||
},
|
||||
false,
|
||||
},
|
||||
{
|
||||
"driver.kind=gvisor",
|
||||
map[string]string{
|
||||
"driver.kind": "gvisor",
|
||||
},
|
||||
false,
|
||||
},
|
||||
{
|
||||
"driver.disabled",
|
||||
map[string]string{
|
||||
"driver.enabled": "false",
|
||||
},
|
||||
false,
|
||||
},
|
||||
{
|
||||
"driver.loader.disabled",
|
||||
map[string]string{
|
||||
"driver.loader.enabled": "false",
|
||||
},
|
||||
false,
|
||||
},
|
||||
{
|
||||
"driver.kind=kmod",
|
||||
map[string]string{
|
||||
"driver.kind": "kmod",
|
||||
},
|
||||
true,
|
||||
},
|
||||
{
|
||||
"driver.kind=module",
|
||||
map[string]string{
|
||||
"driver.kind": "module",
|
||||
},
|
||||
true,
|
||||
},
|
||||
{
|
||||
"driver.kind=ebpf",
|
||||
map[string]string{
|
||||
"driver.kind": "ebpf",
|
||||
},
|
||||
true,
|
||||
},
|
||||
{
|
||||
"driver.kind=kmod&driver.loader.disabled",
|
||||
map[string]string{
|
||||
"driver.kind": "kmod",
|
||||
"driver.loader.enabled": "false",
|
||||
},
|
||||
false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
testCase := testCase
|
||||
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
options := &helm.Options{SetValues: testCase.values}
|
||||
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/daemonset.yaml"})
|
||||
|
||||
var ds appsv1.DaemonSet
|
||||
helm.UnmarshalK8SYaml(t, output, &ds)
|
||||
found := false
|
||||
for i := range ds.Spec.Template.Spec.InitContainers {
|
||||
if ds.Spec.Template.Spec.InitContainers[i].Name == "falco-driver-loader" {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
|
||||
require.Equal(t, testCase.expected, found)
|
||||
})
|
||||
}
|
||||
}
|
|
@ -13,10 +13,11 @@
|
|||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package unit
|
||||
package falcoTemplates
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/falcosecurity/charts/charts/falco/tests/unit"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
@ -29,7 +30,7 @@ import (
|
|||
func TestDriverConfigInFalcoConfig(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
helmChartPath, err := filepath.Abs(chartPath)
|
||||
helmChartPath, err := filepath.Abs(unit.ChartPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
testCases := []struct {
|
||||
|
@ -41,10 +42,10 @@ func TestDriverConfigInFalcoConfig(t *testing.T) {
|
|||
"defaultValues",
|
||||
nil,
|
||||
func(t *testing.T, config any) {
|
||||
require.Len(t, config, 2, "should have only two items")
|
||||
require.Len(t, config, 4, "should have four items")
|
||||
kind, bufSizePreset, dropFailedExit, err := getKmodConfig(config)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "kmod", kind)
|
||||
require.Equal(t, "modern_ebpf", kind)
|
||||
require.Equal(t, float64(4), bufSizePreset)
|
||||
require.False(t, dropFailedExit)
|
||||
},
|
||||
|
@ -78,10 +79,11 @@ func TestDriverConfigInFalcoConfig(t *testing.T) {
|
|||
},
|
||||
},
|
||||
{
|
||||
"kmod=onfig",
|
||||
"kmod=config",
|
||||
map[string]string{
|
||||
"driver.kmod.bufSizePreset": "6",
|
||||
"driver.kmod.dropFailedExit": "true",
|
||||
"driver.kind": "module",
|
||||
},
|
||||
func(t *testing.T, config any) {
|
||||
require.Len(t, config, 2, "should have only two items")
|
||||
|
@ -93,7 +95,7 @@ func TestDriverConfigInFalcoConfig(t *testing.T) {
|
|||
},
|
||||
},
|
||||
{
|
||||
"kind=ebpf",
|
||||
"ebpf=config",
|
||||
map[string]string{
|
||||
"driver.kind": "ebpf",
|
||||
"driver.ebpf.bufSizePreset": "6",
|
||||
|
@ -111,7 +113,7 @@ func TestDriverConfigInFalcoConfig(t *testing.T) {
|
|||
},
|
||||
},
|
||||
{
|
||||
"ebpf=config",
|
||||
"kind=ebpf",
|
||||
map[string]string{
|
||||
"driver.kind": "ebpf",
|
||||
},
|
||||
|
@ -202,6 +204,35 @@ func TestDriverConfigInFalcoConfig(t *testing.T) {
|
|||
require.Equal(t, "/host/my/root/test/k8s.io", root)
|
||||
},
|
||||
},
|
||||
{
|
||||
"kind=auto",
|
||||
map[string]string{
|
||||
"driver.kind": "auto",
|
||||
},
|
||||
func(t *testing.T, config any) {
|
||||
require.Len(t, config, 4, "should have four items")
|
||||
// Check that configuration for kmod has been set.
|
||||
kind, bufSizePreset, dropFailedExit, err := getKmodConfig(config)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "modern_ebpf", kind)
|
||||
require.Equal(t, float64(4), bufSizePreset)
|
||||
require.False(t, dropFailedExit)
|
||||
// Check that configuration for ebpf has been set.
|
||||
kind, path, bufSizePreset, dropFailedExit, err := getEbpfConfig(config)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "modern_ebpf", kind)
|
||||
require.Equal(t, "${HOME}/.falco/falco-bpf.o", path)
|
||||
require.Equal(t, float64(4), bufSizePreset)
|
||||
require.False(t, dropFailedExit)
|
||||
// Check that configuration for modern_ebpf has been set.
|
||||
kind, bufSizePreset, cpusForEachBuffer, dropFailedExit, err := getModernEbpfConfig(config)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "modern_ebpf", kind)
|
||||
require.Equal(t, float64(4), bufSizePreset)
|
||||
require.Equal(t, float64(2), cpusForEachBuffer)
|
||||
require.False(t, dropFailedExit)
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
|
@ -211,7 +242,7 @@ func TestDriverConfigInFalcoConfig(t *testing.T) {
|
|||
t.Parallel()
|
||||
|
||||
options := &helm.Options{SetValues: testCase.values}
|
||||
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/configmap.yaml"})
|
||||
output := helm.RenderTemplate(t, options, helmChartPath, unit.ReleaseName, []string{"templates/configmap.yaml"})
|
||||
|
||||
var cm corev1.ConfigMap
|
||||
helm.UnmarshalK8SYaml(t, output, &cm)
|
||||
|
@ -227,16 +258,17 @@ func TestDriverConfigInFalcoConfig(t *testing.T) {
|
|||
func TestDriverConfigWithUnsupportedDriver(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
helmChartPath, err := filepath.Abs(chartPath)
|
||||
helmChartPath, err := filepath.Abs(unit.ChartPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
values := map[string]string{
|
||||
"driver.kind": "notExisting",
|
||||
}
|
||||
options := &helm.Options{SetValues: values}
|
||||
_, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/configmap.yaml"})
|
||||
_, err = helm.RenderTemplateE(t, options, helmChartPath, unit.ReleaseName, []string{"templates/configmap.yaml"})
|
||||
require.Error(t, err)
|
||||
require.True(t, strings.Contains(err.Error(), "unsupported driver kind: \"notExisting\". Supported drivers [kmod ebpf modern_ebpf gvisor], alias [module modern-bpf]"))
|
||||
require.True(t, strings.Contains(err.Error(),
|
||||
"unsupported driver kind: \"notExisting\". Supported drivers [kmod ebpf modern_ebpf gvisor auto], alias [module modern-bpf]"))
|
||||
}
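
// getKmodConfig (below), getEbpfConfig and getModernEbpfConfig are accessor helpers
// whose bodies fall outside the hunks shown here. The sketch below is a hypothetical
// reconstruction of the modern_ebpf accessor, inferred only from its call site in the
// "kind=auto" case above; the key names under "modern_ebpf" are assumptions.
func getModernEbpfConfig(config interface{}) (kind string, bufSizePreset, cpusForEachBuffer float64, dropFailedExit bool, err error) {
	engine, ok := config.(map[string]interface{})
	if !ok {
		return "", 0, 0, false, fmt.Errorf("engine section is not a map")
	}
	kind, _ = engine["kind"].(string)
	modernEbpf, ok := engine["modern_ebpf"].(map[string]interface{})
	if !ok {
		return kind, 0, 0, false, fmt.Errorf("modern_ebpf section is missing")
	}
	bufSizePreset, _ = modernEbpf["buf_size_preset"].(float64)
	cpusForEachBuffer, _ = modernEbpf["cpus_for_each_buffer"].(float64)
	dropFailedExit, _ = modernEbpf["drop_failed_exit"].(bool)
	return kind, bufSizePreset, cpusForEachBuffer, dropFailedExit, nil
}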
|
||||
|
||||
func getKmodConfig(config interface{}) (kind string, bufSizePreset float64, dropFailedExit bool, err error) {
|
|
@ -0,0 +1,266 @@
|
|||
// SPDX-License-Identifier: Apache-2.0
|
||||
// Copyright 2024 The Falco Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package falcoTemplates
|
||||
|
||||
import (
|
||||
"github.com/falcosecurity/charts/charts/falco/tests/unit"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
|
||||
"github.com/gruntwork-io/terratest/modules/helm"
|
||||
"github.com/stretchr/testify/require"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
)
|
||||
|
||||
var (
|
||||
namespaceEnvVar = v1.EnvVar{
|
||||
Name: "FALCOCTL_DRIVER_CONFIG_NAMESPACE",
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
FieldRef: &v1.ObjectFieldSelector{
|
||||
APIVersion: "",
|
||||
FieldPath: "metadata.namespace",
|
||||
},
|
||||
}}
|
||||
|
||||
configmapEnvVar = v1.EnvVar{
|
||||
Name: "FALCOCTL_DRIVER_CONFIG_CONFIGMAP",
|
||||
Value: unit.ReleaseName + "-falco",
|
||||
}
|
||||
|
||||
updateConfigMapEnvVar = v1.EnvVar{
|
||||
Name: "FALCOCTL_DRIVER_CONFIG_UPDATE_FALCO",
|
||||
Value: "false",
|
||||
}
|
||||
)
|
||||
|
||||
// TestDriverLoaderEnabled tests the helper that enables the driver loader based on the configuration.
|
||||
func TestDriverLoaderEnabled(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
helmChartPath, err := filepath.Abs(unit.ChartPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
values map[string]string
|
||||
expected func(t *testing.T, initContainer any)
|
||||
}{
|
||||
{
|
||||
"defaultValues",
|
||||
nil,
|
||||
func(t *testing.T, initContainer any) {
|
||||
container, ok := initContainer.(v1.Container)
|
||||
require.True(t, ok)
|
||||
|
||||
require.Contains(t, container.Args, "auto")
|
||||
require.True(t, *container.SecurityContext.Privileged)
|
||||
require.Contains(t, container.Env, namespaceEnvVar)
|
||||
require.Contains(t, container.Env, configmapEnvVar)
|
||||
require.NotContains(t, container.Env, updateConfigMapEnvVar)
|
||||
|
||||
// Check that the expected volumes are there.
|
||||
volumeMounts(t, container.VolumeMounts)
|
||||
},
|
||||
},
|
||||
{
|
||||
"driver.kind=modern-bpf",
|
||||
map[string]string{
|
||||
"driver.kind": "modern-bpf",
|
||||
},
|
||||
func(t *testing.T, initContainer any) {
|
||||
require.Equal(t, initContainer, nil)
|
||||
},
|
||||
},
|
||||
{
|
||||
"driver.kind=modern_ebpf",
|
||||
map[string]string{
|
||||
"driver.kind": "modern_ebpf",
|
||||
},
|
||||
func(t *testing.T, initContainer any) {
|
||||
require.Equal(t, initContainer, nil)
|
||||
},
|
||||
},
|
||||
{
|
||||
"driver.kind=gvisor",
|
||||
map[string]string{
|
||||
"driver.kind": "gvisor",
|
||||
},
|
||||
func(t *testing.T, initContainer any) {
|
||||
require.Equal(t, initContainer, nil)
|
||||
},
|
||||
},
|
||||
{
|
||||
"driver.disabled",
|
||||
map[string]string{
|
||||
"driver.enabled": "false",
|
||||
},
|
||||
func(t *testing.T, initContainer any) {
|
||||
require.Equal(t, initContainer, nil)
|
||||
},
|
||||
},
|
||||
{
|
||||
"driver.loader.disabled",
|
||||
map[string]string{
|
||||
"driver.loader.enabled": "false",
|
||||
},
|
||||
func(t *testing.T, initContainer any) {
|
||||
require.Equal(t, initContainer, nil)
|
||||
},
|
||||
},
|
||||
{
|
||||
"driver.kind=kmod",
|
||||
map[string]string{
|
||||
"driver.kind": "kmod",
|
||||
},
|
||||
func(t *testing.T, initContainer any) {
|
||||
container, ok := initContainer.(v1.Container)
|
||||
require.True(t, ok)
|
||||
|
||||
require.Contains(t, container.Args, "kmod")
|
||||
require.True(t, *container.SecurityContext.Privileged)
|
||||
require.NotContains(t, container.Env, namespaceEnvVar)
|
||||
require.NotContains(t, container.Env, configmapEnvVar)
|
||||
require.Contains(t, container.Env, updateConfigMapEnvVar)
|
||||
|
||||
// Check that the expected volumes are there.
|
||||
volumeMounts(t, container.VolumeMounts)
|
||||
},
|
||||
},
|
||||
{
|
||||
"driver.kind=module",
|
||||
map[string]string{
|
||||
"driver.kind": "module",
|
||||
},
|
||||
func(t *testing.T, initContainer any) {
|
||||
container, ok := initContainer.(v1.Container)
|
||||
require.True(t, ok)
|
||||
|
||||
require.Contains(t, container.Args, "kmod")
|
||||
require.True(t, *container.SecurityContext.Privileged)
|
||||
require.NotContains(t, container.Env, namespaceEnvVar)
|
||||
require.NotContains(t, container.Env, configmapEnvVar)
|
||||
require.Contains(t, container.Env, updateConfigMapEnvVar)
|
||||
|
||||
// Check that the expected volumes are there.
|
||||
volumeMounts(t, container.VolumeMounts)
|
||||
},
|
||||
},
|
||||
{
|
||||
"driver.kind=ebpf",
|
||||
map[string]string{
|
||||
"driver.kind": "ebpf",
|
||||
},
|
||||
func(t *testing.T, initContainer any) {
|
||||
container, ok := initContainer.(v1.Container)
|
||||
require.True(t, ok)
|
||||
|
||||
require.Contains(t, container.Args, "ebpf")
|
||||
require.Nil(t, container.SecurityContext)
|
||||
require.NotContains(t, container.Env, namespaceEnvVar)
|
||||
require.Contains(t, container.Env, updateConfigMapEnvVar)
|
||||
require.NotContains(t, container.Env, configmapEnvVar)
|
||||
|
||||
// Check that the expected volumes are there.
|
||||
volumeMounts(t, container.VolumeMounts)
|
||||
},
|
||||
},
|
||||
{
|
||||
"driver.kind=kmod&driver.loader.disabled",
|
||||
map[string]string{
|
||||
"driver.kind": "kmod",
|
||||
"driver.loader.enabled": "false",
|
||||
},
|
||||
func(t *testing.T, initContainer any) {
|
||||
require.Equal(t, initContainer, nil)
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
testCase := testCase
|
||||
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
options := &helm.Options{SetValues: testCase.values}
|
||||
output := helm.RenderTemplate(t, options, helmChartPath, unit.ReleaseName, []string{"templates/daemonset.yaml"})
|
||||
|
||||
var ds appsv1.DaemonSet
|
||||
helm.UnmarshalK8SYaml(t, output, &ds)
|
||||
for i := range ds.Spec.Template.Spec.InitContainers {
|
||||
if ds.Spec.Template.Spec.InitContainers[i].Name == "falco-driver-loader" {
|
||||
testCase.expected(t, ds.Spec.Template.Spec.InitContainers[i])
|
||||
return
|
||||
}
|
||||
}
|
||||
testCase.expected(t, nil)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// volumeMounts checks that the expected volume mounts have been configured.
|
||||
func volumeMounts(t *testing.T, volumeMounts []v1.VolumeMount) {
|
||||
rootFalcoFS := v1.VolumeMount{
|
||||
Name: "root-falco-fs",
|
||||
ReadOnly: false,
|
||||
MountPath: "/root/.falco",
|
||||
}
|
||||
require.Contains(t, volumeMounts, rootFalcoFS)
|
||||
|
||||
procFS := v1.VolumeMount{
|
||||
Name: "proc-fs",
|
||||
ReadOnly: true,
|
||||
MountPath: "/host/proc",
|
||||
}
|
||||
require.Contains(t, volumeMounts, procFS)
|
||||
|
||||
bootFS := v1.VolumeMount{
|
||||
Name: "boot-fs",
|
||||
ReadOnly: true,
|
||||
MountPath: "/host/boot",
|
||||
}
|
||||
require.Contains(t, volumeMounts, bootFS)
|
||||
|
||||
libModulesFS := v1.VolumeMount{
|
||||
Name: "lib-modules",
|
||||
ReadOnly: false,
|
||||
MountPath: "/host/lib/modules",
|
||||
}
|
||||
require.Contains(t, volumeMounts, libModulesFS)
|
||||
|
||||
usrFS := v1.VolumeMount{
|
||||
Name: "usr-fs",
|
||||
ReadOnly: true,
|
||||
MountPath: "/host/usr",
|
||||
}
|
||||
require.Contains(t, volumeMounts, usrFS)
|
||||
|
||||
etcFS := v1.VolumeMount{
|
||||
Name: "etc-fs",
|
||||
ReadOnly: true,
|
||||
MountPath: "/host/etc",
|
||||
}
|
||||
require.Contains(t, volumeMounts, etcFS)
|
||||
|
||||
specializedFalcoConfigs := v1.VolumeMount{
|
||||
Name: "specialized-falco-configs",
|
||||
ReadOnly: false,
|
||||
MountPath: "/etc/falco/config.d",
|
||||
}
|
||||
require.Contains(t, volumeMounts, specializedFalcoConfigs)
|
||||
}
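
// A purely illustrative variant of the helper above: the same assertions expressed
// as a table of expected mounts, using only values already asserted in this file.
func volumeMountsTable(t *testing.T, mounts []v1.VolumeMount) {
	expected := []v1.VolumeMount{
		{Name: "root-falco-fs", MountPath: "/root/.falco"},
		{Name: "proc-fs", ReadOnly: true, MountPath: "/host/proc"},
		{Name: "boot-fs", ReadOnly: true, MountPath: "/host/boot"},
		{Name: "lib-modules", MountPath: "/host/lib/modules"},
		{Name: "usr-fs", ReadOnly: true, MountPath: "/host/usr"},
		{Name: "etc-fs", ReadOnly: true, MountPath: "/host/etc"},
		{Name: "specialized-falco-configs", MountPath: "/etc/falco/config.d"},
	}
	for _, want := range expected {
		require.Contains(t, mounts, want)
	}
}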
|
|
@ -0,0 +1,145 @@
|
|||
// SPDX-License-Identifier: Apache-2.0
|
||||
// Copyright 2024 The Falco Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package falcoTemplates
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/falcosecurity/charts/charts/falco/tests/unit"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/gruntwork-io/terratest/modules/helm"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/stretchr/testify/suite"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
type grafanaDashboardsTemplateTest struct {
|
||||
suite.Suite
|
||||
chartPath string
|
||||
releaseName string
|
||||
namespace string
|
||||
templates []string
|
||||
}
|
||||
|
||||
func TestGrafanaDashboardsTemplate(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
chartFullPath, err := filepath.Abs(unit.ChartPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
suite.Run(t, &grafanaDashboardsTemplateTest{
|
||||
Suite: suite.Suite{},
|
||||
chartPath: chartFullPath,
|
||||
releaseName: "falco-test-dashboard",
|
||||
namespace: "falco-test-dashboard",
|
||||
templates: []string{"templates/falco-dashboard-grafana.yaml"},
|
||||
})
|
||||
}
|
||||
|
||||
func (g *grafanaDashboardsTemplateTest) TestCreationDefaultValues() {
|
||||
// Attempt to render the dashboard configmap and check that it is not rendered with default values.
|
||||
_, err := helm.RenderTemplateE(g.T(), &helm.Options{}, g.chartPath, g.releaseName, g.templates, fmt.Sprintf("--namespace=%s", g.namespace))
|
||||
g.Error(err, "should error")
|
||||
g.Equal("error while running command: exit status 1; Error: could not find template templates/falco-dashboard-grafana.yaml in chart", err.Error())
|
||||
}
|
||||
|
||||
func (g *grafanaDashboardsTemplateTest) TestConfig() {
|
||||
testCases := []struct {
|
||||
name string
|
||||
values map[string]string
|
||||
expected func(cm *corev1.ConfigMap)
|
||||
}{
|
||||
{"dashboard enabled",
|
||||
map[string]string{
|
||||
"grafana.dashboards.enabled": "true",
|
||||
},
|
||||
func(cm *corev1.ConfigMap) {
|
||||
// Check that the name is the expected one.
|
||||
g.Equal("falco-grafana-dashboard", cm.Name)
|
||||
// Check the namespace.
|
||||
g.Equal(g.namespace, cm.Namespace)
|
||||
g.Nil(cm.Annotations)
|
||||
},
|
||||
},
|
||||
{"namespace",
|
||||
map[string]string{
|
||||
"grafana.dashboards.enabled": "true",
|
||||
"grafana.dashboards.configMaps.falco.namespace": "custom-namespace",
|
||||
},
|
||||
func(cm *corev1.ConfigMap) {
|
||||
// Check that the name is the expected one.
|
||||
g.Equal("falco-grafana-dashboard", cm.Name)
|
||||
// Check the namespace.
|
||||
g.Equal("custom-namespace", cm.Namespace)
|
||||
g.Nil(cm.Annotations)
|
||||
},
|
||||
},
|
||||
{"folder",
|
||||
map[string]string{
|
||||
"grafana.dashboards.enabled": "true",
|
||||
"grafana.dashboards.configMaps.falco.folder": "custom-folder",
|
||||
},
|
||||
func(cm *corev1.ConfigMap) {
|
||||
// Check that the name is the expected one.
|
||||
g.Equal("falco-grafana-dashboard", cm.Name)
|
||||
g.NotNil(cm.Annotations)
|
||||
g.Len(cm.Annotations, 2)
|
||||
// Check sidecar annotation.
|
||||
val, ok := cm.Annotations["k8s-sidecar-target-directory"]
|
||||
g.True(ok)
|
||||
g.Equal("/tmp/dashboards/custom-folder", val)
|
||||
// Check grafana annotation.
|
||||
val, ok = cm.Annotations["grafana_dashboard_folder"]
|
||||
g.True(ok)
|
||||
g.Equal("custom-folder", val)
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
testCase := testCase
|
||||
|
||||
g.Run(testCase.name, func() {
|
||||
subT := g.T()
|
||||
subT.Parallel()
|
||||
|
||||
options := &helm.Options{SetValues: testCase.values}
|
||||
|
||||
// Render the configmap and unmarshal it.
|
||||
output, err := helm.RenderTemplateE(subT, options, g.chartPath, g.releaseName, g.templates, "--namespace="+g.namespace)
|
||||
g.NoError(err, "should succeed")
|
||||
var cfgMap corev1.ConfigMap
|
||||
helm.UnmarshalK8SYaml(subT, output, &cfgMap)
|
||||
|
||||
// Common checks
|
||||
// Check that it contains the right label.
|
||||
g.Contains(cfgMap.Labels, "grafana_dashboard")
|
||||
// Check that the dashboard is contained in the config map.
|
||||
file, err := os.Open("../../../dashboards/falco-dashboard.json")
|
||||
g.NoError(err)
|
||||
content, err := io.ReadAll(file)
|
||||
g.NoError(err)
|
||||
cfgData, ok := cfgMap.Data["falco-dashboard.json"]
|
||||
g.True(ok)
|
||||
g.Equal(strings.TrimRight(string(content), "\n"), cfgData)
|
||||
testCase.expected(&cfgMap)
|
||||
})
|
||||
}
|
||||
}
|
|
@ -0,0 +1,210 @@
|
|||
// SPDX-License-Identifier: Apache-2.0
|
||||
// Copyright 2024 The Falco Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package falcoTemplates
|
||||
|
||||
import (
|
||||
"github.com/falcosecurity/charts/charts/falco/tests/unit"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/gruntwork-io/terratest/modules/helm"
|
||||
"github.com/stretchr/testify/require"
|
||||
"gopkg.in/yaml.v3"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
type metricsConfig struct {
|
||||
Enabled bool `yaml:"enabled"`
|
||||
ConvertMemoryToMB bool `yaml:"convert_memory_to_mb"`
|
||||
IncludeEmptyValues bool `yaml:"include_empty_values"`
|
||||
KernelEventCountersEnabled bool `yaml:"kernel_event_counters_enabled"`
|
||||
KernelEventCountersPerCPUEnabled bool `yaml:"kernel_event_counters_per_cpu_enabled"`
|
||||
ResourceUtilizationEnabled bool `yaml:"resource_utilization_enabled"`
|
||||
RulesCountersEnabled bool `yaml:"rules_counters_enabled"`
|
||||
LibbpfStatsEnabled bool `yaml:"libbpf_stats_enabled"`
|
||||
OutputRule bool `yaml:"output_rule"`
|
||||
StateCountersEnabled bool `yaml:"state_counters_enabled"`
|
||||
Interval string `yaml:"interval"`
|
||||
}
|
||||
|
||||
type webServerConfig struct {
|
||||
Enabled bool `yaml:"enabled"`
|
||||
K8sHealthzEndpoint string `yaml:"k8s_healthz_endpoint"`
|
||||
ListenPort string `yaml:"listen_port"`
|
||||
PrometheusMetricsEnabled bool `yaml:"prometheus_metrics_enabled"`
|
||||
SSLCertificate string `yaml:"ssl_certificate"`
|
||||
SSLEnabled bool `yaml:"ssl_enabled"`
|
||||
Threadiness int `yaml:"threadiness"`
|
||||
}
|
||||
|
||||
func TestMetricsConfigInFalcoConfig(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
helmChartPath, err := filepath.Abs(unit.ChartPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
values map[string]string
|
||||
expected func(t *testing.T, metricsConfig, webServerConfig any)
|
||||
}{
|
||||
{
|
||||
"defaultValues",
|
||||
nil,
|
||||
func(t *testing.T, metricsConfig, webServerConfig any) {
|
||||
require.Len(t, metricsConfig, 11, "should have eleven items")
|
||||
|
||||
metrics, err := getMetricsConfig(metricsConfig)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, metrics)
|
||||
require.True(t, metrics.ConvertMemoryToMB)
|
||||
require.False(t, metrics.Enabled)
|
||||
require.False(t, metrics.IncludeEmptyValues)
|
||||
require.True(t, metrics.KernelEventCountersEnabled)
|
||||
require.True(t, metrics.ResourceUtilizationEnabled)
|
||||
require.True(t, metrics.RulesCountersEnabled)
|
||||
require.Equal(t, "1h", metrics.Interval)
|
||||
require.True(t, metrics.LibbpfStatsEnabled)
|
||||
require.True(t, metrics.OutputRule)
|
||||
require.True(t, metrics.StateCountersEnabled)
|
||||
require.False(t, metrics.KernelEventCountersPerCPUEnabled)
|
||||
|
||||
webServer, err := getWebServerConfig(webServerConfig)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, webServer)
|
||||
require.True(t, webServer.Enabled)
|
||||
require.False(t, webServer.PrometheusMetricsEnabled)
|
||||
},
|
||||
},
|
||||
{
|
||||
"metricsEnabled",
|
||||
map[string]string{
|
||||
"metrics.enabled": "true",
|
||||
},
|
||||
func(t *testing.T, metricsConfig, webServerConfig any) {
|
||||
require.Len(t, metricsConfig, 11, "should have eleven items")
|
||||
|
||||
metrics, err := getMetricsConfig(metricsConfig)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, metrics)
|
||||
require.True(t, metrics.ConvertMemoryToMB)
|
||||
require.True(t, metrics.Enabled)
|
||||
require.False(t, metrics.IncludeEmptyValues)
|
||||
require.True(t, metrics.KernelEventCountersEnabled)
|
||||
require.True(t, metrics.ResourceUtilizationEnabled)
|
||||
require.True(t, metrics.RulesCountersEnabled)
|
||||
require.Equal(t, "1h", metrics.Interval)
|
||||
require.True(t, metrics.LibbpfStatsEnabled)
|
||||
require.False(t, metrics.OutputRule)
|
||||
require.True(t, metrics.StateCountersEnabled)
|
||||
require.False(t, metrics.KernelEventCountersPerCPUEnabled)
|
||||
|
||||
webServer, err := getWebServerConfig(webServerConfig)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, webServer)
|
||||
require.True(t, webServer.Enabled)
|
||||
require.True(t, webServer.PrometheusMetricsEnabled)
|
||||
},
|
||||
},
|
||||
{
|
||||
"Flip/Change Values",
|
||||
map[string]string{
|
||||
"metrics.enabled": "true",
|
||||
"metrics.convertMemoryToMB": "false",
|
||||
"metrics.includeEmptyValues": "true",
|
||||
"metrics.kernelEventCountersEnabled": "false",
|
||||
"metrics.resourceUtilizationEnabled": "false",
|
||||
"metrics.rulesCountersEnabled": "false",
|
||||
"metrics.libbpfStatsEnabled": "false",
|
||||
"metrics.outputRule": "false",
|
||||
"metrics.stateCountersEnabled": "false",
|
||||
"metrics.interval": "1s",
|
||||
"metrics.kernelEventCountersPerCPUEnabled": "true",
|
||||
},
|
||||
func(t *testing.T, metricsConfig, webServerConfig any) {
|
||||
require.Len(t, metricsConfig, 11, "should have eleven items")
|
||||
|
||||
metrics, err := getMetricsConfig(metricsConfig)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, metrics)
|
||||
require.False(t, metrics.ConvertMemoryToMB)
|
||||
require.True(t, metrics.Enabled)
|
||||
require.True(t, metrics.IncludeEmptyValues)
|
||||
require.False(t, metrics.KernelEventCountersEnabled)
|
||||
require.False(t, metrics.ResourceUtilizationEnabled)
|
||||
require.False(t, metrics.RulesCountersEnabled)
|
||||
require.Equal(t, "1s", metrics.Interval)
|
||||
require.False(t, metrics.LibbpfStatsEnabled)
|
||||
require.False(t, metrics.OutputRule)
|
||||
require.False(t, metrics.StateCountersEnabled)
|
||||
require.True(t, metrics.KernelEventCountersPerCPUEnabled)
|
||||
|
||||
webServer, err := getWebServerConfig(webServerConfig)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, webServer)
|
||||
require.True(t, webServer.Enabled)
|
||||
require.True(t, webServer.PrometheusMetricsEnabled)
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
testCase := testCase
|
||||
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
options := &helm.Options{SetValues: testCase.values}
|
||||
output := helm.RenderTemplate(t, options, helmChartPath, unit.ReleaseName, []string{"templates/configmap.yaml"})
|
||||
|
||||
var cm corev1.ConfigMap
|
||||
helm.UnmarshalK8SYaml(t, output, &cm)
|
||||
var config map[string]interface{}
|
||||
|
||||
helm.UnmarshalK8SYaml(t, cm.Data["falco.yaml"], &config)
|
||||
metrics := config["metrics"]
|
||||
webServer := config["webserver"]
|
||||
testCase.expected(t, metrics, webServer)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func getMetricsConfig(config any) (*metricsConfig, error) {
|
||||
var metrics metricsConfig
|
||||
|
||||
metricsByte, err := yaml.Marshal(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := yaml.Unmarshal(metricsByte, &metrics); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &metrics, nil
|
||||
}
|
||||
|
||||
func getWebServerConfig(config any) (*webServerConfig, error) {
|
||||
var webServer webServerConfig
|
||||
webServerByte, err := yaml.Marshal(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := yaml.Unmarshal(webServerByte, &webServer); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &webServer, nil
|
||||
}
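Both helpers above use the same round trip: the loosely typed `any` value taken from the rendered falco.yaml is marshalled back to YAML and then unmarshalled into a typed struct, so assertions can be written against named fields instead of nested map lookups. A minimal usage sketch follows; the function name and assertions are illustrative, not part of the chart's test suite, and it relies on the helpers and imports already present in this file:

func TestDecodeMetricsAndWebServerSections(t *testing.T) {
	t.Parallel()

	helmChartPath, err := filepath.Abs(unit.ChartPath)
	require.NoError(t, err)

	// Render the configmap with default values and extract falco.yaml.
	output := helm.RenderTemplate(t, &helm.Options{}, helmChartPath, unit.ReleaseName, []string{"templates/configmap.yaml"})
	var cm corev1.ConfigMap
	helm.UnmarshalK8SYaml(t, output, &cm)
	var config map[string]interface{}
	helm.UnmarshalK8SYaml(t, cm.Data["falco.yaml"], &config)

	// Decode the loosely typed sections into the structs defined above.
	metrics, err := getMetricsConfig(config["metrics"])
	require.NoError(t, err)
	require.Equal(t, "1h", metrics.Interval)

	webServer, err := getWebServerConfig(config["webserver"])
	require.NoError(t, err)
	require.True(t, webServer.Enabled)
}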
|
|
@ -1,6 +1,7 @@
|
|||
package unit
|
||||
package falcoTemplates
|
||||
|
||||
import (
|
||||
"github.com/falcosecurity/charts/charts/falco/tests/unit"
|
||||
"github.com/gruntwork-io/terratest/modules/helm"
|
||||
"github.com/stretchr/testify/require"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
|
@ -12,7 +13,7 @@ import (
|
|||
func TestServiceAccount(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
helmChartPath, err := filepath.Abs(chartPath)
|
||||
helmChartPath, err := filepath.Abs(unit.ChartPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
testCases := []struct {
|
||||
|
@ -24,16 +25,16 @@ func TestServiceAccount(t *testing.T) {
|
|||
"defaultValues",
|
||||
nil,
|
||||
func(t *testing.T, sa *corev1.ServiceAccount) {
|
||||
require.Equal(t, sa.Name, "")
|
||||
require.Equal(t, sa.Name, "rendered-resources-falco")
|
||||
},
|
||||
},
|
||||
{
|
||||
"kind=kmod",
|
||||
"kind=auto",
|
||||
map[string]string{
|
||||
"serviceAccount.create": "true",
|
||||
"serviceAccount.create": "false",
|
||||
},
|
||||
func(t *testing.T, sa *corev1.ServiceAccount) {
|
||||
require.Equal(t, sa.Name, "rendered-resources-falco")
|
||||
require.Equal(t, sa.Name, "")
|
||||
},
|
||||
},
|
||||
}
|
||||
|
@ -45,7 +46,7 @@ func TestServiceAccount(t *testing.T) {
|
|||
t.Parallel()
|
||||
|
||||
options := &helm.Options{SetValues: testCase.values}
|
||||
output, err := helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/serviceaccount.yaml"})
|
||||
output, err := helm.RenderTemplateE(t, options, helmChartPath, unit.ReleaseName, []string{"templates/serviceaccount.yaml"})
|
||||
if err != nil {
|
||||
require.True(t, strings.Contains(err.Error(), "Error: could not find template templates/serviceaccount.yaml in chart"))
|
||||
}
|
|
@ -0,0 +1,160 @@
|
|||
// SPDX-License-Identifier: Apache-2.0
|
||||
// Copyright 2024 The Falco Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package falcoTemplates
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"github.com/falcosecurity/charts/charts/falco/tests/unit"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/gruntwork-io/terratest/modules/helm"
|
||||
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/stretchr/testify/suite"
|
||||
)
|
||||
|
||||
type serviceMonitorTemplateTest struct {
|
||||
suite.Suite
|
||||
chartPath string
|
||||
releaseName string
|
||||
namespace string
|
||||
templates []string
|
||||
}
|
||||
|
||||
func TestServiceMonitorTemplate(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
chartFullPath, err := filepath.Abs(unit.ChartPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
suite.Run(t, &serviceMonitorTemplateTest{
|
||||
Suite: suite.Suite{},
|
||||
chartPath: chartFullPath,
|
||||
releaseName: "falco-test",
|
||||
namespace: "falco-namespace-test",
|
||||
templates: []string{"templates/serviceMonitor.yaml"},
|
||||
})
|
||||
}
|
||||
|
||||
func (s *serviceMonitorTemplateTest) TestCreationDefaultValues() {
|
||||
// Render the servicemonitor and check that it has not been rendered.
|
||||
_, err := helm.RenderTemplateE(s.T(), &helm.Options{}, s.chartPath, s.releaseName, s.templates)
|
||||
s.Error(err, "should error")
|
||||
s.Equal("error while running command: exit status 1; Error: could not find template templates/serviceMonitor.yaml in chart", err.Error())
|
||||
}
|
||||
|
||||
func (s *serviceMonitorTemplateTest) TestEndpoint() {
|
||||
defaultEndpointsJSON := `[
|
||||
{
|
||||
"port": "metrics",
|
||||
"interval": "15s",
|
||||
"scrapeTimeout": "10s",
|
||||
"honorLabels": true,
|
||||
"path": "/metrics",
|
||||
"scheme": "http"
|
||||
}
|
||||
]`
|
||||
var defaultEndpoints []monitoringv1.Endpoint
|
||||
err := json.Unmarshal([]byte(defaultEndpointsJSON), &defaultEndpoints)
|
||||
s.NoError(err)
|
||||
|
||||
options := &helm.Options{SetValues: map[string]string{"serviceMonitor.create": "true"}}
|
||||
output := helm.RenderTemplate(s.T(), options, s.chartPath, s.releaseName, s.templates)
|
||||
|
||||
var svcMonitor monitoringv1.ServiceMonitor
|
||||
helm.UnmarshalK8SYaml(s.T(), output, &svcMonitor)
|
||||
|
||||
s.Len(svcMonitor.Spec.Endpoints, 1, "should have only one endpoint")
|
||||
s.True(reflect.DeepEqual(svcMonitor.Spec.Endpoints[0], defaultEndpoints[0]))
|
||||
}
|
||||
|
||||
func (s *serviceMonitorTemplateTest) TestNamespaceSelector() {
|
||||
selectorsLabelJson := `{
|
||||
"app.kubernetes.io/instance": "my-falco",
|
||||
"foo": "bar"
|
||||
}`
|
||||
options := &helm.Options{SetValues: map[string]string{"serviceMonitor.create": "true"},
|
||||
SetJsonValues: map[string]string{"serviceMonitor.selector": selectorsLabelJson}}
|
||||
output := helm.RenderTemplate(s.T(), options, s.chartPath, s.releaseName, s.templates)
|
||||
|
||||
var svcMonitor monitoringv1.ServiceMonitor
|
||||
helm.UnmarshalK8SYaml(s.T(), output, &svcMonitor)
|
||||
s.Len(svcMonitor.Spec.NamespaceSelector.MatchNames, 1)
|
||||
s.Equal("default", svcMonitor.Spec.NamespaceSelector.MatchNames[0])
|
||||
}
|
||||
|
||||
func (s *serviceMonitorTemplateTest) TestServiceMonitorSelector() {
|
||||
testCases := []struct {
|
||||
name string
|
||||
values string
|
||||
expected map[string]string
|
||||
}{
|
||||
{
|
||||
"defaultValues",
|
||||
"",
|
||||
map[string]string{
|
||||
"app.kubernetes.io/instance": "falco-test",
|
||||
"app.kubernetes.io/name": "falco",
|
||||
"type": "falco-metrics",
|
||||
},
|
||||
},
|
||||
{
|
||||
"customValues",
|
||||
`{
|
||||
"foo": "bar"
|
||||
}`,
|
||||
map[string]string{
|
||||
"app.kubernetes.io/instance": "falco-test",
|
||||
"app.kubernetes.io/name": "falco",
|
||||
"foo": "bar",
|
||||
"type": "falco-metrics",
|
||||
},
|
||||
},
|
||||
{
|
||||
"overwriteDefaultValues",
|
||||
`{
|
||||
"app.kubernetes.io/instance": "falco-overwrite",
|
||||
"foo": "bar"
|
||||
}`,
|
||||
map[string]string{
|
||||
"app.kubernetes.io/instance": "falco-overwrite",
|
||||
"app.kubernetes.io/name": "falco",
|
||||
"foo": "bar",
|
||||
"type": "falco-metrics",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
testCase := testCase
|
||||
|
||||
s.Run(testCase.name, func() {
|
||||
subT := s.T()
|
||||
subT.Parallel()
|
||||
|
||||
options := &helm.Options{SetValues: map[string]string{"serviceMonitor.create": "true"},
|
||||
SetJsonValues: map[string]string{"serviceMonitor.selector": testCase.values}}
|
||||
output := helm.RenderTemplate(s.T(), options, s.chartPath, s.releaseName, s.templates)
|
||||
|
||||
var svcMonitor monitoringv1.ServiceMonitor
|
||||
helm.UnmarshalK8SYaml(s.T(), output, &svcMonitor)
|
||||
|
||||
s.Equal(testCase.expected, svcMonitor.Spec.Selector.MatchLabels, "should be the same")
|
||||
})
|
||||
}
|
||||
}
|
|
@ -0,0 +1,177 @@
|
|||
// SPDX-License-Identifier: Apache-2.0
|
||||
// Copyright 2024 The Falco Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package falcoTemplates
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/falcosecurity/charts/charts/falco/tests/unit"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/gruntwork-io/terratest/modules/helm"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/stretchr/testify/suite"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
type serviceTemplateTest struct {
|
||||
suite.Suite
|
||||
chartPath string
|
||||
releaseName string
|
||||
namespace string
|
||||
templates []string
|
||||
}
|
||||
|
||||
func TestServiceTemplate(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
chartFullPath, err := filepath.Abs(unit.ChartPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
suite.Run(t, &serviceTemplateTest{
|
||||
Suite: suite.Suite{},
|
||||
chartPath: chartFullPath,
|
||||
releaseName: "falco-test",
|
||||
namespace: "falco-namespace-test",
|
||||
templates: []string{"templates/service.yaml"},
|
||||
})
|
||||
}
|
||||
|
||||
func (s *serviceTemplateTest) TestCreationDefaultValues() {
|
||||
// Render the service and check that it has not been rendered.
|
||||
_, err := helm.RenderTemplateE(s.T(), &helm.Options{}, s.chartPath, s.releaseName, s.templates)
|
||||
s.Error(err, "should error")
|
||||
s.Equal("error while running command: exit status 1; Error: could not find template templates/service.yaml in chart", err.Error())
|
||||
}
|
||||
|
||||
func (s *serviceTemplateTest) TestDefaultLabelsValues() {
|
||||
options := &helm.Options{SetValues: map[string]string{"metrics.enabled": "true"}}
|
||||
output, err := helm.RenderTemplateE(s.T(), options, s.chartPath, s.releaseName, s.templates)
|
||||
s.NoError(err, "should render template")
|
||||
|
||||
cInfo, err := unit.ChartInfo(s.T(), s.chartPath)
|
||||
s.NoError(err)
|
||||
// Get app version.
|
||||
appVersion, found := cInfo["appVersion"]
|
||||
s.True(found, "should find app version in chart info")
|
||||
appVersion = appVersion.(string)
|
||||
// Get chart version.
|
||||
chartVersion, found := cInfo["version"]
|
||||
s.True(found, "should find chart version in chart info")
|
||||
// Get chart name.
|
||||
chartName, found := cInfo["name"]
|
||||
s.True(found, "should find chart name in chart info")
|
||||
chartName = chartName.(string)
|
||||
expectedLabels := map[string]string{
|
||||
"helm.sh/chart": fmt.Sprintf("%s-%s", chartName, chartVersion),
|
||||
"app.kubernetes.io/name": chartName.(string),
|
||||
"app.kubernetes.io/instance": s.releaseName,
|
||||
"app.kubernetes.io/version": appVersion.(string),
|
||||
"app.kubernetes.io/managed-by": "Helm",
|
||||
"type": "falco-metrics",
|
||||
}
|
||||
var svc corev1.Service
|
||||
helm.UnmarshalK8SYaml(s.T(), output, &svc)
|
||||
labels := svc.GetLabels()
|
||||
for key, value := range labels {
|
||||
expectedVal := expectedLabels[key]
|
||||
s.Equal(expectedVal, value)
|
||||
}
|
||||
|
||||
for key, value := range expectedLabels {
|
||||
expectedVal := labels[key]
|
||||
s.Equal(expectedVal, value)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *serviceTemplateTest) TestCustomLabelsValues() {
|
||||
options := &helm.Options{SetValues: map[string]string{"metrics.enabled": "true",
|
||||
"metrics.service.labels.customLabel": "customLabelValues"}}
|
||||
output, err := helm.RenderTemplateE(s.T(), options, s.chartPath, s.releaseName, s.templates)
|
||||
|
||||
s.NoError(err, "should render template")
|
||||
|
||||
cInfo, err := unit.ChartInfo(s.T(), s.chartPath)
|
||||
s.NoError(err)
|
||||
// Get app version.
|
||||
appVersion, found := cInfo["appVersion"]
|
||||
s.True(found, "should find app version in chart info")
|
||||
appVersion = appVersion.(string)
|
||||
// Get chart version.
|
||||
chartVersion, found := cInfo["version"]
|
||||
s.True(found, "should find chart version in chart info")
|
||||
// Get chart name.
|
||||
chartName, found := cInfo["name"]
|
||||
s.True(found, "should find chart name in chart info")
|
||||
chartName = chartName.(string)
|
||||
expectedLabels := map[string]string{
|
||||
"helm.sh/chart": fmt.Sprintf("%s-%s", chartName, chartVersion),
|
||||
"app.kubernetes.io/name": chartName.(string),
|
||||
"app.kubernetes.io/instance": s.releaseName,
|
||||
"app.kubernetes.io/version": appVersion.(string),
|
||||
"app.kubernetes.io/managed-by": "Helm",
|
||||
"type": "falco-metrics",
|
||||
"customLabel": "customLabelValues",
|
||||
}
|
||||
var svc corev1.Service
|
||||
helm.UnmarshalK8SYaml(s.T(), output, &svc)
|
||||
labels := svc.GetLabels()
|
||||
for key, value := range labels {
|
||||
expectedVal := expectedLabels[key]
|
||||
s.Equal(expectedVal, value)
|
||||
}
|
||||
|
||||
for key, value := range expectedLabels {
|
||||
expectedVal := labels[key]
|
||||
s.Equal(expectedVal, value)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (s *serviceTemplateTest) TestDefaultAnnotationsValues() {
|
||||
options := &helm.Options{SetValues: map[string]string{"metrics.enabled": "true"}}
|
||||
output, err := helm.RenderTemplateE(s.T(), options, s.chartPath, s.releaseName, s.templates)
|
||||
|
||||
s.NoError(err)
|
||||
|
||||
var svc corev1.Service
|
||||
helm.UnmarshalK8SYaml(s.T(), output, &svc)
|
||||
s.Nil(svc.Annotations, "should be nil")
|
||||
}
|
||||
|
||||
func (s *serviceTemplateTest) TestCustomAnnotationsValues() {
|
||||
values := map[string]string{
|
||||
"metrics.enabled": "true",
|
||||
"metrics.service.annotations.annotation1": "customAnnotation1",
|
||||
"metrics.service.annotations.annotation2": "customAnnotation2",
|
||||
}
|
||||
annotations := map[string]string{
|
||||
"annotation1": "customAnnotation1",
|
||||
"annotation2": "customAnnotation2",
|
||||
}
|
||||
options := &helm.Options{SetValues: values}
|
||||
output, err := helm.RenderTemplateE(s.T(), options, s.chartPath, s.releaseName, s.templates)
|
||||
s.NoError(err)
|
||||
|
||||
var svc corev1.Service
|
||||
helm.UnmarshalK8SYaml(s.T(), output, &svc)
|
||||
s.Len(svc.Annotations, 2)
|
||||
|
||||
for key, value := range svc.Annotations {
|
||||
expectedVal := annotations[key]
|
||||
s.Equal(expectedVal, value)
|
||||
}
|
||||
}
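The label assertions above walk the two maps in both directions, which makes the diverging key easy to spot when a test fails. Assuming no expected label carries an empty value, a single map comparison is an equivalent, more compact sketch (to be placed inside one of the suite methods where `s`, `svc` and `expectedLabels` are in scope):

// One-shot comparison of the rendered labels against the expected set.
// Equivalent to the two loops above as long as no expected value is "".
s.Equal(expectedLabels, svc.GetLabels(), "service labels should match the expected set exactly")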
|
|
@ -13,7 +13,7 @@
|
|||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package unit
|
||||
package k8smetaPlugin
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
@ -23,28 +23,29 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/falcosecurity/charts/charts/falco/tests/unit"
|
||||
|
||||
"slices"
|
||||
|
||||
"github.com/gruntwork-io/terratest/modules/helm"
|
||||
"github.com/stretchr/testify/require"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"slices"
|
||||
)
|
||||
|
||||
const chartPath = "../../"
|
||||
|
||||
// Using the default values we want to test that no resources for the k8s-metacollector are rendered, since the collector is disabled by default.
|
||||
func TestRenderedResourcesWithDefaultValues(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
helmChartPath, err := filepath.Abs(chartPath)
|
||||
helmChartPath, err := filepath.Abs(unit.ChartPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
options := &helm.Options{}
|
||||
// Template the chart using the default values.yaml file.
|
||||
output, err := helm.RenderTemplateE(t, options, helmChartPath, releaseName, nil)
|
||||
output, err := helm.RenderTemplateE(t, options, helmChartPath, unit.ReleaseName, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Extract all rendered files from the output.
|
||||
re := regexp.MustCompile(patternK8sMetacollectorFiles)
|
||||
re := regexp.MustCompile(unit.PatternK8sMetacollectorFiles)
|
||||
matches := re.FindAllStringSubmatch(output, -1)
|
||||
require.Len(t, matches, 0)
|
||||
|
||||
|
@ -53,7 +54,7 @@ func TestRenderedResourcesWithDefaultValues(t *testing.T) {
|
|||
func TestRenderedResourcesWhenNotEnabled(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
helmChartPath, err := filepath.Abs(chartPath)
|
||||
helmChartPath, err := filepath.Abs(unit.ChartPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Template files that we expect to be rendered.
|
||||
|
@ -72,11 +73,11 @@ func TestRenderedResourcesWhenNotEnabled(t *testing.T) {
|
|||
}}
|
||||
|
||||
// Template the chart using the default values.yaml file.
|
||||
output, err := helm.RenderTemplateE(t, options, helmChartPath, releaseName, nil)
|
||||
output, err := helm.RenderTemplateE(t, options, helmChartPath, unit.ReleaseName, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Extract all rendered files from the output.
|
||||
re := regexp.MustCompile(patternK8sMetacollectorFiles)
|
||||
re := regexp.MustCompile(unit.PatternK8sMetacollectorFiles)
|
||||
matches := re.FindAllStringSubmatch(output, -1)
|
||||
|
||||
var renderedTemplates []string
|
||||
|
@ -98,7 +99,7 @@ func TestRenderedResourcesWhenNotEnabled(t *testing.T) {
|
|||
func TestPluginConfigurationInFalcoConfig(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
helmChartPath, err := filepath.Abs(chartPath)
|
||||
helmChartPath, err := filepath.Abs(unit.ChartPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
testCases := []struct {
|
||||
|
@ -114,6 +115,7 @@ func TestPluginConfigurationInFalcoConfig(t *testing.T) {
|
|||
// Get init config.
|
||||
initConfig, ok := plugin["init_config"]
|
||||
require.True(t, ok)
|
||||
require.Len(t, initConfig, 5, "checking number of config entries in the init section")
|
||||
initConfigMap := initConfig.(map[string]interface{})
|
||||
// Check that the collector port is correctly set.
|
||||
port := initConfigMap["collectorPort"]
|
||||
|
@ -123,8 +125,13 @@ func TestPluginConfigurationInFalcoConfig(t *testing.T) {
|
|||
require.Equal(t, "${FALCO_K8S_NODE_NAME}", nodeName.(string))
|
||||
// Check that the collector hostname is correctly set.
|
||||
hostName := initConfigMap["collectorHostname"]
|
||||
require.Equal(t, fmt.Sprintf("%s-k8s-metacollector.default.svc", releaseName), hostName.(string))
|
||||
|
||||
require.Equal(t, fmt.Sprintf("%s-k8s-metacollector.default.svc", unit.ReleaseName), hostName.(string))
|
||||
// Check that the loglevel has been set.
|
||||
verbosity := initConfigMap["verbosity"]
|
||||
require.Equal(t, "info", verbosity.(string))
|
||||
// Check that host proc fs has been set.
|
||||
hostProc := initConfigMap["hostProc"]
|
||||
require.Equal(t, "/host", hostProc.(string))
|
||||
// Check that the library path is set.
|
||||
libPath := plugin["library_path"]
|
||||
require.Equal(t, "libk8smeta.so", libPath)
|
||||
|
@ -140,6 +147,7 @@ func TestPluginConfigurationInFalcoConfig(t *testing.T) {
|
|||
// Get init config.
|
||||
initConfig, ok := plugin["init_config"]
|
||||
require.True(t, ok)
|
||||
require.Len(t, initConfig, 5, "checking number of config entries in the init section")
|
||||
initConfigMap := initConfig.(map[string]interface{})
|
||||
// Check that the collector port is correctly set.
|
||||
port := initConfigMap["collectorPort"]
|
||||
|
@ -149,7 +157,13 @@ func TestPluginConfigurationInFalcoConfig(t *testing.T) {
|
|||
require.Equal(t, "${FALCO_K8S_NODE_NAME}", nodeName.(string))
|
||||
// Check that the collector hostname is correctly set.
|
||||
hostName := initConfigMap["collectorHostname"]
|
||||
require.Equal(t, fmt.Sprintf("%s-k8s-metacollector.test.svc", releaseName), hostName.(string))
|
||||
require.Equal(t, fmt.Sprintf("%s-k8s-metacollector.test.svc", unit.ReleaseName), hostName.(string))
|
||||
// Check that the loglevel has been set.
|
||||
verbosity := initConfigMap["verbosity"]
|
||||
require.Equal(t, "info", verbosity.(string))
|
||||
// Check that host proc fs has been set.
|
||||
hostProc := initConfigMap["hostProc"]
|
||||
require.Equal(t, "/host", hostProc.(string))
|
||||
|
||||
// Check that the library path is set.
|
||||
libPath := plugin["library_path"]
|
||||
|
@ -166,6 +180,7 @@ func TestPluginConfigurationInFalcoConfig(t *testing.T) {
|
|||
// Get init config.
|
||||
initConfig, ok := plugin["init_config"]
|
||||
require.True(t, ok)
|
||||
require.Len(t, initConfig, 5, "checking number of config entries in the init section")
|
||||
initConfigMap := initConfig.(map[string]interface{})
|
||||
// Check that the collector port is correctly set.
|
||||
port := initConfigMap["collectorPort"]
|
||||
|
@ -176,6 +191,12 @@ func TestPluginConfigurationInFalcoConfig(t *testing.T) {
|
|||
// Check that the collector hostname is correctly set.
|
||||
hostName := initConfigMap["collectorHostname"]
|
||||
require.Equal(t, "collector.default.svc", hostName.(string))
|
||||
// Check that the loglevel has been set.
|
||||
verbosity := initConfigMap["verbosity"]
|
||||
require.Equal(t, "info", verbosity.(string))
|
||||
// Check that host proc fs has been set.
|
||||
hostProc := initConfigMap["hostProc"]
|
||||
require.Equal(t, "/host", hostProc.(string))
|
||||
|
||||
// Check that the library path is set.
|
||||
libPath := plugin["library_path"]
|
||||
|
@ -194,6 +215,7 @@ func TestPluginConfigurationInFalcoConfig(t *testing.T) {
|
|||
// Get init config.
|
||||
initConfig, ok := plugin["init_config"]
|
||||
require.True(t, ok)
|
||||
require.Len(t, initConfig, 5, "checking number of config entries in the init section")
|
||||
initConfigMap := initConfig.(map[string]interface{})
|
||||
// Check that the collector port is correctly set.
|
||||
port := initConfigMap["collectorPort"]
|
||||
|
@ -204,6 +226,12 @@ func TestPluginConfigurationInFalcoConfig(t *testing.T) {
|
|||
// Check that the collector hostname is correctly set.
|
||||
hostName := initConfigMap["collectorHostname"]
|
||||
require.Equal(t, "collector.test.svc", hostName.(string))
|
||||
// Check that the loglevel has been set.
|
||||
verbosity := initConfigMap["verbosity"]
|
||||
require.Equal(t, "info", verbosity.(string))
|
||||
// Check that host proc fs has been set.
|
||||
hostProc := initConfigMap["hostProc"]
|
||||
require.Equal(t, "/host", hostProc.(string))
|
||||
|
||||
// Check that the library path is set.
|
||||
libPath := plugin["library_path"]
|
||||
|
@ -220,6 +248,7 @@ func TestPluginConfigurationInFalcoConfig(t *testing.T) {
|
|||
// Get init config.
|
||||
initConfig, ok := plugin["init_config"]
|
||||
require.True(t, ok)
|
||||
require.Len(t, initConfig, 5, "checking number of config entries in the init section")
|
||||
initConfigMap := initConfig.(map[string]interface{})
|
||||
// Check that the collector port is correctly set.
|
||||
port := initConfigMap["collectorPort"]
|
||||
|
@ -230,6 +259,12 @@ func TestPluginConfigurationInFalcoConfig(t *testing.T) {
|
|||
// Check that the collector hostname is correctly set.
|
||||
hostName := initConfigMap["collectorHostname"]
|
||||
require.Equal(t, "test", hostName.(string))
|
||||
// Check that the loglevel has been set.
|
||||
verbosity := initConfigMap["verbosity"]
|
||||
require.Equal(t, "info", verbosity.(string))
|
||||
// Check that host proc fs has been set.
|
||||
hostProc := initConfigMap["hostProc"]
|
||||
require.Equal(t, "/host", hostProc.(string))
|
||||
|
||||
// Check that the library path is set.
|
||||
libPath := plugin["library_path"]
|
||||
|
@ -249,6 +284,7 @@ func TestPluginConfigurationInFalcoConfig(t *testing.T) {
|
|||
// Get init config.
|
||||
initConfig, ok := plugin["init_config"]
|
||||
require.True(t, ok)
|
||||
require.Len(t, initConfig, 5, "checking number of config entries in the init section")
|
||||
initConfigMap := initConfig.(map[string]interface{})
|
||||
// Check that the collector port is correctly set.
|
||||
port := initConfigMap["collectorPort"]
|
||||
|
@ -259,6 +295,12 @@ func TestPluginConfigurationInFalcoConfig(t *testing.T) {
|
|||
// Check that the collector hostname is correctly set.
|
||||
hostName := initConfigMap["collectorHostname"]
|
||||
require.Equal(t, "test-with-override", hostName.(string))
|
||||
// Check that the loglevel has been set.
|
||||
verbosity := initConfigMap["verbosity"]
|
||||
require.Equal(t, "info", verbosity.(string))
|
||||
// Check that host proc fs has been set.
|
||||
hostProc := initConfigMap["hostProc"]
|
||||
require.Equal(t, "/host", hostProc.(string))
|
||||
|
||||
// Check that the library path is set.
|
||||
libPath := plugin["library_path"]
|
||||
|
@ -285,7 +327,13 @@ func TestPluginConfigurationInFalcoConfig(t *testing.T) {
|
|||
require.Equal(t, "${FALCO_K8S_NODE_NAME}", nodeName.(string))
|
||||
// Check that the collector hostname is correctly set.
|
||||
hostName := initConfigMap["collectorHostname"]
|
||||
require.Equal(t, fmt.Sprintf("%s-k8s-metacollector.default.svc", releaseName), hostName.(string))
|
||||
require.Equal(t, fmt.Sprintf("%s-k8s-metacollector.default.svc", unit.ReleaseName), hostName.(string))
|
||||
// Check that the loglevel has been set.
|
||||
verbosity := initConfigMap["verbosity"]
|
||||
require.Equal(t, "info", verbosity.(string))
|
||||
// Check that host proc fs has been set.
|
||||
hostProc := initConfigMap["hostProc"]
|
||||
require.Equal(t, "/host", hostProc.(string))
|
||||
|
||||
// Check that the library path is set.
|
||||
libPath := plugin["library_path"]
|
||||
|
@ -293,7 +341,40 @@ func TestPluginConfigurationInFalcoConfig(t *testing.T) {
|
|||
},
|
||||
},
|
||||
{
|
||||
"drive disabled",
|
||||
"set collector logger level and hostProc",
|
||||
map[string]string{
|
||||
"collectors.kubernetes.verbosity": "trace",
|
||||
"collectors.kubernetes.hostProc": "/host/test",
|
||||
},
|
||||
func(t *testing.T, config any) {
|
||||
plugin := config.(map[string]interface{})
|
||||
// Get init config.
|
||||
initConfig, ok := plugin["init_config"]
|
||||
require.True(t, ok)
|
||||
require.Len(t, initConfig, 5, "checking number of config entries in the init section")
|
||||
initConfigMap := initConfig.(map[string]interface{})
|
||||
// Check that the collector port is correctly set.
|
||||
port := initConfigMap["collectorPort"]
|
||||
require.Equal(t, float64(45000), port.(float64))
|
||||
// Check that the collector nodeName is correctly set.
|
||||
nodeName := initConfigMap["nodeName"]
|
||||
require.Equal(t, "${FALCO_K8S_NODE_NAME}", nodeName.(string))
|
||||
// Check that the collector hostname is correctly set.
|
||||
hostName := initConfigMap["collectorHostname"]
|
||||
require.Equal(t, fmt.Sprintf("%s-k8s-metacollector.default.svc", unit.ReleaseName), hostName.(string))
|
||||
// Check that the loglevel has been set.
|
||||
verbosity := initConfigMap["verbosity"]
|
||||
require.Equal(t, "trace", verbosity.(string))
|
||||
// Check that host proc fs has been set.
|
||||
hostProc := initConfigMap["hostProc"]
|
||||
require.Equal(t, "/host/test", hostProc.(string))
|
||||
// Check that the library path is set.
|
||||
libPath := plugin["library_path"]
|
||||
require.Equal(t, "libk8smeta.so", libPath)
|
||||
},
|
||||
},
|
||||
{
|
||||
"driver disabled",
|
||||
map[string]string{
|
||||
"driver.enabled": "false",
|
||||
},
|
||||
|
@ -317,7 +398,7 @@ func TestPluginConfigurationInFalcoConfig(t *testing.T) {
|
|||
}
|
||||
|
||||
options := &helm.Options{SetValues: testCase.values}
|
||||
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/configmap.yaml"})
|
||||
output := helm.RenderTemplate(t, options, helmChartPath, unit.ReleaseName, []string{"templates/configmap.yaml"})
|
||||
|
||||
var cm corev1.ConfigMap
|
||||
helm.UnmarshalK8SYaml(t, output, &cm)
|
||||
|
@ -329,7 +410,7 @@ func TestPluginConfigurationInFalcoConfig(t *testing.T) {
|
|||
found := false
|
||||
// Find the k8smeta plugin configuration.
|
||||
for _, plugin := range pluginsArray {
|
||||
if name, ok := plugin.(map[string]interface{})["name"]; ok && name == k8sMetaPluginName {
|
||||
if name, ok := plugin.(map[string]interface{})["name"]; ok && name == unit.K8sMetaPluginName {
|
||||
testCase.expected(t, plugin)
|
||||
found = true
|
||||
}
|
||||
|
@ -337,11 +418,11 @@ func TestPluginConfigurationInFalcoConfig(t *testing.T) {
|
|||
if found {
|
||||
// Check that the plugin has been added to the ones that need to be loaded.
|
||||
loadplugins := config["load_plugins"]
|
||||
require.True(t, slices.Contains(loadplugins.([]interface{}), k8sMetaPluginName))
|
||||
require.True(t, slices.Contains(loadplugins.([]interface{}), unit.K8sMetaPluginName))
|
||||
} else {
|
||||
testCase.expected(t, nil)
|
||||
loadplugins := config["load_plugins"]
|
||||
require.True(t, !slices.Contains(loadplugins.([]interface{}), k8sMetaPluginName))
|
||||
require.True(t, !slices.Contains(loadplugins.([]interface{}), unit.K8sMetaPluginName))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
@ -375,21 +456,68 @@ func TestPluginConfigurationUniqueEntries(t *testing.T) {
|
|||
},
|
||||
"library_path": "libk8smeta.so",
|
||||
"name": "k8smeta"
|
||||
},
|
||||
{
|
||||
"init_config": {
|
||||
"engines": {
|
||||
"bpm": {
|
||||
"enabled": false
|
||||
},
|
||||
"containerd": {
|
||||
"enabled": true,
|
||||
"sockets": [
|
||||
"/run/containerd/containerd.sock"
|
||||
]
|
||||
},
|
||||
"cri": {
|
||||
"enabled": true,
|
||||
"sockets": [
|
||||
"/run/crio/crio.sock"
|
||||
]
|
||||
},
|
||||
"docker": {
|
||||
"enabled": true,
|
||||
"sockets": [
|
||||
"/var/run/docker.sock"
|
||||
]
|
||||
},
|
||||
"libvirt_lxc": {
|
||||
"enabled": false
|
||||
},
|
||||
"lxc": {
|
||||
"enabled": false
|
||||
},
|
||||
"podman": {
|
||||
"enabled": false,
|
||||
"sockets": [
|
||||
"/run/podman/podman.sock"
|
||||
]
|
||||
}
|
||||
},
|
||||
"hooks": [
|
||||
"create"
|
||||
],
|
||||
"label_max_len": 100,
|
||||
"with_size": false
|
||||
},
|
||||
"library_path": "libcontainer.so",
|
||||
"name": "container"
|
||||
}
|
||||
]`
|
||||
|
||||
loadPluginsJSON := `[
|
||||
"k8smeta",
|
||||
"k8saudit"
|
||||
"k8saudit",
|
||||
"container"
|
||||
]`
|
||||
helmChartPath, err := filepath.Abs(chartPath)
|
||||
helmChartPath, err := filepath.Abs(unit.ChartPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
options := &helm.Options{SetJsonValues: map[string]string{
|
||||
"falco.plugins": pluginsJSON,
|
||||
"falco.load_plugins": loadPluginsJSON,
|
||||
}, SetValues: map[string]string{"collectors.kubernetes.enabled": "true"}}
|
||||
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/configmap.yaml"})
|
||||
output := helm.RenderTemplate(t, options, helmChartPath, unit.ReleaseName, []string{"templates/configmap.yaml"})
|
||||
|
||||
var cm corev1.ConfigMap
|
||||
helm.UnmarshalK8SYaml(t, output, &cm)
|
||||
|
@ -405,7 +533,7 @@ func TestPluginConfigurationUniqueEntries(t *testing.T) {
|
|||
// Find the k8smeta plugin configuration.
|
||||
numConfigK8smeta := 0
|
||||
for _, plugin := range pluginsArray {
|
||||
if name, ok := plugin.(map[string]interface{})["name"]; ok && name == k8sMetaPluginName {
|
||||
if name, ok := plugin.(map[string]interface{})["name"]; ok && name == unit.K8sMetaPluginName {
|
||||
numConfigK8smeta++
|
||||
}
|
||||
}
|
||||
|
@ -414,8 +542,8 @@ func TestPluginConfigurationUniqueEntries(t *testing.T) {
|
|||
|
||||
// Check that the plugin has been added to the ones that need to be loaded.
|
||||
loadplugins := config["load_plugins"]
|
||||
require.Len(t, loadplugins.([]interface{}), 2)
|
||||
require.True(t, slices.Contains(loadplugins.([]interface{}), k8sMetaPluginName))
|
||||
require.Len(t, loadplugins.([]interface{}), 3)
|
||||
require.True(t, slices.Contains(loadplugins.([]interface{}), unit.K8sMetaPluginName))
|
||||
}
|
||||
|
||||
// Test that the helper does not overwrite user's configuration.
|
||||
|
@ -460,9 +588,10 @@ func TestFalcoctlRefs(t *testing.T) {
|
|||
require.True(t, slices.Contains(allowedTypes.([]interface{}), "rulesfile"))
|
||||
// Test plugin reference.
|
||||
refs := artifactConfig["install"].(map[string]interface{})["refs"].([]interface{})
|
||||
require.Len(t, refs, 2)
|
||||
require.True(t, slices.Contains(refs, "falco-rules:3"))
|
||||
require.True(t, slices.Contains(refs, "ghcr.io/falcosecurity/plugins/plugin/k8smeta:0.1.0"))
|
||||
require.Len(t, refs, 3)
|
||||
require.True(t, slices.Contains(refs, "falco-rules:4"))
|
||||
require.True(t, slices.Contains(refs, "ghcr.io/falcosecurity/plugins/plugin/k8smeta:0.3.1"))
|
||||
require.True(t, slices.Contains(refs, "ghcr.io/falcosecurity/plugins/plugin/container:0.3.5"))
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
|
@ -498,7 +627,7 @@ func TestFalcoctlRefs(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
helmChartPath, err := filepath.Abs(chartPath)
|
||||
helmChartPath, err := filepath.Abs(unit.ChartPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, testCase := range testCases {
|
||||
|
@ -508,7 +637,7 @@ func TestFalcoctlRefs(t *testing.T) {
|
|||
t.Parallel()
|
||||
|
||||
options := &helm.Options{SetJsonValues: testCase.valuesJSON, SetValues: map[string]string{"collectors.kubernetes.enabled": "true"}}
|
||||
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/falcoctl-configmap.yaml"})
|
||||
output := helm.RenderTemplate(t, options, helmChartPath, unit.ReleaseName, []string{"templates/falcoctl-configmap.yaml"})
|
||||
|
||||
var cm corev1.ConfigMap
|
||||
helm.UnmarshalK8SYaml(t, output, &cm)
|
|
@ -53,11 +53,11 @@ falcoctl:
|
|||
install:
|
||||
# -- List of artifacts to be installed by the falcoctl init container.
|
||||
# We do not recommend installing (or following) plugins for security reasons since they are executable objects.
|
||||
refs: [falco-rules:3]
|
||||
refs: [falco-rules:4]
|
||||
follow:
|
||||
# -- List of artifacts to be followed by the falcoctl sidecar container.
|
||||
# We do not recommend installing (or following) plugins for security reasons since they are executable objects.
|
||||
refs: [falco-rules:3]
|
||||
refs: [falco-rules:4]
|
||||
|
||||
# Set this to true to force Falco to output the logs as soon as they are emitted.
|
||||
tty: false
|
||||
|
|
|
@ -14,7 +14,6 @@ controller:
|
|||
# For more info check the section on Plugins in the README.md file.
|
||||
replicas: 1
|
||||
|
||||
|
||||
falcoctl:
|
||||
artifact:
|
||||
install:
|
||||
|
@ -27,10 +26,10 @@ falcoctl:
|
|||
artifact:
|
||||
install:
|
||||
# -- List of artifacts to be installed by the falcoctl init container.
|
||||
refs: [k8saudit-rules:0.7]
|
||||
refs: [k8saudit-rules:0.11, k8saudit:0.11]
|
||||
follow:
|
||||
# -- List of artifacts to be followed by the falcoctl sidecar container.
|
||||
refs: [k8saudit-rules:0.7]
|
||||
refs: [k8saudit-rules:0.11]
|
||||
|
||||
services:
|
||||
- name: k8saudit-webhook
|
||||
|
@ -41,7 +40,7 @@ services:
|
|||
protocol: TCP
|
||||
|
||||
falco:
|
||||
rules_file:
|
||||
rules_files:
|
||||
- /etc/falco/k8s_audit_rules.yaml
|
||||
- /etc/falco/rules.d
|
||||
plugins:
|
||||
|
|
|
@ -30,10 +30,10 @@ falcoctl:
|
|||
artifact:
|
||||
install:
|
||||
# -- List of artifacts to be installed by the falcoctl init container.
|
||||
refs: [falco-rules:3, k8saudit-rules:0.7]
|
||||
refs: [falco-rules:4, k8saudit-rules:0.11, k8saudit:0.11]
|
||||
follow:
|
||||
# -- List of artifacts to be followed by the falcoctl sidecar container.
|
||||
refs: [falco-rules:3, k8saudit-rules:0.7]
|
||||
refs: [falco-rules:4, k8saudit-rules:0.11, k8saudit:0.11]
|
||||
|
||||
services:
|
||||
- name: k8saudit-webhook
|
||||
|
@ -44,7 +44,7 @@ services:
|
|||
protocol: TCP
|
||||
|
||||
falco:
|
||||
rules_file:
|
||||
rules_files:
|
||||
- /etc/falco/falco_rules.yaml
|
||||
- /etc/falco/k8s_audit_rules.yaml
|
||||
- /etc/falco/rules.d
|
||||
|
|
|
@ -10,7 +10,7 @@ image:
|
|||
# -- The image registry to pull from.
|
||||
registry: docker.io
|
||||
# -- The image repository to pull from
|
||||
repository: falcosecurity/falco-no-driver
|
||||
repository: falcosecurity/falco
|
||||
# -- The image tag to pull. Overrides the image tag whose default is the chart appVersion.
|
||||
tag: ""
|
||||
|
||||
|
@ -27,14 +27,20 @@ namespaceOverride: ""
|
|||
podAnnotations: {}
|
||||
|
||||
serviceAccount:
|
||||
# -- Secrets containing credentials when pulling from private/secure registries.
|
||||
imagePullSecrets: []
|
||||
# -- Specifies whether a service account should be created.
|
||||
create: false
|
||||
create: true
|
||||
# -- Annotations to add to the service account.
|
||||
annotations: {}
|
||||
# -- The name of the service account to use.
|
||||
# If not set and create is true, a name is generated using the fullname template
|
||||
name: ""
|
||||
|
||||
rbac:
|
||||
# Create and use rbac resources when set to true. Needed to list and update configmaps in Falco's namespace.
|
||||
create: true
|
||||
|
||||
# -- Add additional pod labels
|
||||
podLabels: {}
|
||||
|
||||
|
@ -135,6 +141,8 @@ controller:
|
|||
kind: daemonset
|
||||
# Annotations to add to the daemonset or deployment
|
||||
annotations: {}
|
||||
# -- Extra labels to add to the daemonset or deployment
|
||||
labels: {}
|
||||
daemonset:
|
||||
updateStrategy:
|
||||
# You can also customize maxUnavailable or minReadySeconds if you
|
||||
|
@ -160,14 +168,111 @@ services:
|
|||
# nodePort: 30007
|
||||
# protocol: TCP
|
||||
|
||||
# -- metrics configures Falco to enable and expose the metrics.
|
||||
metrics:
|
||||
# -- enabled specifies whether the metrics should be enabled.
|
||||
enabled: false
|
||||
# -- interval is the stats interval for Falco; it follows the time duration definitions
|
||||
# used by Prometheus.
|
||||
# https://prometheus.io/docs/prometheus/latest/querying/basics/#time-durations
|
||||
# Time durations are specified as a number, followed immediately by one of the
|
||||
# following units:
|
||||
# ms - millisecond
|
||||
# s - second
|
||||
# m - minute
|
||||
# h - hour
|
||||
# d - day - assuming a day always has 24h
|
||||
# w - week - assuming a week always has 7d
|
||||
# y - year - assuming a year always has 365d
|
||||
# Example of a valid time duration: 1h30m20s10ms
|
||||
# A minimum interval of 100ms is enforced for metric collection. However, for
|
||||
# production environments, we recommend selecting one of the following intervals
|
||||
# for optimal monitoring:
|
||||
# 15m
|
||||
# 30m
|
||||
# 1h
|
||||
# 4h
|
||||
# 6h
|
||||
interval: 1h
|
||||
# -- outputRule enables seamless metrics and performance monitoring; we
|
||||
# recommend emitting metrics as the rule "Falco internal: metrics snapshot".
|
||||
# This option is particularly useful when Falco logs are preserved in a data
|
||||
# lake. Please note that to use this option, the Falco rules config `priority`
|
||||
# must be set to `info` at a minimum.
|
||||
outputRule: false
|
||||
# -- rulesCountersEnabled specifies whether the counts for each rule should be emitted.
|
||||
rulesCountersEnabled: true
|
||||
# -- resourceUtilizationEnabled: Emit CPU and memory usage metrics. CPU usage
|
||||
# is reported as a percentage of one CPU and can be normalized to the total
|
||||
# number of CPUs to determine overall usage. Memory metrics are provided in raw
|
||||
# units (`kb` for `RSS`, `PSS` and `VSZ` or `bytes` for `container_memory_used`)
|
||||
# and can be uniformly converted to megabytes (MB) using the
|
||||
# `convert_memory_to_mb` functionality. In environments such as Kubernetes when
|
||||
# deployed as daemonset, it is crucial to track Falco's container memory usage.
|
||||
# To customize the path of the memory metric file, you can create an environment
|
||||
# variable named `FALCO_CGROUP_MEM_PATH` and set it to the desired file path. By
|
||||
# default, Falco uses the file `/sys/fs/cgroup/memory/memory.usage_in_bytes` to
|
||||
# monitor container memory usage, which aligns with Kubernetes'
|
||||
# `container_memory_working_set_bytes` metric. Finally, we emit the overall host
|
||||
# CPU and memory usages, along with the total number of processes and open file
|
||||
# descriptors (fds) on the host, obtained from the proc file system unrelated to
|
||||
# Falco's monitoring. These metrics help assess Falco's usage in relation to the
|
||||
# server's workload intensity.
|
||||
resourceUtilizationEnabled: true
|
||||
# -- stateCountersEnabled emits counters related to Falco's state engine, including
|
||||
# added, removed threads or file descriptors (fds), and failed lookup, store, or
|
||||
# retrieve actions in relation to Falco's underlying process cache table (threadtable).
|
||||
# We also log the number of currently cached containers if applicable.
|
||||
stateCountersEnabled: true
|
||||
# -- kernelEventCountersEnabled emits kernel side event and drop counters, as
|
||||
# an alternative to `syscall_event_drops`, but with some differences. These
|
||||
# counters reflect monotonic values since Falco's start and are exported at a
|
||||
# constant stats interval.
|
||||
kernelEventCountersEnabled: true
|
||||
# -- libbpfStatsEnabled exposes statistics similar to `bpftool prog show`,
|
||||
# providing information such as the number of invocations of each BPF program
|
||||
# attached by Falco and the time spent in each program measured in nanoseconds.
|
||||
# To enable this feature, the kernel must be >= 5.1, and the kernel
|
||||
# configuration `/proc/sys/kernel/bpf_stats_enabled` must be set. This option,
|
||||
# or an equivalent statistics feature, is not available for non `*bpf*` drivers.
|
||||
# Additionally, please be aware that the current implementation of `libbpf` does
|
||||
# not support granularity of statistics at the bpf tail call level.
|
||||
libbpfStatsEnabled: true
|
||||
# -- convertMemoryToMB specifies whether the memory should be converted to mb.
|
||||
convertMemoryToMB: true
|
||||
# -- includeEmptyValues specifies whether the empty values should be included in the metrics.
|
||||
includeEmptyValues: false
|
||||
# -- kernelEventCountersPerCPUEnabled specifies whether the event counters per cpu should be enabled.
|
||||
kernelEventCountersPerCPUEnabled: false
|
||||
# -- service exposes the metrics service to be accessed from within the cluster.
|
||||
# ref: https://kubernetes.io/docs/concepts/services-networking/service/
|
||||
service:
|
||||
# -- create specifies whether a service should be created.
|
||||
create: true
|
||||
# -- type denotes the service type. Setting it to "ClusterIP" ensures that the service is accessible
|
||||
# from within the cluster.
|
||||
type: ClusterIP
|
||||
# -- labels to add to the service.
|
||||
labels: {}
|
||||
# -- annotations to add to the service.
|
||||
annotations: {}
|
||||
# -- ports denotes all the ports on which the Service will listen.
|
||||
ports:
|
||||
# -- metrics denotes a listening service named "metrics".
|
||||
metrics:
|
||||
# -- port is the port on which the Service will listen.
|
||||
port: 8765
|
||||
# -- targetPort is the port on which the Pod is listening.
|
||||
targetPort: 8765
|
||||
# -- protocol specifies the network protocol that the Service should use for the associated port.
|
||||
protocol: "TCP"
|
||||
|
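As a hedged illustration of the metrics service settings above (not part of the chart's test files in this diff), a terratest-style check could render templates/service.yaml with metrics enabled and verify the configured port; the function name is hypothetical and it assumes the same `unit` helpers and imports used by the tests earlier in this diff:

func TestMetricsServicePort(t *testing.T) {
	t.Parallel()

	helmChartPath, err := filepath.Abs(unit.ChartPath)
	require.NoError(t, err)

	options := &helm.Options{SetValues: map[string]string{"metrics.enabled": "true"}}
	output := helm.RenderTemplate(t, options, helmChartPath, unit.ReleaseName, []string{"templates/service.yaml"})

	var svc corev1.Service
	helm.UnmarshalK8SYaml(t, output, &svc)

	// The values above declare a single "metrics" port listening on 8765.
	require.Len(t, svc.Spec.Ports, 1)
	require.Equal(t, int32(8765), svc.Spec.Ports[0].Port)
}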
||||
# File access configuration (scenario requirement)
|
||||
mounts:
|
||||
# -- A list of volumes you want to add to the Falco pods.
|
||||
volumes: []
|
||||
# -- A list of volume mounts you want to add to the Falco containers.
|
||||
volumeMounts: []
|
||||
# -- By default, `/proc` from the host is only mounted into the Falco pod when `driver.enabled` is set to `true`. This flag allows it to override this behaviour for edge cases where `/proc` is needed but syscall data source is not enabled at the same time (e.g. for specific plugins).
|
||||
enforceProcMount: false
|
||||
|
||||
# Driver settings (scenario requirement)
|
||||
driver:
|
||||
|
@ -175,7 +280,7 @@ driver:
|
|||
# Always set it to false when using Falco with plugins.
|
||||
enabled: true
|
||||
# -- kind tells Falco which driver to use. Available options: kmod (kernel driver), ebpf (eBPF probe), modern_ebpf (modern eBPF probe), auto (automatic selection).
|
||||
kind: kmod
|
||||
kind: auto
|
||||
# -- kmod holds the configuration for the kernel module.
|
||||
kmod:
|
||||
# -- bufSizePreset determines the size of the shared space between Falco and its drivers.
|
||||
|
@ -196,7 +301,7 @@ driver:
|
|||
# Capabilities used: {CAP_SYS_RESOURCE, CAP_SYS_ADMIN, CAP_SYS_PTRACE}.
|
||||
# On kernel versions >= 5.8 'CAP_PERFMON' and 'CAP_BPF' could replace 'CAP_SYS_ADMIN' but please pay attention to the 'kernel.perf_event_paranoid' value on your system.
|
||||
# Usually 'kernel.perf_event_paranoid>2' means that you cannot use 'CAP_PERFMON' and you should fallback to 'CAP_SYS_ADMIN', but the behavior changes across different distros.
|
||||
# Read more on that here: https://falco.org/docs/event-sources/kernel/#least-privileged-mode-1
|
||||
# Read more on that here: https://falco.org/docs/setup/container/#docker-least-privileged-ebpf-probe
|
||||
leastPrivileged: false
|
||||
# -- bufSizePreset determines the size of the shared space between Falco and its drivers.
|
||||
# This shared space serves as a temporary storage for syscall events.
|
||||
|
@ -207,7 +312,7 @@ driver:
|
|||
# -- Constrain Falco with capabilities instead of running a privileged container.
|
||||
# Ensure the modern bpf driver is enabled (i.e., setting the `driver.kind` option to `modern_ebpf`).
|
||||
# Capabilities used: {CAP_SYS_RESOURCE, CAP_BPF, CAP_PERFMON, CAP_SYS_PTRACE}.
|
||||
# Read more on that here: https://falco.org/docs/event-sources/kernel/#least-privileged-mode-2
|
||||
# Read more on that here: https://falco.org/docs/setup/container/#docker-least-privileged-ebpf-probe
|
||||
leastPrivileged: false
|
||||
# -- bufSizePreset determines the size of the shared space between Falco and its drivers.
|
||||
# This shared space serves as a temporary storage for syscall events.
|
||||
|
@ -257,24 +362,73 @@ collectors:
|
|||
# -- Enable/disable all the metadata collectors.
|
||||
enabled: true
|
||||
|
||||
# -- This collector is deprecated and will be removed in the future. Please use the containerEngine collector instead.
|
||||
docker:
|
||||
# -- Enable Docker support.
|
||||
enabled: true
|
||||
enabled: false
|
||||
# -- The path of the Docker daemon socket.
|
||||
socket: /var/run/docker.sock
|
||||
|
||||
# -- This collector is deprecated and will be removed in the future. Please use the containerEngine collector instead.
|
||||
containerd:
|
||||
# -- Enable ContainerD support.
|
||||
enabled: true
|
||||
enabled: false
|
||||
# -- The path of the ContainerD socket.
|
||||
socket: /run/containerd/containerd.sock
|
||||
socket: /run/host-containerd/containerd.sock
|
||||
|
||||
# -- This collector is deprecated and will be removed in the future. Please use the containerEngine collector instead.
|
||||
crio:
|
||||
# -- Enable CRI-O support.
|
||||
enabled: true
|
||||
enabled: false
|
||||
# -- The path of the CRI-O socket.
|
||||
socket: /run/crio/crio.sock
|
||||
|
||||
# -- This collector is the new container engine collector that replaces the old docker, containerd, crio and podman collectors.
|
||||
# It is designed to collect metadata from various container engines and provide a unified interface through the container plugin.
|
||||
# When enabled, it will deploy the container plugin and use it to collect metadata from the container engines.
|
||||
# Keep in mind that the old collectors (docker, containerd, crio, podman) will use the container plugin to collect metadata under the hood.
|
||||
containerEngine:
|
||||
# -- Enable Container Engine support.
|
||||
enabled: true
|
||||
# -- pluginRef is the OCI reference for the container plugin. It could be a full reference such as
|
||||
# "ghcr.io/falcosecurity/plugins/plugin/container:0.3.5". Or just name + tag: container:0.3.5.
|
||||
pluginRef: "ghcr.io/falcosecurity/plugins/plugin/container:0.3.5"
|
||||
# -- labelMaxLen is the maximum length of the labels that can be used in the container plugin.
|
||||
# container labels larger than this value won't be collected.
|
||||
labelMaxLen: 100
|
||||
# -- withSize specifies whether to enable container size inspection, which is inherently slow.
|
||||
withSize: false
|
||||
# -- hooks specify the hooks that will be used to collect metadata from the container engine.
|
||||
# The available hooks are: create, start.
|
||||
hooks: ["create"]
|
||||
# -- engines specify the container engines that will be used to collect metadata.
|
||||
# See https://github.com/falcosecurity/plugins/blob/main/plugins/container/README.md#configuration
|
||||
engines:
|
||||
docker:
|
||||
enabled: true
|
||||
sockets: ["/var/run/docker.sock"]
|
||||
podman:
|
||||
enabled: true
|
||||
sockets: ["/run/podman/podman.sock"]
|
||||
containerd:
|
||||
enabled: true
|
||||
sockets: ["/run/host-containerd/containerd.sock"]
|
||||
cri:
|
||||
enabled: true
|
||||
sockets:
|
||||
[
|
||||
"/run/containerd/containerd.sock",
|
||||
"/run/crio/crio.sock",
|
||||
"/run/k3s/containerd/containerd.sock",
|
||||
"/run/host-containerd/containerd.sock",
|
||||
]
|
||||
lxc:
|
||||
enabled: true
|
||||
libvirt_lxc:
|
||||
enabled: true
|
||||
bpm:
|
||||
enabled: true
|
||||
|
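To make the containerEngine collector described above concrete, a unit-test sketch in the style of the k8smeta tests earlier in this diff could render the configmap with the collector enabled and confirm that the container plugin ends up in load_plugins. The function name is illustrative, and it assumes the same imports (helm, require, corev1, slices, filepath) and `unit` constants used by those tests:

func TestContainerPluginLoaded(t *testing.T) {
	t.Parallel()

	helmChartPath, err := filepath.Abs(unit.ChartPath)
	require.NoError(t, err)

	options := &helm.Options{SetValues: map[string]string{"collectors.containerEngine.enabled": "true"}}
	output := helm.RenderTemplate(t, options, helmChartPath, unit.ReleaseName, []string{"templates/configmap.yaml"})

	var cm corev1.ConfigMap
	helm.UnmarshalK8SYaml(t, output, &cm)
	var config map[string]interface{}
	helm.UnmarshalK8SYaml(t, cm.Data["falco.yaml"], &config)

	// With the container engine collector enabled, the "container" plugin
	// is expected to be listed among the plugins Falco loads.
	loadPlugins := config["load_plugins"].([]interface{})
	require.True(t, slices.Contains(loadPlugins, "container"))
}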
||||
# -- kubernetes holds the configuration for the kubernetes collector. Starting from version 0.37.0 of Falco, the legacy
|
||||
# kubernetes client has been removed. A new standalone component named k8s-metacollector and a Falco plugin have been developed
|
||||
# to solve the issues that were present in the old implementation. More info here: https://github.com/falcosecurity/falco/issues/2973
|
||||
|
@ -289,7 +443,7 @@ collectors:
|
|||
enabled: false
|
||||
# -- pluginRef is the OCI reference for the k8smeta plugin. It could be a full reference such as:
|
||||
# "ghcr.io/falcosecurity/plugins/plugin/k8smeta:0.1.0". Or just name + tag: k8smeta:0.1.0.
|
||||
pluginRef: "ghcr.io/falcosecurity/plugins/plugin/k8smeta:0.1.0"
|
||||
pluginRef: "ghcr.io/falcosecurity/plugins/plugin/k8smeta:0.3.1"
|
||||
# -- collectorHostname is the address of the k8s-metacollector. When not specified it will be set to match
|
||||
# k8s-metacollector service, e.g. falco-k8smetacollecto.falco.svc. If for any reason you need to override
|
||||
# it, make sure to set here the address of the k8s-metacollector.
|
||||
|
@ -299,7 +453,14 @@ collectors:
|
|||
# the value of the port named `broker-grpc` in k8s-metacollector.service.ports is used. The default values is 45000.
|
||||
# It is used by the k8smeta plugin to connect to the k8s-metacollector.
|
||||
collectorPort: ""
|
||||
|
||||
# verbosity level for the plugin logger: trace, debug, info, warning, error, critical.
|
||||
verbosity: info
|
||||
# The plugin needs to scan the '/proc' of the host on which is running.
|
||||
# In Falco usually we put the host '/proc' folder under '/host/proc' so
|
||||
# the default for this config is '/host'.
|
||||
# The path used here must not have a final '/'.
|
||||
# Deprecated since falco 0.41.0 and k8smeta 0.3.0.
|
||||
hostProc: /host

###########################
# Extras and customization #
@ -313,6 +474,9 @@ extra:
# -- Additional initContainers for Falco pods.
initContainers: []

# -- Override the hostname in the Falco pod
podHostname:

# -- certificates used by the webserver and gRPC server.
# paste certificate content or use helm with --set-file
# or use an existing secret containing key, crt, ca, as well as a pem bundle
@ -351,7 +515,7 @@ customRules:
# Falco integrations #
########################

# -- For configuration values, see https://github.com/falcosecurity/charts/blob/master/falcosidekick/values.yaml
# -- For configuration values, see https://github.com/falcosecurity/charts/blob/master/charts/falcosidekick/values.yaml
falcosidekick:
# -- Enable falcosidekick deployment.
enabled: false
@ -360,6 +524,14 @@ falcosidekick:
# -- Listen port. Default value: 2801
listenPort: ""

# -- Enable the response actions using Falco Talon.
responseActions:
enabled: false

# -- For configuration values, see https://github.com/falcosecurity/charts/blob/master/charts/falco-talon/values.yaml
# -- It must be used in conjunction with the response_actions.enabled option.
falco-talon: {}
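# An illustrative override, not a chart default: forward Falco alerts to falcosidekick and its
# UI, and switch on the Talon-based response actions. The webui key comes from the falcosidekick
# subchart's own values; the exact nesting of responseActions and falco-talon should be checked
# against this chart's values.yaml, so treat this as a sketch rather than a definitive layout.
#
# falcosidekick:
#   enabled: true
#   webui:
#     enabled: true
# responseActions:
#   enabled: true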

####################
# falcoctl config #
####################
@ -372,7 +544,7 @@ falcoctl:
# -- The image repository to pull from.
repository: falcosecurity/falcoctl
# -- The image tag to pull.
tag: "0.7.1"
tag: "0.11.2"
artifact:
# -- Runs "falcoctl artifact install" command as an init container. It is used to install artifacts before
# Falco starts. It provides them to Falco by using an emptyDir volume.
@ -412,8 +584,8 @@ falcoctl:
# -- List of indexes that falcoctl downloads and uses to locate and download artifacts. For more info see:
# https://github.com/falcosecurity/falcoctl/blob/main/proposals/20220916-rules-and-plugin-distribution.md#index-file-overview
indexes:
- name: falcosecurity
url: https://falcosecurity.github.io/falcoctl/index.yaml
- name: falcosecurity
url: https://falcosecurity.github.io/falcoctl/index.yaml
# -- Configuration used by the artifact commands.
artifact:
# -- List of artifact types that falcoctl will handle. If the configured refs resolve to an artifact whose type is not contained
@ -425,7 +597,7 @@ falcoctl:
# -- Resolve the dependencies for artifacts.
resolveDeps: true
# -- List of artifacts to be installed by the falcoctl init container.
refs: [falco-rules:3]
refs: [falco-rules:4]
# -- Directory where the rulesfiles are saved. The path is relative to the container, which in this case is an emptyDir
# also mounted by the Falco pod.
rulesfilesDir: /rulesfiles
@ -433,7 +605,7 @@ falcoctl:
pluginsDir: /plugins
follow:
# -- List of artifacts to be followed by the falcoctl sidecar container.
refs: [falco-rules:3]
refs: [falco-rules:4]
# -- How often the tool checks for new versions of the followed artifacts.
every: 6h
# -- HTTP endpoint that serves the API versions of the Falco instance. It is used to check if the new versions are compatible
@ -444,6 +616,65 @@ falcoctl:
# -- See the fields of the artifact.install section.
pluginsDir: /plugins

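# A sketch of how the same refs can be pinned from a parent values file (paths assume the
# falcoctl.config.artifact layout shown above); the install init container and the follow
# sidecar are usually kept on the same list:
#
# falcoctl:
#   config:
#     artifact:
#       install:
#         refs: [falco-rules:4]
#       follow:
#         refs: [falco-rules:4]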
# -- serviceMonitor holds the configuration for the ServiceMonitor CRD.
# A ServiceMonitor is a custom resource definition (CRD) used to configure how Prometheus should
# discover and scrape metrics from the Falco service.
serviceMonitor:
# -- create specifies whether a ServiceMonitor CRD should be created for a prometheus operator.
# https://github.com/coreos/prometheus-operator
# Enable it only if the ServiceMonitor CRD is installed in your cluster.
create: false
# -- path at which the metrics are exposed by Falco.
path: /metrics
# -- labels set of labels to be applied to the ServiceMonitor resource.
# If your Prometheus deployment is configured to use serviceMonitorSelector, then add the right
# label here in order for the ServiceMonitor to be selected for target discovery.
labels: {}
# -- selector set of labels that should match the labels on the Service targeted by the current serviceMonitor.
selector: {}
# -- interval specifies the time interval at which Prometheus should scrape metrics from the service.
interval: 15s
# -- scheme specifies the network protocol used by the metrics endpoint. In this case HTTP.
scheme: http
# -- tlsConfig specifies TLS (Transport Layer Security) configuration for secure communication when
# scraping metrics from a service. It allows you to define the details of the TLS connection, such as
# CA certificate, client certificate, and client key. Currently, the k8s-metacollector does not support
# TLS configuration for the metrics endpoint.
tlsConfig:
{}
# insecureSkipVerify: false
# caFile: /path/to/ca.crt
# certFile: /path/to/client.crt
# keyFile: /path/to/client.key
# -- scrapeTimeout determines the maximum time Prometheus should wait for a target to respond to a scrape request.
# If the target does not respond within the specified timeout, Prometheus considers the scrape as failed for
# that target.
scrapeTimeout: 10s
# -- relabelings configures the relabeling rules to apply to the target's metadata labels.
relabelings: []
# -- targetLabels defines the labels which are transferred from the associated Kubernetes service object onto the ingested metrics.
targetLabels: []
# -- endpointPort is the port in the Falco service that exposes the metrics service. Change the value if you deploy a custom service
# for Falco's metrics.
endpointPort: "metrics"
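# A minimal sketch for scraping Falco with the Prometheus Operator; the release label is an
# assumption and has to match whatever your Prometheus serviceMonitorSelector expects. Falco's
# metrics and web server must be enabled as well (see the metrics and webserver settings below).
#
# serviceMonitor:
#   create: true
#   labels:
#     release: kube-prometheus-stack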

# -- grafana contains the configuration related to grafana.
grafana:
# -- dashboards contains configuration for grafana dashboards.
dashboards:
# -- enabled specifies whether the dashboards should be deployed.
enabled: false
# -- configMaps to be deployed that contain a grafana dashboard.
configMaps:
# -- falco contains the configuration for falco's dashboard.
falco:
# -- name specifies the name for the configmap.
name: falco-grafana-dashboard
# -- namespace specifies the namespace for the configmap.
namespace: ""
# -- folder where the dashboard is stored by grafana.
folder: ""
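# As an illustration only (assuming a Grafana deployment that picks up labeled dashboard
# configmaps, e.g. the common sidecar pattern), the generated configmap can be placed in the
# monitoring namespace and grouped under a dedicated folder:
#
# grafana:
#   dashboards:
#     enabled: true
#     configMaps:
#       falco:
#         name: falco-grafana-dashboard
#         namespace: monitoring
#         folder: falco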

######################
# falco.yaml config #
######################
@ -480,12 +711,45 @@ falco:
# desired customizations and rule behaviors are prioritized and applied as
# intended.
# -- The location of the rules files that will be consumed by Falco.
rules_file:
rules_files:
- /etc/falco/falco_rules.yaml
- /etc/falco/falco_rules.local.yaml
- /etc/falco/rules.d

# [Experimental] `rule_matching`
# [Incubating] `rules`
#
# --- [Description]
#
# Falco rules can be enabled or disabled by name (with wildcards *) and/or by tag.
#
# This configuration is applied after all rules files have been loaded, including
# their overrides, and will take precedence over the enabled/disabled configuration
# specified or overridden in the rules files.
#
# The ordering matters and selections are evaluated in order. For instance, if you
# need to only enable a rule you would first disable all of them and then only
# enable what you need, regardless of the enabled status in the files.
#
# --- [Examples]
#
# Only enable two rules:
#
# rules:
#   - disable:
#       rule: "*"
#   - enable:
#       rule: Netcat Remote Code Execution in Container
#   - enable:
#       rule: Delete or rename shell history
#
# Disable all rules with a specific tag:
#
# rules:
#   - disable:
#       tag: network
#

# [Incubating] `rule_matching`
#
# - Falco has to be performant when evaluating rules against events. To quickly
# understand which rules could trigger on a specific event, Falco maintains
@ -503,12 +767,11 @@ falco:

rule_matching: first

# [Experimental] `outputs_queue`
# [Incubating] `outputs_queue`
#
# -- Falco utilizes tbb::concurrent_bounded_queue for handling outputs, and this parameter
# allows you to customize the queue capacity. Please refer to the official documentation:
# https://oneapi-src.github.io/oneTBB/main/tbb_userguide/Concurrent_Queue_Classes.html.
# https://uxlfoundation.github.io/oneTBB/main/tbb_userguide/Concurrent_Queue_Classes.html.
# On a healthy system with optimized Falco rules, the queue should not fill up.
# If it does, it is most likely happening due to the entire event flow being too slow,
# indicating that the server is under heavy load.
@ -525,7 +788,6 @@ falco:
outputs_queue:
capacity: 0
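# An illustrative override (the value is arbitrary, placed under the falco key of this chart):
# bound the queue so a stalled output consumer shows up as dropped outputs rather than as
# unbounded memory growth.
#
# falco:
#   outputs_queue:
#     capacity: 5000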

#################
# Falco plugins #
#################
@ -589,6 +851,40 @@ falco:
# Falco config files #
######################

# [Stable] `config_files`
#
# Falco will load additional config files specified here.
# Their loading is assumed to be made *after* the main config file has been processed,
# exactly in the order they are specified.
# Therefore, loaded config files *can* override values from the main config file.
# Also, nested include is not allowed, i.e. included config files won't be able to include other config files.
#
# Like for `rules_files`, specifying a folder will load all the config files present in it in lexicographical order.
#
# 3 merge-strategies are available:
# `append` (default):
#   * existing sequence keys will be appended
#   * existing scalar keys will be overridden
#   * non-existing keys will be added
# `override`:
#   * existing keys will be overridden
#   * non-existing keys will be added
# `add-only`:
#   * existing keys will be ignored
#   * non-existing keys will be added
#
# Each item on the list can be either a yaml map or a simple string.
# The simple string will be interpreted as the config file path, and the `append` merge-strategy will be enforced.
# When the item is a yaml map instead, it will be of the form: ` path: foo\n strategy: X`.
# When `strategy` is omitted, once again `append` is used.
#
# When a merge-strategy is enabled for a folder entry, all the included config files will use that merge-strategy.
config_files:
- /etc/falco/config.d
# Example of config file specified as yaml map with strategy made explicit.
# - path: $HOME/falco_local_configs/
#   strategy: add-only

# [Stable] `watch_config_files`
#
# Falco monitors configuration and rule files for changes and automatically
@ -648,6 +944,22 @@ falco:
# be added at a later stage, it is recommended to turn it off.
json_include_tags_property: true

# [Incubating] `json_include_message_property`
#
# When using JSON output in Falco, you have the option to include the formatted
# rule output without timestamp or priority. For instance, if a rule specifies
# an "output" property like "Opened process %proc.name" the "message" field will
# only contain "Opened process bash" whereas the "output" field will contain more
# information.
json_include_message_property: false

# [Incubating] `json_include_output_fields_property`
#
# When using JSON output in Falco, you have the option to include the individual
# output fields for easier access. To reduce the logging volume, it is recommended
# to turn it off if it's not necessary for your use case.
json_include_output_fields_property: true
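# For illustration only (the field values below are made up), a JSON alert with the properties
# above enabled takes roughly this shape: "message" is the bare rule output, "output_fields"
# holds the individual extracted fields, and "tags" comes from json_include_tags_property.
#
# {"rule": "Terminal shell in container", "priority": "Notice",
#  "message": "A shell was spawned in a container ...",
#  "output": "10:25:13.000000000: Notice A shell was spawned in a container ...",
#  "output_fields": {"proc.name": "bash", "container.id": "abc123"},
#  "tags": ["container", "shell"]}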

# [Stable] `buffered_outputs`
#
# -- Enabling buffering for the output queue can offer performance optimization,
@ -655,30 +967,55 @@ falco:
# output mechanism. By default, buffering is disabled (false).
buffered_outputs: false

# [Stable] `outputs`
# [Sandbox] `append_output`
#
# -- A throttling mechanism, implemented as a token bucket, can be used to control
# the rate of Falco outputs. Each event source has its own rate limiter,
# ensuring that alerts from one source do not affect the throttling of others.
# The following options control the mechanism:
# - rate: the number of tokens (i.e. right to send a notification) gained per
# second. When 0, the throttling mechanism is disabled. Defaults to 0.
# - max_burst: the maximum number of tokens outstanding. Defaults to 1000.
# Add information to the Falco output.
# With this setting you can add more information to the Falco output message, customizable by
# rule, tag or source.
# You can also add additional data that will appear in the output_fields property
# of JSON formatted messages or gRPC output but will not be part of the regular output message.
# This allows you to add custom fields that can help you filter your Falco events without
# polluting the message text.
#
# For example, setting the rate to 1 allows Falco to send up to 1000
# notifications initially, followed by 1 notification per second. The burst
# capacity is fully restored after 1000 seconds of no activity.
# Each append_output entry has an optional `match` map which specifies which rules will be
# affected.
# `match`:
#   `rule`: append output only to a specific rule
#   `source`: append output only to a specific source
#   `tags`: append output only to rules that have all of the specified tags
# If none of the above are specified (or `match` is omitted)
# output is appended to all events.
# If more than one match condition is specified output will be appended to events
# that match all conditions.
# And several options to add output:
#   `extra_output`: add output to the Falco message
#   `extra_fields`: add new fields to the JSON output and structured output, which will not
#                   affect the regular Falco message in any way. These can be specified as a
#                   custom name with a custom format or as any supported field
#                   (see: https://falco.org/docs/reference/rules/supported-fields/)
#   `suggested_output`: enable the use of extractor plugins suggested fields for the matching source output.
#
# Throttling can be useful in various scenarios, such as preventing notification
# floods, managing system load, controlling event processing, or complying with
# rate limits imposed by external systems or APIs. It allows for better resource
# utilization, avoids overwhelming downstream systems, and helps maintain a
# balanced and controlled flow of notifications.
# Example:
#
# With the default settings, the throttling mechanism is disabled.
outputs:
rate: 0
max_burst: 1000
# append_output:
#   - match:
#       source: syscall
#     extra_output: "on CPU %evt.cpu"
#     extra_fields:
#       - home_directory: "${HOME}"
#       - evt.hostname
#
# In the example above every event coming from the syscall source will get an extra message
# at the end telling the CPU number. In addition, if `json_output` is true, in the "output_fields"
# property you will find three new ones: "evt.cpu", "home_directory" which will contain the value of the
# environment variable $HOME, and "evt.hostname" which will contain the hostname.

# By default, we enable suggested_output for any source.
# This means that any extractor plugin that indicates some of its fields
# as suggested output formats, will see these fields in the output
# in the form "foo_bar=$foo.bar"
append_output:
- suggested_output: true

##########################
# Falco outputs channels #
@ -743,6 +1080,8 @@ falco:
compress_uploads: false
# -- keep_alive whether to keep alive the connection.
keep_alive: false
# Maximum consecutive timeouts of libcurl to ignore
max_consecutive_timeouts: 5

# [Stable] `program_output`
#
@ -857,6 +1196,11 @@ falco:
threadiness: 0
listen_port: 8765
k8s_healthz_endpoint: /healthz
# [Incubating] `prometheus_metrics_enabled`
#
# Enable the metrics endpoint providing Prometheus values
# It will only have an effect if metrics.enabled is set to true as well.
prometheus_metrics_enabled: false
ssl_enabled: false
ssl_certificate: /etc/falco/falco.pem

@ -900,8 +1244,8 @@ falco:
# "alert", "critical", "error", "warning", "notice", "info", "debug". It is not
# recommended for production use.
libs_logger:
enabled: false
severity: debug
enabled: true
severity: info

#################################################################################
# Falco logging / alerting / metrics related to software functioning (advanced) #
@ -1103,6 +1447,25 @@ falco:
# Falco's monitoring. These metrics help assess Falco's usage in relation to the
# server's workload intensity.
#
# `rules_counters_enabled`: Emit counts for each rule.
#
# `resource_utilization_enabled`: Emit CPU and memory usage metrics. CPU usage
# is reported as a percentage of one CPU and can be normalized to the total
# number of CPUs to determine overall usage. Memory metrics are provided in raw
# units (`kb` for `RSS`, `PSS` and `VSZ` or `bytes` for `container_memory_used`)
# and can be uniformly converted to megabytes (MB) using the
# `convert_memory_to_mb` functionality. In environments such as Kubernetes when
# deployed as daemonset, it is crucial to track Falco's container memory usage.
# To customize the path of the memory metric file, you can create an environment
# variable named `FALCO_CGROUP_MEM_PATH` and set it to the desired file path. By
# default, Falco uses the file `/sys/fs/cgroup/memory/memory.usage_in_bytes` to
# monitor container memory usage, which aligns with Kubernetes'
# `container_memory_working_set_bytes` metric. Finally, we emit the overall host
# CPU and memory usages, along with the total number of processes and open file
# descriptors (fds) on the host, obtained from the proc file system unrelated to
# Falco's monitoring. These metrics help assess Falco's usage in relation to the
# server's workload intensity.
#
# `state_counters_enabled`: Emit counters related to Falco's state engine, including
# added, removed threads or file descriptors (fds), and failed lookup, store, or
# retrieve actions in relation to Falco's underlying process cache table (threadtable).
@ -1113,6 +1476,9 @@ falco:
# counters reflect monotonic values since Falco's start and are exported at a
# constant stats interval.
#
# `kernel_event_counters_per_cpu_enabled`: Detailed kernel event and drop counters
# per CPU. Typically used when debugging and not in production.
#
# `libbpf_stats_enabled`: Exposes statistics similar to `bpftool prog show`,
# providing information such as the number of invocations of each BPF program
# attached by Falco and the time spent in each program measured in nanoseconds.
@ -1135,13 +1501,14 @@ falco:
interval: 1h
output_rule: true
# output_file: /tmp/falco_stats.jsonl
rules_counters_enabled: true
resource_utilization_enabled: true
state_counters_enabled: true
kernel_event_counters_enabled: true
libbpf_stats_enabled: true
convert_memory_to_mb: true
include_empty_values: false

kernel_event_counters_per_cpu_enabled: false
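# A minimal sketch for surfacing these counters to Prometheus through this chart's convenience
# values; the metrics block below is assumed to wire up falco.metrics together with the web
# server's Prometheus endpoint and the Service scraped by the serviceMonitor section above.
#
# metrics:
#   enabled: true
#   interval: 30m
# serviceMonitor:
#   create: true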

#######################################
# Falco performance tuning (advanced) #
@ -1262,37 +1629,71 @@ falco:
custom_set: []
repair: false

#################################################
# Falco cloud orchestration systems integration #
#################################################
##############
# Falco libs #
##############

# [Stable] Guidance for Kubernetes container engine command-line args settings
# [Experimental] `falco_libs` - Potentially subject to more frequent changes
#
# Modern cloud environments, particularly Kubernetes, heavily rely on
# containerized workload deployments. When capturing events with Falco, it
# becomes essential to identify the owner of the workload for which events are
# being captured, such as syscall events. Falco integrates with the container
# runtime to enrich its events with container information, including fields like
# `container.image.repository`, `container.image.tag`, ... , `k8s.ns.name`,
# `k8s.pod.name`, `k8s.pod.*` in the Falco output (Falco retrieves Kubernetes
# namespace and pod name directly from the container runtime, see
# https://falco.org/docs/reference/rules/supported-fields/#field-class-container).
# `thread_table_size`
#
# Furthermore, Falco exposes container events themselves as a data source for
# alerting. To achieve this integration with the container runtime, Falco
# requires access to the runtime socket. By default, for Kubernetes, Falco
# attempts to connect to the following sockets:
# "/run/containerd/containerd.sock", "/run/crio/crio.sock",
# "/run/k3s/containerd/containerd.sock". If you have a custom path, you can use
# the `--cri` option to specify the correct location.
# Set the maximum number of entries (the absolute maximum value can only be MAX UINT32)
# for Falco's internal threadtable (process cache). Please note that Falco operates at a
# granular level, focusing on individual threads. Falco rules reference the thread leader
# as the process. The size of the threadtable should typically be much higher than the
# number of currently alive processes. The default value should work well on modern
# infrastructures and be sufficient to absorb bursts.
#
# In some cases, you may encounter empty fields for container metadata. To
# address this, you can explore the `--disable-cri-async` option, which disables
# asynchronous fetching if the fetch operation is not completing quickly enough.
# Reducing its size can help in better memory management, but as a consequence, your
# process tree may be more frequently disrupted due to missing threads. You can explore
# `metrics.state_counters_enabled` to measure how the internal state handling is performing,
# and the fields called `n_drops_full_threadtable` or `n_store_evts_drops` will inform you
# if you should increase this value for optimal performance.
falco_libs:
thread_table_size: 262144
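# An illustrative bump (the value is arbitrary and assumes the falco.falco_libs path of this
# chart) for hosts that keep a very large number of threads alive:
#
# falco:
#   falco_libs:
#     thread_table_size: 524288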

# [Incubating] `container_engines`
#
# To get more information on these command-line arguments, you can run `falco
# --help` in your terminal to view their current descriptions.
# This option allows you to explicitly enable or disable API lookups against container
# runtime sockets for each supported container runtime.
# Access to these sockets enables Falco to retrieve container and Kubernetes fields,
# helping identify workload owners in modern containerized environments.
# Refer to the fields docs:
#
# !!! The options mentioned here are not available in the falco.yaml
# configuration file. Instead, they can be used as a command-line argument
# when running the Falco command.
# - [Kubernetes fields](https://falco.org/docs/reference/rules/supported-fields/#field-class-k8s)
# - [Container fields](https://falco.org/docs/reference/rules/supported-fields/#container)
#
# Additionally, Falco can use container events as a data source for alerting (evt.type = container).
#
# For most container engines, you can solely enable or disable them, and Falco will search the
# default (hard-coded) container runtime socket paths, such as `/var/run/docker.sock` for Docker.
#
# However, for Kubernetes settings, you can customize the CRI socket paths:
#
# - `container_engines.cri.sockets`: Pass a list of container runtime sockets.
# - `container_engines.cri.disable_async`: Since API lookups may not always be quick or
#   perfect, resulting in empty fields for container metadata, you can use this option
#   to disable asynchronous fetching. Note that missing fields may still occasionally occur.

# Please use the collectors section to configure the container engines.

container_engines:
docker:
enabled: false
cri:
enabled: false
sockets:
[
"/run/containerd/containerd.sock",
"/run/crio/crio.sock",
"/run/k3s/containerd/containerd.sock",
]
disable_async: false
podman:
enabled: false
lxc:
enabled: false
libvirt_lxc:
enabled: false
bpm:
enabled: false
@ -5,604 +5,749 @@ numbering uses [semantic versioning](http://semver.org).
|
|||
|
||||
Before release 0.1.20, the helm chart can be found in `falcosidekick` [repository](https://github.com/falcosecurity/falcosidekick/tree/master/deploy/helm/falcosidekick).
|
||||
|
||||
## 0.10.2
|
||||
|
||||
- Add type information to `volumeClaimTemplates`.
|
||||
|
||||
## 0.10.1
|
||||
|
||||
- Add an "or" condition for `configmap-ui`
|
||||
|
||||
## 0.10.0
|
||||
|
||||
- Add new features to the Loki dashboard
|
||||
|
||||
## 0.9.11
|
||||
|
||||
- Add `customtags` setting
|
||||
|
||||
## 0.9.10
|
||||
|
||||
- Fix missing values in the README
|
||||
|
||||
## 0.9.9
|
||||
|
||||
- Added Azure Workload Identity for Falcosidekick
|
||||
|
||||
## 0.9.8
|
||||
|
||||
- Upgrade to Falcosidekick 2.31.1 (fix last release)
|
||||
|
||||
## 0.9.7
|
||||
|
||||
- Upgrade to Falcosidekick 2.31.1
|
||||
|
||||
## 0.9.6
|
||||
|
||||
- Upgrade to Falcosidekick 2.31.0
|
||||
|
||||
## 0.9.5
|
||||
|
||||
- Move the `prometheus.io/scrape` annotation to the default values, to allow overrides.
|
||||
|
||||
## 0.9.4
|
||||
|
||||
- Fix Prometheus metrics names in Prometheus Rule
|
||||
|
||||
## 0.9.3
|
||||
|
||||
- Add a Grafana dashboard for the Prometheus metrics
|
||||
|
||||
## 0.9.2
|
||||
|
||||
- Add new dashboard with Loki
|
||||
|
||||
## 0.9.1
|
||||
|
||||
- Upgrade to Falcosidekick 2.30.0
|
||||
|
||||
## 0.8.9
|
||||
|
||||
- Fix customConfig mount path for webui redis
|
||||
|
||||
## 0.8.8
|
||||
|
||||
- Fix customConfig template for webui redis
|
||||
|
||||
## 0.8.7
|
||||
|
||||
- Fix securityContext for webui initContainer
|
||||
|
||||
## 0.8.6
|
||||
|
||||
- Use of `redis-cli` by the initContainer of Falcosidekick-UI to wait until the Redis is up and running
|
||||
- Add the possibility to override the default redis server settings
|
||||
- Allow to set up a password to use with an external redis
|
||||
- Fix wrong value used for `OTLP_TRACES_PROTOCOL` env var
|
||||
- Used names for the priorities in the prometheus rules
|
||||
|
||||
## 0.8.5
|
||||
|
||||
- Fix an issue with the by default missing custom CA cert
|
||||
|
||||
## 0.8.4
|
||||
|
||||
- Fix falcosidekick chart ignoring custom service type for webui redis
|
||||
|
||||
## 0.8.3
|
||||
|
||||
- Add a condition to create the secrets for the redis only if the webui is deployed
|
||||
|
||||
## 0.8.2
|
||||
|
||||
- Fix redis-availability check of the UI init-container in case externalRedis is enabled
|
||||
|
||||
## 0.8.1
|
||||
|
||||
- Allow to set resources, securityContext and image overwrite for wait-redis initContainer
|
||||
|
||||
## 0.8.0
|
||||
|
||||
- Upgrade to Falcosidekick 2.29.0
|
||||
- Allow to set custom labels and annotations to set to all resources
|
||||
- Allow to use an existing secrets and values for the env vars at the same time
|
||||
- Fix missing ingressClassName settings in the values.yaml
|
||||
- Add of an initContainer to check if the redis for falcosidekick-ui is up
|
||||
|
||||
## 0.7.22
|
||||
|
||||
- Upgrade redis-stack image to 7.2.0-v11
|
||||
|
||||
## 0.7.21
|
||||
|
||||
- Fix the Falco Sidekick WEBUI_URL secret value.
|
||||
|
||||
## 0.7.20
|
||||
|
||||
- Align Web UI service port from values.yaml file with Falco Sidekick WEBUI_URL secret value.
|
||||
|
||||
## 0.7.19
|
||||
|
||||
- Enhanced the ServiceMonitor to support additional properties.
|
||||
- Fix the promql query for prometheusRules: FalcoErrorOutputEventsRateHigh.
|
||||
|
||||
## 0.7.18
|
||||
|
||||
- Fix PrometheusRule duplicate alert name
|
||||
|
||||
## 0.7.17
|
||||
|
||||
- Fix the labels for the serviceMonitor
|
||||
|
||||
## 0.7.16
|
||||
|
||||
- Fix the error with the `NOTES` (`index of untyped nil Use`) when the ingress is enabled for falcosidekick-ui
|
||||
|
||||
## 0.7.15
|
||||
|
||||
- Fix ServiceMonitor selector labels
|
||||
|
||||
## 0.7.14
|
||||
|
||||
- Fix duplicate component labels
|
||||
|
||||
## 0.7.13
|
||||
|
||||
- Fix ServiceMonitor port name and selector labels
|
||||
|
||||
## 0.7.12
|
||||
|
||||
* Align README values with the values.yaml file
|
||||
- Align README values with the values.yaml file
|
||||
|
||||
## 0.7.11
|
||||
|
||||
* Fix a link in the falcosidekick README to the policy report output documentation
|
||||
- Fix a link in the falcosidekick README to the policy report output documentation
|
||||
|
||||
## 0.7.10
|
||||
|
||||
* Set Helm recommended labels (`app.kubernetes.io/name`, `app.kubernetes.io/instance`, `app.kubernetes.io/version`, `helm.sh/chart`, `app.kubernetes.io/part-of`, `app.kubernetes.io/managed-by`) using helpers.tpl
|
||||
- Set Helm recommended labels (`app.kubernetes.io/name`, `app.kubernetes.io/instance`, `app.kubernetes.io/version`, `helm.sh/chart`, `app.kubernetes.io/part-of`, `app.kubernetes.io/managed-by`) using helpers.tpl
|
||||
|
||||
## 0.7.9
|
||||
|
||||
* noop change to the chart itself. Updated makefile.
|
||||
- noop change to the chart itself. Updated makefile.
|
||||
|
||||
## 0.7.8
|
||||
|
||||
* Fix the condition for missing cert files
|
||||
- Fix the condition for missing cert files
|
||||
|
||||
## 0.7.7
|
||||
|
||||
* Support extraArgs in the helm chart
|
||||
- Support extraArgs in the helm chart
|
||||
|
||||
## 0.7.6
|
||||
|
||||
* Fix the behavior with the `AWS IRSA` with a new value `aws.config.useirsa`
|
||||
* Add a section in the README to describe how to use a subpath for `Falcosidekick-ui` ingress
|
||||
* Add a `ServiceMonitor` for prometheus-operator
|
||||
* Add a `PrometheusRule` for prometheus-operator
|
||||
- Fix the behavior with the `AWS IRSA` with a new value `aws.config.useirsa`
|
||||
- Add a section in the README to describe how to use a subpath for `Falcosidekick-ui` ingress
|
||||
- Add a `ServiceMonitor` for prometheus-operator
|
||||
- Add a `PrometheusRule` for prometheus-operator
|
||||
|
||||
## 0.7.5
|
||||
|
||||
* noop change just to test the ci
|
||||
- noop change just to test the ci
|
||||
|
||||
## 0.7.4
|
||||
|
||||
* Fix volume mount when `config.tlsserver.servercrt`, `config.tlsserver.serverkey` and `config.tlsserver.cacrt` variables are defined.
|
||||
- Fix volume mount when `config.tlsserver.servercrt`, `config.tlsserver.serverkey` and `config.tlsserver.cacrt` variables are defined.
|
||||
|
||||
## 0.7.3
|
||||
|
||||
* Allow to set (m)TLS Server cryptographic material via `config.tlsserver.servercrt`, `config.tlsserver.serverkey` and `config.tlsserver.cacrt` variables or through `config.tlsserver.existingSecret` variables.
|
||||
- Allow to set (m)TLS Server cryptographic material via `config.tlsserver.servercrt`, `config.tlsserver.serverkey` and `config.tlsserver.cacrt` variables or through `config.tlsserver.existingSecret` variables.
|
||||
|
||||
## 0.7.2
|
||||
|
||||
* Fix the wrong key of the secret for the user
|
||||
- Fix the wrong key of the secret for the user
|
||||
|
||||
## 0.7.1
|
||||
|
||||
* Allow to set a password `webui.redis.password` for Redis for `Falcosidekick-UI`
|
||||
* The user for `Falcosidekick-UI` is now set with an env var from a secret
|
||||
- Allow to set a password `webui.redis.password` for Redis for `Falcosidekick-UI`
|
||||
- The user for `Falcosidekick-UI` is now set with an env var from a secret
|
||||
|
||||
## 0.7.0
|
||||
|
||||
* Support configuration of revisionHistoryLimit of the deployments
|
||||
- Support configuration of revisionHistoryLimit of the deployments
|
||||
|
||||
## 0.6.3
|
||||
|
||||
* Update Falcosidekick to 2.28.0
|
||||
* Add Mutual TLS Client config
|
||||
* Add TLS Server config
|
||||
* Add `bracketreplacer` config
|
||||
* Add `customseveritymap` to `alertmanager` output
|
||||
* Add Drop Event config to `alertmanager` output
|
||||
* Add `customheaders` to `elasticsearch` output
|
||||
* Add `customheaders` to `loki` output
|
||||
* Add `customheaders` to `grafana` output
|
||||
* Add `rolearn` and `externalid` for `aws` outputs
|
||||
* Add `method` to `webhook` output
|
||||
* Add `customattributes` to `gcp.pubsub` output
|
||||
* Add `region` to `pagerduty` output
|
||||
* Add `topiccreation` and `tls` to `kafka` output
|
||||
* Add `Grafana OnCall` output
|
||||
* Add `Redis` output
|
||||
* Add `Telegram` output
|
||||
* Add `N8N` output
|
||||
* Add `OpenObserver` output
|
||||
- Update Falcosidekick to 2.28.0
|
||||
- Add Mutual TLS Client config
|
||||
- Add TLS Server config
|
||||
- Add `bracketreplacer` config
|
||||
- Add `customseveritymap` to `alertmanager` output
|
||||
- Add Drop Event config to `alertmanager` output
|
||||
- Add `customheaders` to `elasticsearch` output
|
||||
- Add `customheaders` to `loki` output
|
||||
- Add `customheaders` to `grafana` output
|
||||
- Add `rolearn` and `externalid` for `aws` outputs
|
||||
- Add `method` to `webhook` output
|
||||
- Add `customattributes` to `gcp.pubsub` output
|
||||
- Add `region` to `pagerduty` output
|
||||
- Add `topiccreation` and `tls` to `kafka` output
|
||||
- Add `Grafana OnCall` output
|
||||
- Add `Redis` output
|
||||
- Add `Telegram` output
|
||||
- Add `N8N` output
|
||||
- Add `OpenObserver` output
|
||||
|
||||
## 0.6.2
|
||||
|
||||
* Fix interpolation of `SYSLOG_PORT`
|
||||
- Fix interpolation of `SYSLOG_PORT`
|
||||
|
||||
## 0.6.1
|
||||
|
||||
* Add `webui.allowcors` value for `Falcosidekick-UI`
|
||||
- Add `webui.allowcors` value for `Falcosidekick-UI`
|
||||
|
||||
## 0.6.0
|
||||
|
||||
* Change the docker image for the redis pod for falcosidekick-ui
|
||||
- Change the docker image for the redis pod for falcosidekick-ui
|
||||
|
||||
## 0.5.16
|
||||
|
||||
* Add `affinity`, `nodeSelector` and `tolerations` values for the Falcosidekick test-connection pod
|
||||
- Add `affinity`, `nodeSelector` and `tolerations` values for the Falcosidekick test-connection pod
|
||||
|
||||
## 0.5.15
|
||||
|
||||
* Set extra labels and annotations for `AlertManager` only if they're not empty
|
||||
- Set extra labels and annotations for `AlertManager` only if they're not empty
|
||||
|
||||
## 0.5.14
|
||||
|
||||
* Fix Prometheus extralabels configuration in Falcosidekick
|
||||
- Fix Prometheus extralabels configuration in Falcosidekick
|
||||
|
||||
## 0.5.13
|
||||
|
||||
* Fix missing quotes in Falcosidekick-UI ttl argument
|
||||
- Fix missing quotes in Falcosidekick-UI ttl argument
|
||||
|
||||
## 0.5.12
|
||||
|
||||
* Fix missing space in Falcosidekick-UI ttl argument
|
||||
- Fix missing space in Falcosidekick-UI ttl argument
|
||||
|
||||
## 0.5.11
|
||||
|
||||
* Fix missing space in Falcosidekick-UI arguments
|
||||
- Fix missing space in Falcosidekick-UI arguments
|
||||
|
||||
## 0.5.10
|
||||
|
||||
* upgrade Falcosidekick image to 2.27.0
|
||||
* upgrade Falcosidekick-UI image to 2.1.0
|
||||
* Add `Yandex Data Streams` output
|
||||
* Add `Node-Red` output
|
||||
* Add `MQTT` output
|
||||
* Add `Zincsearch` output
|
||||
* Add `Gotify` output
|
||||
* Add `Spyderbat` output
|
||||
* Add `Tekton` output
|
||||
* Add `TimescaleDB` output
|
||||
* Add `AWS Security Lake` output
|
||||
* Add `config.templatedfields` to set templated fields
|
||||
* Add `config.slack.channel` to override `Slack` channel
|
||||
* Add `config.alertmanager.extralabels` and `config.alertmanager.extraannotations` for `AlertManager` output
|
||||
* Add `config.influxdb.token`, `config.influxdb.organization` and `config.influxdb.precision` for `InfluxDB` output
|
||||
* Add `config.aws.checkidentity` to disallow STS checks
|
||||
* Add `config.smtp.authmechanism`, `config.smtp.token`, `config.smtp.identity`, `config.smtp.trace` to manage `SMTP` auth
|
||||
* Update default doc type for `Elasticsearch`
|
||||
* Add `config.loki.user`, `config.loki.apikey` to manage auth to Grafana Cloud for `Loki` output
|
||||
* Add `config.kafka.sasl`, `config.kafka.async`, `config.kafka.compression`, `config.kafka.balancer`, `config.kafka.clientid` to manage auth and communication for `Kafka` output
|
||||
* Add `config.syslog.format` to manage the format of `Syslog` payload
|
||||
* Add `webui.ttl` to set TTL of keys in Falcosidekick-UI
|
||||
* Add `webui.loglevel` to set log level in Falcosidekick-UI
|
||||
* Add `webui.user` to set log user:password in Falcosidekick-UI
|
||||
- upgrade Falcosidekick image to 2.27.0
|
||||
- upgrade Falcosidekick-UI image to 2.1.0
|
||||
- Add `Yandex Data Streams` output
|
||||
- Add `Node-Red` output
|
||||
- Add `MQTT` output
|
||||
- Add `Zincsearch` output
|
||||
- Add `Gotify` output
|
||||
- Add `Spyderbat` output
|
||||
- Add `Tekton` output
|
||||
- Add `TimescaleDB` output
|
||||
- Add `AWS Security Lake` output
|
||||
- Add `config.templatedfields` to set templated fields
|
||||
- Add `config.slack.channel` to override `Slack` channel
|
||||
- Add `config.alertmanager.extralabels` and `config.alertmanager.extraannotations` for `AlertManager` output
|
||||
- Add `config.influxdb.token`, `config.influxdb.organization` and `config.influxdb.precision` for `InfluxDB` output
|
||||
- Add `config.aws.checkidentity` to disallow STS checks
|
||||
- Add `config.smtp.authmechanism`, `config.smtp.token`, `config.smtp.identity`, `config.smtp.trace` to manage `SMTP` auth
|
||||
- Update default doc type for `Elasticsearch`
|
||||
- Add `config.loki.user`, `config.loki.apikey` to manage auth to Grafana Cloud for `Loki` output
|
||||
- Add `config.kafka.sasl`, `config.kafka.async`, `config.kafka.compression`, `config.kafka.balancer`, `config.kafka.clientid` to manage auth and communication for `Kafka` output
|
||||
- Add `config.syslog.format` to manage the format of `Syslog` payload
|
||||
- Add `webui.ttl` to set TTL of keys in Falcosidekick-UI
|
||||
- Add `webui.loglevel` to set log level in Falcosidekick-UI
|
||||
- Add `webui.user` to set log user:password in Falcosidekick-UI
|
||||
|
||||
## 0.5.9
|
||||
|
||||
* Fix: remove `namespace` from `clusterrole` and `clusterrolebinding` metadata
|
||||
- Fix: remove `namespace` from `clusterrole` and `clusterrolebinding` metadata
|
||||
|
||||
## 0.5.8
|
||||
|
||||
* Support `storageEnabled` for `redis` to allow ephemeral installs
|
||||
- Support `storageEnabled` for `redis` to allow ephemeral installs
|
||||
|
||||
## 0.5.7
|
||||
|
||||
* Removing unused Kafka config values
|
||||
- Removing unused Kafka config values
|
||||
|
||||
## 0.5.6
|
||||
|
||||
* Fixing Syslog's port import in `secrets.yaml`
|
||||
- Fixing Syslog's port import in `secrets.yaml`
|
||||
|
||||
## 0.5.5
|
||||
|
||||
* Add `webui.externalRedis` with `enabled`, `url` and `port` to values to set an external Redis database with RediSearch > v2 for the WebUI
|
||||
* Add `webui.redis.enabled` option to disable the deployment of the database.
|
||||
* `webui.redis.enabled ` and `webui.externalRedis.enabled` are mutually exclusive
|
||||
- Add `webui.externalRedis` with `enabled`, `url` and `port` to values to set an external Redis database with RediSearch > v2 for the WebUI
|
||||
- Add `webui.redis.enabled` option to disable the deployment of the database.
|
||||
- `webui.redis.enabled ` and `webui.externalRedis.enabled` are mutually exclusive
|
||||
|
||||
## 0.5.4
|
||||
|
||||
* Upgrade image to fix Panic of `Prometheus` output when `customfields` is set
|
||||
* Add `extralabels` for `Loki` and `Prometheus` outputs to set fields to use as labels
|
||||
* Add `expiresafter` for `AlertManager` output
|
||||
- Upgrade image to fix Panic of `Prometheus` output when `customfields` is set
|
||||
- Add `extralabels` for `Loki` and `Prometheus` outputs to set fields to use as labels
|
||||
- Add `expiresafter` for `AlertManager` output
|
||||
|
||||
## 0.5.3
|
||||
|
||||
* Support full configuration of `securityContext` blocks in falcosidekick and falcosidekick-ui deployments, and redis statefulset.
|
||||
- Support full configuration of `securityContext` blocks in falcosidekick and falcosidekick-ui deployments, and redis statefulset.
|
||||
|
||||
## 0.5.2
|
||||
|
||||
* Update Falcosidekick-UI image (fix wrong redirect to localhost when an ingress is used)
|
||||
- Update Falcosidekick-UI image (fix wrong redirect to localhost when an ingress is used)
|
||||
|
||||
## 0.5.1
|
||||
|
||||
* Support `ingressClassName` field in falcosidekick ingresses.
|
||||
- Support `ingressClassName` field in falcosidekick ingresses.
|
||||
|
||||
## 0.5.0
|
||||
|
||||
### Major Changes
|
||||
|
||||
* Add `Policy Report` output
|
||||
* Add `Syslog` output
|
||||
* Add `AWS Kinesis` output
|
||||
* Add `Zoho Cliq` output
|
||||
* Support IRSA for AWS authentication
|
||||
* Upgrade Falcosidekick-UI to v2.0.1
|
||||
- Add `Policy Report` output
|
||||
- Add `Syslog` output
|
||||
- Add `AWS Kinesis` output
|
||||
- Add `Zoho Cliq` output
|
||||
- Support IRSA for AWS authentication
|
||||
- Upgrade Falcosidekick-UI to v2.0.1
|
||||
|
||||
### Minor changes
|
||||
|
||||
* Allow to set custom Labels for pods
|
||||
- Allow to set custom Labels for pods
|
||||
|
||||
## 0.4.5
|
||||
|
||||
* Allow additional service-ui annotations
|
||||
- Allow additional service-ui annotations
|
||||
|
||||
## 0.4.4
|
||||
|
||||
* Fix output after chart installation when ingress is enabled
|
||||
- Fix output after chart installation when ingress is enabled
|
||||
|
||||
## 0.4.3
|
||||
|
||||
* Support `annotation` block in service
|
||||
- Support `annotation` block in service
|
||||
|
||||
## 0.4.2
|
||||
|
||||
* Fix: Added the rule to use the podsecuritypolicy
|
||||
* Fix: Added `ServiceAccountName` to the UI deployment
|
||||
- Fix: Added the rule to use the podsecuritypolicy
|
||||
- Fix: Added `ServiceAccountName` to the UI deployment
|
||||
|
||||
## 0.4.1
|
||||
|
||||
* Removes duplicate `Fission` keys from secret
|
||||
- Removes duplicate `Fission` keys from secret
|
||||
|
||||
## 0.4.0
|
||||
|
||||
### Major Changes
|
||||
|
||||
* Support Ingress API version `networking.k8s.io/v1`, see `ingress.hosts` and `webui.ingress.hosts` in [values.yaml](values.yaml) for a breaking change in the `path` parameter
|
||||
- Support Ingress API version `networking.k8s.io/v1`, see `ingress.hosts` and `webui.ingress.hosts` in [values.yaml](values.yaml) for a breaking change in the `path` parameter
|
||||
|
||||
## 0.3.17
|
||||
|
||||
* Fix: Remove the value for bucket of `Yandex S3`, it enabled the output by default
|
||||
- Fix: Remove the value for bucket of `Yandex S3`, it enabled the output by default
|
||||
|
||||
## 0.3.16
|
||||
|
||||
### Major Changes
|
||||
|
||||
* Fix: set correct new image 2.24.0
|
||||
- Fix: set correct new image 2.24.0
|
||||
|
||||
## 0.3.15
|
||||
|
||||
### Major Changes
|
||||
|
||||
* Add `Fission` output
|
||||
- Add `Fission` output
|
||||
|
||||
## 0.3.14
|
||||
|
||||
### Major Changes
|
||||
|
||||
* Add `Grafana` output
|
||||
* Add `Yandex Cloud S3` output
|
||||
* Add `Kafka REST` output
|
||||
- Add `Grafana` output
|
||||
- Add `Yandex Cloud S3` output
|
||||
- Add `Kafka REST` output
|
||||
|
||||
### Minor changes
|
||||
|
||||
* Docker image is now available on AWS ECR Public Gallery (`--set image.registry=public.ecr.aws`)
|
||||
- Docker image is now available on AWS ECR Public Gallery (`--set image.registry=public.ecr.aws`)
|
||||
|
||||
## 0.3.13
|
||||
|
||||
### Minor changes
|
||||
|
||||
* Enable extra volumes and volumemounts for `falcosidekick` via values
|
||||
- Enable extra volumes and volumemounts for `falcosidekick` via values
|
||||
|
||||
## 0.3.12
|
||||
|
||||
* Add AWS configuration field `config.aws.rolearn`
|
||||
- Add AWS configuration field `config.aws.rolearn`
|
||||
|
||||
## 0.3.11
|
||||
|
||||
### Minor changes
|
||||
|
||||
* Make image registries for `falcosidekick` and `falcosidekick-ui` configurable
|
||||
- Make image registries for `falcosidekick` and `falcosidekick-ui` configurable
|
||||
|
||||
## 0.3.10
|
||||
|
||||
### Minor changes
|
||||
|
||||
* Fix table formatting in `README.md`
|
||||
- Fix table formatting in `README.md`
|
||||
|
||||
## 0.3.9
|
||||
|
||||
### Fixes
|
||||
|
||||
* Add missing `imagePullSecrets` in `falcosidekick/templates/deployment-ui.yaml`
|
||||
- Add missing `imagePullSecrets` in `falcosidekick/templates/deployment-ui.yaml`
|
||||
|
||||
## 0.3.8
|
||||
|
||||
### Major Changes
|
||||
|
||||
* Add `GCP Cloud Run` output
|
||||
* Add `GCP Cloud Functions` output
|
||||
* Add `Wavefront` output
|
||||
* Allow MutualTLS for some outputs
|
||||
* Add basic auth for Elasticsearch output
|
||||
- Add `GCP Cloud Run` output
|
||||
- Add `GCP Cloud Functions` output
|
||||
- Add `Wavefront` output
|
||||
- Allow MutualTLS for some outputs
|
||||
- Add basic auth for Elasticsearch output
|
||||
|
||||
## 0.3.7
|
||||
|
||||
### Minor changes
|
||||
|
||||
* Fix table formatting in `README.md`
|
||||
* Fix `config.azure.eventHub` parameter name in `README.md`
|
||||
- Fix table formatting in `README.md`
|
||||
- Fix `config.azure.eventHub` parameter name in `README.md`
|
||||
|
||||
## 0.3.6
|
||||
|
||||
### Fixes
|
||||
|
||||
* Point to the correct name of aadpodidentity
|
||||
- Point to the correct name of aadpodidentity
|
||||
|
||||
## 0.3.5
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Fix link to Falco in the `README.md`
|
||||
- Fix link to Falco in the `README.md`
|
||||
|
||||
## 0.3.4
|
||||
|
||||
### Major Changes
|
||||
|
||||
* Bump up version (`v1.0.1`) of image for `falcosidekick-ui`
|
||||
- Bump up version (`v1.0.1`) of image for `falcosidekick-ui`
|
||||
|
||||
## 0.3.3
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Set default values for `OpenFaaS` output type parameters
|
||||
* Fixes of documentation
|
||||
- Set default values for `OpenFaaS` output type parameters
|
||||
- Fixes of documentation
|
||||
|
||||
## 0.3.2
|
||||
|
||||
### Fixes
|
||||
|
||||
* Add config checksum annotation to deployment pods to restart pods on config change
|
||||
* Fix statsd config options in the secret to make them match the docs
|
||||
- Add config checksum annotation to deployment pods to restart pods on config change
|
||||
- Fix statsd config options in the secret to make them match the docs
|
||||
|
||||
## 0.3.1
|
||||
|
||||
### Fixes
|
||||
|
||||
* Fix for `s3.bucket`, it should be empty
|
||||
- Fix for `s3.bucket`, it should be empty
|
||||
|
||||
## 0.3.0
|
||||
|
||||
### Major Changes
|
||||
|
||||
* Add `AWS S3` output
|
||||
* Add `GCP Storage` output
|
||||
* Add `RabbitMQ` output
|
||||
* Add `OpenFaas` output
|
||||
- Add `AWS S3` output
|
||||
- Add `GCP Storage` output
|
||||
- Add `RabbitMQ` output
|
||||
- Add `OpenFaas` output
|
||||
|
||||
## 0.2.9
|
||||
|
||||
### Major Changes
|
||||
|
||||
* Updated falcosidekick-ui default image version to `v0.2.0`
|
||||
- Updated falcosidekick-ui default image version to `v0.2.0`
|
||||
|
||||
## 0.2.8
|
||||
|
||||
### Fixes
|
||||
|
||||
* Fixed to specify `kafka.hostPort` instead of `kafka.url`
|
||||
- Fixed to specify `kafka.hostPort` instead of `kafka.url`
|
||||
|
||||
## 0.2.7
|
||||
|
||||
### Fixes
|
||||
|
||||
* Fixed missing hyphen in podidentity
|
||||
- Fixed missing hyphen in podidentity
|
||||
|
||||
## 0.2.6
|
||||
|
||||
### Fixes
|
||||
|
||||
* Fix repo and tag for `ui` image
|
||||
- Fix repo and tag for `ui` image
|
||||
|
||||
## 0.2.5
|
||||
|
||||
### Major Changes
|
||||
|
||||
* Add `CLOUDEVENTS` output
|
||||
* Add `WEBUI` output
|
||||
- Add `CLOUDEVENTS` output
|
||||
- Add `WEBUI` output
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Add details about syntax for adding `custom_fields`
|
||||
- Add details about syntax for adding `custom_fields`
|
||||
|
||||
## 0.2.4
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Add `DATADOG_HOST` to secret
|
||||
- Add `DATADOG_HOST` to secret
|
||||
|
||||
## 0.2.3
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Allow additional pod annotations
|
||||
* Remove namespace condition in aad-pod-identity
|
||||
- Allow additional pod annotations
|
||||
- Remove namespace condition in aad-pod-identity
|
||||
|
||||
## 0.2.2
|
||||
|
||||
### Major Changes
|
||||
|
||||
* Add `Kubeless` output
|
||||
- Add `Kubeless` output
|
||||
|
||||
## 0.2.1
|
||||
|
||||
### Major Changes
|
||||
|
||||
* Add `PagerDuty` output
|
||||
- Add `PagerDuty` output
|
||||
|
||||
## 0.2.0
|
||||
|
||||
### Major Changes
|
||||
|
||||
* Add option to use an existing secret
|
||||
* Add option to add extra environment variables
|
||||
* Add `Stan` output
|
||||
- Add option to use an existing secret
|
||||
- Add option to add extra environment variables
|
||||
- Add `Stan` output
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Use the Existing secret resource and add all possible variables to there, and make it simpler to read and less error-prone in the deployment resource
|
||||
- Use the Existing secret resource and add all possible variables to there, and make it simpler to read and less error-prone in the deployment resource
|
||||
|
||||
## 0.1.37
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Fix aws keys not being added to the deployment
|
||||
- Fix aws keys not being added to the deployment
|
||||
|
||||
## 0.1.36
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Fix helm test
|
||||
- Fix helm test
|
||||
|
||||
## 0.1.35
|
||||
|
||||
### Major Changes
|
||||
|
||||
* Update image to use release 2.19.1
|
||||
- Update image to use release 2.19.1
|
||||
|
||||
## 0.1.34
|
||||
|
||||
* New outputs can be set : `Kafka`, `AWS CloudWatchLogs`
|
||||
- New outputs can be set : `Kafka`, `AWS CloudWatchLogs`
|
||||
|
||||
## 0.1.33
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Fixed GCP Pub/Sub values references in `deployment.yaml`
|
||||
- Fixed GCP Pub/Sub values references in `deployment.yaml`
|
||||
|
||||
## 0.1.32
|
||||
|
||||
### Major Changes
|
||||
|
||||
* Support release namespace configuration
|
||||
- Support release namespace configuration
|
||||
|
||||
## 0.1.31
|
||||
|
||||
### Major Changes
|
||||
|
||||
* New outputs can be set : `Googlechat`
|
||||
- New outputs can be set : `Googlechat`
|
||||
|
||||
## 0.1.30
|
||||
|
||||
### Major changes
|
||||
|
||||
* New output can be set : `GCP PubSub`
|
||||
* Custom Headers can be set for `Webhook` output
|
||||
* Fix typo `aipKey` for OpsGenie output
|
||||
- New output can be set : `GCP PubSub`
|
||||
- Custom Headers can be set for `Webhook` output
|
||||
- Fix typo `aipKey` for OpsGenie output
|
||||
|
||||
## 0.1.29
|
||||
|
||||
* Fix falcosidekick configuration table to use full path of configuration properties in the `README.md`
|
||||
- Fix falcosidekick configuration table to use full path of configuration properties in the `README.md`
|
||||
|
||||
## 0.1.28
|
||||
|
||||
### Major changes
|
||||
|
||||
* New output can be set : `AWS SNS`
|
||||
* Metrics in `prometheus` format can be scraped from `/metrics` URI
|
||||
- New output can be set : `AWS SNS`
|
||||
- Metrics in `prometheus` format can be scraped from `/metrics` URI
|
||||
|
||||
## 0.1.27
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Replace extensions apiGroup/apiVersion because of deprecation
|
||||
- Replace extensions apiGroup/apiVersion because of deprecation
|
||||
|
||||
## 0.1.26
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Allow the creation of a PodSecurityPolicy, disabled by default
|
||||
- Allow the creation of a PodSecurityPolicy, disabled by default
|
||||
|
||||
## 0.1.25
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Allow the configuration of the Pod securityContext, set default runAsUser and fsGroup values
|
||||
- Allow the configuration of the Pod securityContext, set default runAsUser and fsGroup values
|
||||
|
||||
## 0.1.24
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Remove duplicated `webhook` block in `values.yaml`
|
||||
- Remove duplicated `webhook` block in `values.yaml`
|
||||
|
||||
## 0.1.23
|
||||
|
||||
* fake release for triggering CI for auto-publishing
|
||||
- fake release for triggering CI for auto-publishing
|
||||
|
||||
## 0.1.22
|
||||
|
||||
### Major Changes
|
||||
|
||||
* Add `imagePullSecrets`
|
||||
- Add `imagePullSecrets`
|
||||
|
||||
## 0.1.21
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Fix `Azure Identity` case sensitive value
|
||||
- Fix `Azure Identity` case sensitive value
|
||||
|
||||
## 0.1.20
|
||||
|
||||
### Major Changes
|
||||
|
||||
* New outputs can be set : `Azure Event Hubs`, `Discord`
|
||||
- New outputs can be set : `Azure Event Hubs`, `Discord`
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Fix wrong port name in output
|
||||
- Fix wrong port name in output
|
||||
|
||||
## 0.1.17
|
||||
|
||||
### Major Changes
|
||||
|
||||
* New outputs can be set : `Mattermost`, `Rocketchat`
|
||||
- New outputs can be set : `Mattermost`, `Rocketchat`
|
||||
|
||||
## 0.1.11
|
||||
|
||||
### Major Changes
|
||||
|
||||
* Add Pod Security Policy
|
||||
- Add Pod Security Policy
|
||||
|
||||
## 0.1.11
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Fix wrong value reference for Elasticsearch output in deployment.yaml
|
||||
- Fix wrong value reference for Elasticsearch output in deployment.yaml
|
||||
|
||||
## 0.1.10
|
||||
|
||||
### Major Changes
|
||||
|
||||
* New output can be set : `DogStatsD`
|
||||
- New output can be set : `DogStatsD`
|
||||
|
||||
## 0.1.9
|
||||
|
||||
### Major Changes
|
||||
|
||||
* New output can be set : `StatsD`
|
||||
- New output can be set : `StatsD`
|
||||
|
||||
## 0.1.7
|
||||
|
||||
### Major Changes
|
||||
|
||||
* New output can be set : `Opsgenie`
|
||||
- New output can be set : `Opsgenie`
|
||||
|
||||
## 0.1.6
|
||||
|
||||
### Major Changes
|
||||
|
||||
* New output can be set : `NATS`
|
||||
- New output can be set : `NATS`
|
||||
|
||||
## 0.1.5
|
||||
|
||||
### Major Changes
|
||||
|
||||
* `Falcosidekick` and its chart are now part of `falcosecurity` organization
|
||||
- `Falcosidekick` and its chart are now part of `falcosecurity` organization
|
||||
|
||||
## 0.1.4
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Use more recent image with `Golang` 1.14
|
||||
- Use more recent image with `Golang` 1.14
|
||||
|
||||
## 0.1.3
|
||||
|
||||
### Major Changes
|
||||
|
||||
* New output can be set : `Loki`
|
||||
- New output can be set : `Loki`
|
||||
|
||||
## 0.1.2
|
||||
|
||||
### Major Changes
|
||||
|
||||
* New output can be set : `SMTP`
|
||||
- New output can be set : `SMTP`
|
||||
|
||||
## 0.1.1
|
||||
|
||||
### Major Changes
|
||||
|
||||
* New outputs can be set : `AWS Lambda`, `AWS SQS`, `Teams`
|
||||
- New outputs can be set : `AWS Lambda`, `AWS SQS`, `Teams`
|
||||
|
||||
## 0.1.0
|
||||
|
||||
### Major Changes
|
||||
|
||||
* Initial release of Falcosidekick Helm Chart
|
||||
- Initial release of Falcosidekick Helm Chart
|
||||
|
|
|
@ -1,9 +1,9 @@
apiVersion: v1
-appVersion: 2.28.0
+appVersion: 2.31.1
description: Connect Falco to your ecosystem
icon: https://raw.githubusercontent.com/falcosecurity/falcosidekick/master/imgs/falcosidekick_color.png
name: falcosidekick
-version: 0.7.12
+version: 0.10.2
keywords:
  - monitoring
  - security

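For context, the chart bump above (0.7.12 → 0.10.2, app 2.28.0 → 2.31.1) is what a downstream umbrella chart would pin. A minimal, hypothetical consumer-side Chart.yaml sketch; the repository URL is an assumption and is not taken from this diff:

```yaml
# Sketch only: pin the falcosidekick chart version introduced by this bump.
apiVersion: v2
name: my-platform                                    # placeholder umbrella chart name
version: 0.1.0
dependencies:
  - name: falcosidekick
    version: 0.10.2
    repository: https://falcosecurity.github.io/charts   # assumed charts repo URL
```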
@ -181,7 +181,7 @@ The following table lists the main configurable parameters of the Falcosidekick
|
|||
| config.alertmanager.expireafter | string | `""` | if set to a non-zero value, alert expires after that time in seconds (default: 0) |
|
||||
| config.alertmanager.extraannotations | string | `""` | comma separated list of annotations composed of a ':' separated name and value that is added to the Alerts. Example: my_annotation_1:my_value_1, my_annotation_2:my_value_2 |
|
||||
| config.alertmanager.extralabels | string | `""` | comma separated list of labels composed of a ':' separated name and value that is added to the Alerts. Example: my_label_1:my_value_1, my_label_2:my_value_2 |
|
||||
| config.alertmanager.hostport | string | `""` | AlertManager <http://host:port>, if not `empty`, AlertManager is *enabled* |
|
||||
| config.alertmanager.hostport | string | `""` | Comma separated list of http://{domain or ip}:{port} that will all receive the payload, if not empty, Alertmanager output is enabled |
|
||||
| config.alertmanager.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
|
||||
| config.alertmanager.mutualtls | bool | `false` | if true, checkcert flag will be ignored (server cert will always be checked) |
|
||||
| config.aws.accesskeyid | string | `""` | AWS Access Key Id (optional if you use EC2 Instance Profile) |
|
||||
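As a hedged illustration of the Alertmanager rows above, a minimal values.yaml fragment that would enable this output; the host/port and label values are assumptions, not part of this diff:

```yaml
# Sketch only: enable the Alertmanager output (addresses and labels assumed).
config:
  alertmanager:
    hostport: "http://alertmanager.monitoring:9093"   # assumed in-cluster address
    minimumpriority: "warning"
    extralabels: "environment:prod"                   # assumed extra label
```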
|
@ -197,7 +197,9 @@ The following table lists the main configurable parameters of the Falcosidekick
|
|||
| config.aws.region | string | `""` | AWS Region (optional if you use EC2 Instance Profile) |
|
||||
| config.aws.rolearn | string | `""` | AWS IAM role ARN for falcosidekick service account to associate with (optional if you use EC2 Instance Profile) |
|
||||
| config.aws.s3.bucket | string | `""` | AWS S3, bucket name |
|
||||
| config.aws.s3.endpoint | string | `""` | Endpoint URL that overrides the default generated endpoint, use this for S3 compatible APIs |
|
||||
| config.aws.s3.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
|
||||
| config.aws.s3.objectcannedacl | string | `"bucket-owner-full-control"` | Canned ACL (x-amz-acl) to use when creating the object |
|
||||
| config.aws.s3.prefix | string | `""` | AWS S3, name of prefix, keys will have format: s3://<bucket>/<prefix>/YYYY-MM-DD/YYYY-MM-DDTHH:mm:ss.s+01:00.json |
|
||||
| config.aws.secretaccesskey | string | `""` | AWS Secret Access Key (optional if you use EC2 Instance Profile) |
|
||||
| config.aws.securitylake.accountid | string | `""` | Account ID |
|
||||
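A small values.yaml sketch for the S3 parameters documented above, including the new `endpoint` and `objectcannedacl` keys; the bucket, region, and endpoint names are placeholders:

```yaml
# Sketch only: AWS S3 output against an S3-compatible API (names assumed).
config:
  aws:
    region: "eu-west-1"                          # placeholder region
    s3:
      bucket: "falco-events"                     # placeholder bucket
      prefix: "falco"
      endpoint: "https://minio.storage.svc:9000" # placeholder S3-compatible endpoint
      objectcannedacl: "bucket-owner-full-control"
```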
|
@ -220,6 +222,7 @@ The following table lists the main configurable parameters of the Falcosidekick
|
|||
| config.azure.podIdentityName | string | `""` | Azure Identity name |
|
||||
| config.azure.resourceGroupName | string | `""` | Azure Resource Group name |
|
||||
| config.azure.subscriptionID | string | `""` | Azure Subscription ID |
|
||||
| config.azure.workloadIdentityClientID | string | `""` | Azure Workload Identity Client ID |
|
||||
| config.bracketreplacer | string | `""` | if not empty, the brackets in keys of Output Fields are replaced |
|
||||
| config.cliq.icon | string | `""` | Cliq icon (avatar) |
|
||||
| config.cliq.messageformat | string | `""` | a Go template to format Google Chat Text above Attachment, displayed in addition to the output from `cliq.outputformat`. If empty, no Text is displayed before sections. |
|
||||
|
@ -231,9 +234,14 @@ The following table lists the main configurable parameters of the Falcosidekick
|
|||
| config.cloudevents.extension | string | `""` | Extensions to add in the outbound Event, useful for routing |
|
||||
| config.cloudevents.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
|
||||
| config.customfields | string | `""` | a list of escaped comma separated custom fields to add to falco events, syntax is "key:value\,key:value" |
|
||||
| config.customtags | string | `""` | a list of escaped comma separated custom tags to add to falco events, syntax is "tag\,tag" |
|
||||
| config.datadog.apikey | string | `""` | Datadog API Key, if not `empty`, Datadog output is *enabled* |
|
||||
| config.datadog.host | string | `""` | Datadog host. Override if you are on the Datadog EU site. Defaults to the American site with "<https://api.datadoghq.com>" |
|
||||
| config.datadog.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
|
||||
| config.datadoglogs.apikey | string | `""` | Datadog API Key, if not empty, Datadog Logs output is enabled |
|
||||
| config.datadoglogs.host | string | `""` | Datadog host. Override if you are on the Datadog EU site. Defaults to the American site with "https://http-intake.logs.datadoghq.com/" |
|
||||
| config.datadoglogs.minimumpriority | string | `""` | minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default) |
|
||||
| config.datadoglogs.service | string | `""` | The name of the application or service generating the log events. |
|
||||
| config.debug | bool | `false` | DEBUG environment variable |
|
||||
| config.discord.icon | string | `""` | Discord icon (avatar) |
|
||||
| config.discord.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
|
||||
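To illustrate the new `datadoglogs` keys listed above, a hedged values.yaml fragment; the API key is a placeholder and the host shown is the default from the table:

```yaml
# Sketch only: Datadog Logs output; apikey is a placeholder secret value.
config:
  datadoglogs:
    apikey: "<datadog-api-key>"
    host: "https://http-intake.logs.datadoghq.com/"
    service: "falco"
    minimumpriority: "error"
```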
|
@ -241,16 +249,32 @@ The following table lists the main configurable parameters of the Falcosidekick
|
|||
| config.dogstatsd.forwarder | string | `""` | The address for the DogStatsD forwarder, in the form <http://host:port>, if not empty DogStatsD is *enabled* |
|
||||
| config.dogstatsd.namespace | string | `"falcosidekick."` | A prefix for all metrics |
|
||||
| config.dogstatsd.tags | string | `""` | A comma-separated list of tags to add to all metrics |
|
||||
| config.dynatrace.apitoken | string | `""` | Dynatrace API token with the "logs.ingest" scope, more info : https://dt-url.net/8543sda, if not empty, Dynatrace output is enabled |
|
||||
| config.dynatrace.apiurl | string | `""` | Dynatrace API url, use https://ENVIRONMENTID.live.dynatrace.com/api for Dynatrace SaaS and https://YOURDOMAIN/e/ENVIRONMENTID/api for Dynatrace Managed, more info : https://dt-url.net/ej43qge |
|
||||
| config.dynatrace.checkcert | bool | `true` | check if ssl certificate of the output is valid |
|
||||
| config.dynatrace.minimumpriority | string | `""` | minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" |
|
||||
| config.elasticsearch.apikey | string | `""` | Use this APIKey to authenticate to Elasticsearch if the APIKey is not empty (default: "") |
|
||||
| config.elasticsearch.batching | object | `{"batchsize":"5242880","enabled":true,"flushinterval":"1s"}` | batching configuration, improves throughput dramatically utilizing _bulk Elasticsearch API |
|
||||
| config.elasticsearch.batching.batchsize | string | `"5242880"` | batch size in bytes (default: 5 MB) (use string to avoid the conversion into float64 by helm) |
|
||||
| config.elasticsearch.batching.enabled | bool | `true` | if true enables batching |
|
||||
| config.elasticsearch.batching.flushinterval | string | `"1s"` | batch flush interval (default: 1s) |
|
||||
| config.elasticsearch.checkcert | bool | `true` | check if ssl certificate of the output is valid |
|
||||
| config.elasticsearch.createindextemplate | bool | `false` | Create an index template (default: false) |
|
||||
| config.elasticsearch.customheaders | string | `""` | a list of comma separated custom headers to add, syntax is "key:value,key:value" |
|
||||
| config.elasticsearch.enablecompression | bool | `false` | if true enables gzip compression for http requests (default: false) |
|
||||
| config.elasticsearch.flattenfields | bool | `false` | Replace . by _ to avoid mapping conflicts, force to true if createindextemplate==true (default: false) |
|
||||
| config.elasticsearch.hostport | string | `""` | Elasticsearch <http://host:port>, if not `empty`, Elasticsearch is *enabled* |
|
||||
| config.elasticsearch.index | string | `"falco"` | Elasticsearch index |
|
||||
| config.elasticsearch.maxconcurrentrequests | int | `1` | max number of concurrent http requests (default: 1) |
|
||||
| config.elasticsearch.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
|
||||
| config.elasticsearch.mutualtls | bool | `false` | if true, checkcert flag will be ignored (server cert will always be checked) |
|
||||
| config.elasticsearch.password | string | `""` | use this password to authenticate to Elasticsearch if the password is not empty |
|
||||
| config.elasticsearch.suffix | string | `"daily"` | |
|
||||
| config.elasticsearch.numberofreplicas | int | `3` | Number of replicas set by the index template (default: 3) |
|
||||
| config.elasticsearch.numberofshards | int | `3` | Number of shards set by the index template (default: 3) |
|
||||
| config.elasticsearch.password | string | `""` | Use this password to authenticate to Elasticsearch if the password is not empty |
|
||||
| config.elasticsearch.pipeline | string | `""` | Optional ingest pipeline name |
|
||||
| config.elasticsearch.suffix | string | `"daily"` | Date suffix for index rotation : daily, monthly, annually, none |
|
||||
| config.elasticsearch.type | string | `"_doc"` | Elasticsearch document type |
|
||||
| config.elasticsearch.username | string | `""` | use this username to authenticate to Elasticsearch if the username is not empty |
|
||||
| config.elasticsearch.username | string | `""` | Use this username to authenticate to Elasticsearch if the username is not empty |
|
||||
| config.existingSecret | string | `""` | Existing secret with configuration |
|
||||
| config.extraArgs | list | `[]` | Extra command-line arguments |
|
||||
| config.extraEnv | list | `[]` | Extra environment variables |
|
||||
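A brief values.yaml sketch tying together the new Elasticsearch batching and compression keys above; the endpoint is an assumption, while the batching values are the documented defaults:

```yaml
# Sketch only: Elasticsearch output with the documented batching defaults.
config:
  elasticsearch:
    hostport: "http://elasticsearch:9200"   # assumed endpoint
    index: "falco"
    enablecompression: true
    batching:
      enabled: true
      batchsize: "5242880"                  # kept as a string, as noted in the table
      flushinterval: "1s"
```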
|
@ -336,6 +360,13 @@ The following table lists the main configurable parameters of the Falcosidekick
|
|||
| config.loki.customheaders | string | `""` | a list of comma separated custom headers to add, syntax is "key:value,key:value" |
|
||||
| config.loki.endpoint | string | `"/loki/api/v1/push"` | Loki endpoint URL path, more info: <https://grafana.com/docs/loki/latest/api/#post-apiprompush> |
|
||||
| config.loki.extralabels | string | `""` | comma separated list of fields to use as labels additionally to rule, source, priority, tags and custom_fields |
|
||||
| config.loki.format | string | `"text"` | Format for the log entry value: json, text (default) |
|
||||
| config.loki.grafanaDashboard | object | `{"configMap":{"folder":"","name":"falcosidekick-loki-dashboard-grafana","namespace":""},"enabled":true}` | dashboard for Grafana |
|
||||
| config.loki.grafanaDashboard.configMap | object | `{"folder":"","name":"falcosidekick-loki-dashboard-grafana","namespace":""}` | configmaps to be deployed that contain a grafana dashboard. |
|
||||
| config.loki.grafanaDashboard.configMap.folder | string | `""` | folder where the dashboard is stored by grafana. |
|
||||
| config.loki.grafanaDashboard.configMap.name | string | `"falcosidekick-loki-dashboard-grafana"` | name specifies the name for the configmap. |
|
||||
| config.loki.grafanaDashboard.configMap.namespace | string | `""` | namespace specifies the namespace for the configmap. |
|
||||
| config.loki.grafanaDashboard.enabled | bool | `true` | enabled specifies whether this dashboard should be deployed. |
|
||||
| config.loki.hostport | string | `""` | Loki <http://host:port>, if not `empty`, Loki is *enabled* |
|
||||
| config.loki.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
|
||||
| config.loki.mutualtls | bool | `false` | if true, checkcert flag will be ignored (server cert will always be checked) |
|
||||
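As a hedged example of the Loki rows above, including the new `grafanaDashboard` block; the address and namespace are assumptions:

```yaml
# Sketch only: Loki output plus the bundled Grafana dashboard ConfigMap.
config:
  loki:
    hostport: "http://loki.monitoring:3100"   # assumed in-cluster address
    format: "json"
    extralabels: "hostname"
    grafanaDashboard:
      enabled: true
      configMap:
        namespace: "monitoring"               # assumed namespace
```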
|
@ -373,6 +404,7 @@ The following table lists the main configurable parameters of the Falcosidekick
|
|||
| config.nats.hostport | string | `""` | NATS "nats://host:port", if not `empty`, NATS is *enabled* |
|
||||
| config.nats.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
|
||||
| config.nats.mutualtls | bool | `false` | if true, checkcert flag will be ignored (server cert will always be checked) |
|
||||
| config.nats.subjecttemplate | string | `"falco.<priority>.<rule>"` | template for the subject, tokens <priority> and <rule> will be automatically replaced (default: falco.<priority>.<rule>) |
|
||||
| config.nodered.address | string | `""` | Node-RED address, if not empty, Node-RED output is enabled |
|
||||
| config.nodered.checkcert | bool | `true` | check if ssl certificate of the output is valid |
|
||||
| config.nodered.customheaders | string | `""` | Custom headers to add in POST, useful for Authentication, syntax is "key:value\,key:value" |
|
||||
|
@ -401,6 +433,24 @@ The following table lists the main configurable parameters of the Falcosidekick
|
|||
| config.opsgenie.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
|
||||
| config.opsgenie.mutualtls | bool | `false` | if true, checkcert flag will be ignored (server cert will always be checked) |
|
||||
| config.opsgenie.region | `us` or `eu` | `""` | region of your domain |
|
||||
| config.otlp.metrics.checkcert | bool | `true` | Set to false if you want to skip TLS certificate validation (only with https) (default: true) |
|
||||
| config.otlp.metrics.endpoint | string | `""` | OTLP endpoint, typically in the form http{s}://{domain or ip}:4318/v1/metrics |
|
||||
| config.otlp.metrics.extraattributes | string | `""` | Comma-separated list of fields to use as labels additionally to source, priority, rule, hostname, tags, k8s_ns_name, k8s_pod_name and custom_fields |
|
||||
| config.otlp.metrics.extraenvvars | list | `[]` | Extra env vars (override the other settings) (default: "") |
|
||||
| config.otlp.metrics.headers | string | `""` | List of headers to apply to all outgoing metrics in the form of "some-key=some-value,other-key=other-value" (default: "") |
|
||||
| config.otlp.metrics.minimumpriority | string | `""` | Minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default: "") |
|
||||
| config.otlp.metrics.protocol | string | `"grpc"` | OTLP transport protocol to be used for metrics data; it can be "grpc" or "http/protobuf" (default: "grpc") |
|
||||
| config.otlp.metrics.timeout | int | `1000` | OTLP timeout for outgoing metrics in milliseconds (default: "" which uses SDK default: 10000) |
|
||||
| config.otlp.traces.checkcert | bool | `true` | check if ssl certificate of the output is valid |
|
||||
| config.otlp.traces.duration | int | `1000` | Artificial span duration in milliseconds (default: 1000) |
|
||||
| config.otlp.traces.endpoint | string | `""` | OTLP endpoint in the form of http://{domain or ip}:4318/v1/traces, if not empty, OTLP Traces output is enabled |
|
||||
| config.otlp.traces.extraenvvars | object | `{}` | Extra env vars (override the other settings) |
|
||||
| config.otlp.traces.headers | string | `""` | OTLP headers: list of headers to apply to all outgoing traces in the form of "some-key=some-value,other-key=other-value" (default: "") |
|
||||
| config.otlp.traces.minimumpriority | string | `""` | minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" |
|
||||
| config.otlp.traces.protocol | string | `""` | OTLP protocol http/json, http/protobuf, grpc (default: "" which uses SDK default: http/json) |
|
||||
| config.otlp.traces.synced | bool | `false` | Set to true if you want traces to be sent synchronously (default: false) |
|
||||
| config.otlp.traces.timeout | int | `1000` | OTLP timeout: timeout value in milliseconds (default: "" which uses SDK default: 10000) |
|
||||
| config.outputFieldFormat | string | `""` | |
|
||||
| config.pagerduty.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
|
||||
| config.pagerduty.region | string | `"us"` | Pagerduty Region, can be 'us' or 'eu' |
|
||||
| config.pagerduty.routingkey | string | `""` | Pagerduty Routing Key, if not empty, Pagerduty output is *enabled* |
|
||||
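A hedged values.yaml sketch for the new OTLP metrics/traces keys above; the collector endpoint is an assumption, and `http/protobuf` is chosen to match the `:4318` HTTP port used in the table's examples:

```yaml
# Sketch only: OTLP outputs toward an assumed OpenTelemetry collector.
config:
  otlp:
    metrics:
      endpoint: "http://otel-collector:4318/v1/metrics"   # assumed address
      protocol: "http/protobuf"
    traces:
      endpoint: "http://otel-collector:4318/v1/traces"    # assumed address
      protocol: "http/protobuf"
      duration: 1000
```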
|
@ -410,6 +460,15 @@ The following table lists the main configurable parameters of the Falcosidekick
|
|||
| config.policyreport.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
|
||||
| config.policyreport.prunebypriority | bool | `false` | if true, the events with lowest severity are pruned first, in FIFO order |
|
||||
| config.prometheus.extralabels | string | `""` | comma separated list of fields to use as labels additionally to rule, source, priority, tags and custom_fields |
|
||||
| config.quickwit.apiendpoint | string | `"/api/v1"` | API endpoint (containing the API version, overridable in case of Quickwit behind a reverse proxy with URL rewriting) |
|
||||
| config.quickwit.autocreateindex | bool | `false` | Autocreate a falco index mapping if it doesn't exist |
|
||||
| config.quickwit.checkcert | bool | `true` | check if ssl certificate of the output is valid |
|
||||
| config.quickwit.customHeaders | string | `""` | a list of comma separated custom headers to add, syntax is "key:value,key:value" |
|
||||
| config.quickwit.hostport | string | `""` | http://{domain or ip}:{port}, if not empty, Quickwit output is enabled |
|
||||
| config.quickwit.index | string | `"falco"` | Index |
|
||||
| config.quickwit.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
|
||||
| config.quickwit.mutualtls | bool | `false` | if true, checkcert flag will be ignored (server cert will always be checked) |
|
||||
| config.quickwit.version | string | `"0.7"` | Version of Quickwit |
|
||||
| config.rabbitmq.minimumpriority | string | `"debug"` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
|
||||
| config.rabbitmq.queue | string | `""` | Rabbitmq Queue name |
|
||||
| config.rabbitmq.url | string | `""` | Rabbitmq URL, if not empty, Rabbitmq output is *enabled* |
|
||||
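For the Quickwit rows above, a minimal assumed values.yaml fragment; the host and port are placeholders:

```yaml
# Sketch only: Quickwit output; host/port are assumptions.
config:
  quickwit:
    hostport: "http://quickwit:7280"
    index: "falco"
    autocreateindex: true
    version: "0.7"
```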
|
@ -459,13 +518,23 @@ The following table lists the main configurable parameters of the Falcosidekick
|
|||
| config.stan.hostport | string | `""` | Stan nats://{domain or ip}:{port}, if not empty, STAN output is *enabled* |
|
||||
| config.stan.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
|
||||
| config.stan.mutualtls | bool | `false` | if true, checkcert flag will be ignored (server cert will always be checked) |
|
||||
| config.stan.subjecttemplate | string | `"falco.<priority>.<rule>"` | template for the subject, tokens <priority> and <rule> will be automatically replaced (default: falco.<priority>.<rule>) |
|
||||
| config.statsd.forwarder | string | `""` | The address for the StatsD forwarder, in the form <http://host:port>, if not empty StatsD is *enabled* |
|
||||
| config.statsd.namespace | string | `"falcosidekick."` | A prefix for all metrics |
|
||||
| config.sumologic.checkcert | bool | `true` | check if ssl certificate of the output is valid (default: true) |
|
||||
| config.sumologic.minimumpriority | string | `""` | minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default) |
|
||||
| config.sumologic.name | string | `""` | Override the default Sumologic Source Name |
|
||||
| config.sumologic.receiverURL | string | `""` | Sumologic HTTP Source URL, if not empty, Sumologic output is enabled |
|
||||
| config.sumologic.sourceCategory | string | `""` | Override the default Sumologic Source Category |
|
||||
| config.sumologic.sourceHost | string | `""` | Override the default Sumologic Source Host |
|
||||
| config.syslog.format | string | `"json"` | Syslog payload format. It can be either "json" or "cef" |
|
||||
| config.syslog.host | string | `""` | Syslog Host, if not empty, Syslog output is *enabled* |
|
||||
| config.syslog.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
|
||||
| config.syslog.port | string | `""` | Syslog endpoint port number |
|
||||
| config.syslog.protocol | string | `"tcp"` | Syslog transport protocol. It can be either "tcp" or "udp" |
|
||||
| config.talon.address | string | `""` | Talon address, if not empty, Talon output is enabled |
|
||||
| config.talon.checkcert | bool | `true` | check if ssl certificate of the output is valid |
|
||||
| config.talon.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
|
||||
| config.teams.activityimage | string | `""` | Teams section image |
|
||||
| config.teams.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
|
||||
| config.teams.outputformat | string | `"all"` | `all` (default), `text` (only text is displayed in Teams), `facts` (only facts are displayed in Teams) |
|
||||
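A short, assumed values.yaml fragment for the Syslog keys above; the server name is a placeholder, and `cef` is one of the two documented formats:

```yaml
# Sketch only: Syslog output in CEF format over TCP; host is a placeholder.
config:
  syslog:
    host: "syslog.example.local"
    port: "514"
    protocol: "tcp"
    format: "cef"
```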
|
@ -475,6 +544,7 @@ The following table lists the main configurable parameters of the Falcosidekick
|
|||
| config.tekton.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
|
||||
| config.telegram.chatid | string | `""` | Telegram identifier of the shared chat |
|
||||
| config.telegram.checkcert | bool | `true` | check if ssl certificate of the output is valid |
|
||||
| config.telegram.messagethreadid | string | `""` | Telegram individual chats within the group |
|
||||
| config.telegram.minimumpriority | string | `""` | minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" |
|
||||
| config.telegram.token | string | `""` | Telegram bot authentication token |
|
||||
| config.templatedfields | string | `""` | a list of escaped comma separated Go templated fields to add to falco events, syntax is "key:template\,key:template" |
|
||||
|
@ -485,6 +555,7 @@ The following table lists the main configurable parameters of the Falcosidekick
|
|||
| config.timescaledb.password | string | `"postgres"` | Password to authenticate with TimescaleDB |
|
||||
| config.timescaledb.port | int | `5432` | TimescaleDB port (default: 5432) |
|
||||
| config.timescaledb.user | string | `"postgres"` | Username to authenticate with TimescaleDB |
|
||||
| config.tlsclient.cacertfile | string | `""` | CA certificate file for server certification on TLS connections, appended to the system CA pool if not empty |
|
||||
| config.tlsserver.cacertfile | string | `"/etc/certs/server/ca.crt"` | CA certification file path for client certification if mutualtls is true |
|
||||
| config.tlsserver.cacrt | string | `""` | |
|
||||
| config.tlsserver.certfile | string | `"/etc/certs/server/server.crt"` | server certification file path for TLS Server |
|
||||
|
@ -504,6 +575,8 @@ The following table lists the main configurable parameters of the Falcosidekick
|
|||
| config.wavefront.flushintervalseconds | int | `1` | Wavefront flush interval in seconds. Defaults to 1 |
|
||||
| config.wavefront.metricname | string | `"falco.alert"` | Metric to be created in Wavefront. Defaults to falco.alert |
|
||||
| config.wavefront.minimumpriority | string | `"debug"` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
|
||||
| config.webex.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
|
||||
| config.webex.webhookurl | string | `""` | Webex WebhookURL, if not empty, Webex output is enabled |
|
||||
| config.webhook.address | string | `""` | Webhook address, if not empty, Webhook output is *enabled* |
|
||||
| config.webhook.checkcert | bool | `true` | check if ssl certificate of the output is valid |
|
||||
| config.webhook.customHeaders | string | `""` | a list of comma separated custom headers to add, syntax is "key:value\,key:value" |
|
||||
|
@ -526,18 +599,29 @@ The following table lists the main configurable parameters of the Falcosidekick
|
|||
| config.zincsearch.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
|
||||
| config.zincsearch.password | string | `""` | use this password to authenticate to ZincSearch |
|
||||
| config.zincsearch.username | string | `""` | use this username to authenticate to ZincSearch |
|
||||
| customAnnotations | object | `{}` | custom annotations to add to all resources |
|
||||
| customLabels | object | `{}` | custom labels to add to all resources |
|
||||
| extraVolumeMounts | list | `[]` | Extra volume mounts for sidekick deployment |
|
||||
| extraVolumes | list | `[]` | Extra volumes for sidekick deployment |
|
||||
| fullnameOverride | string | `""` | Override the name |
|
||||
| image | object | `{"pullPolicy":"IfNotPresent","registry":"docker.io","repository":"falcosecurity/falcosidekick","tag":"2.28.0"}` | number of old history to retain to allow rollback (If not set, default Kubernetes value is set to 10) revisionHistoryLimit: 1 |
|
||||
| grafana | object | `{"dashboards":{"configMaps":{"falcosidekick":{"folder":"","name":"falcosidekick-grafana-dashboard","namespace":""}},"enabled":false}}` | grafana contains the configuration related to grafana. |
|
||||
| grafana.dashboards | object | `{"configMaps":{"falcosidekick":{"folder":"","name":"falcosidekick-grafana-dashboard","namespace":""}},"enabled":false}` | dashboards contains configuration for grafana dashboards. |
|
||||
| grafana.dashboards.configMaps | object | `{"falcosidekick":{"folder":"","name":"falcosidekick-grafana-dashboard","namespace":""}}` | configmaps to be deployed that contain a grafana dashboard. |
|
||||
| grafana.dashboards.configMaps.falcosidekick | object | `{"folder":"","name":"falcosidekick-grafana-dashboard","namespace":""}` | falcosidekick contains the configuration for falcosidekick's dashboard. |
|
||||
| grafana.dashboards.configMaps.falcosidekick.folder | string | `""` | folder where the dashboard is stored by grafana. |
|
||||
| grafana.dashboards.configMaps.falcosidekick.name | string | `"falcosidekick-grafana-dashboard"` | name specifies the name for the configmap. |
|
||||
| grafana.dashboards.configMaps.falcosidekick.namespace | string | `""` | namespace specifies the namespace for the configmap. |
|
||||
| grafana.dashboards.enabled | bool | `false` | enabled specifies whether the dashboards should be deployed. |
|
||||
| image | object | `{"pullPolicy":"IfNotPresent","registry":"docker.io","repository":"falcosecurity/falcosidekick","tag":"2.31.1"}` | number of old history to retain to allow rollback (If not set, default Kubernetes value is set to 10) revisionHistoryLimit: 1 |
|
||||
| image.pullPolicy | string | `"IfNotPresent"` | The image pull policy |
|
||||
| image.registry | string | `"docker.io"` | The image registry to pull from |
|
||||
| image.repository | string | `"falcosecurity/falcosidekick"` | The image repository to pull from |
|
||||
| image.tag | string | `"2.28.0"` | The image tag to pull |
|
||||
| image.tag | string | `"2.31.1"` | The image tag to pull |
|
||||
| imagePullSecrets | list | `[]` | Secrets for the registry |
|
||||
| ingress.annotations | object | `{}` | Ingress annotations |
|
||||
| ingress.enabled | bool | `false` | Whether to create the ingress |
|
||||
| ingress.hosts | list | `[{"host":"falcosidekick.local","paths":[{"path":"/"}]}]` | Ingress hosts |
|
||||
| ingress.ingressClassName | string | `""` | ingress class name |
|
||||
| ingress.tls | list | `[]` | Ingress TLS configuration |
|
||||
| nameOverride | string | `""` | Override name |
|
||||
| nodeSelector | object | `{}` | Sidekick nodeSelector field |
|
||||
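To make the new `grafana.dashboards` structure above concrete, a hedged values.yaml sketch; the namespace and folder are assumptions:

```yaml
# Sketch only: deploy the falcosidekick Grafana dashboard ConfigMap.
grafana:
  dashboards:
    enabled: true
    configMaps:
      falcosidekick:
        name: falcosidekick-grafana-dashboard
        namespace: "monitoring"   # assumed namespace watched by a Grafana sidecar
        folder: "Falco"           # assumed folder name
```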
|
@ -570,10 +654,11 @@ The following table lists the main configurable parameters of the Falcosidekick
|
|||
| replicaCount | int | `2` | number of running pods |
|
||||
| resources | object | `{}` | The resources for falcosidekick pods |
|
||||
| securityContext | object | `{}` | Sidekick container securityContext |
|
||||
| service.annotations | object | `{}` | Service annotations |
|
||||
| service.annotations | object | `{"prometheus.io/scrape":"true"}` | Service annotations |
|
||||
| service.port | int | `2801` | Service port |
|
||||
| service.type | string | `"ClusterIP"` | Service type |
|
||||
| serviceMonitor.additionalLabels | object | `{}` | specify Additional labels to be added on the Service Monitor. |
|
||||
| serviceMonitor.additionalProperties | object | `{}` | allows setting additional properties on the endpoint such as relabelings, metricRelabelings etc. |
|
||||
| serviceMonitor.enabled | bool | `false` | enable the deployment of a Service Monitor for the Prometheus Operator. |
|
||||
| serviceMonitor.interval | string | `""` | specify a user defined interval. When not specified Prometheus default interval is used. |
|
||||
| serviceMonitor.scrapeTimeout | string | `""` | specify a user defined scrape timeout. When not specified Prometheus default scrape timeout is used. |
|
||||
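A minimal sketch for the ServiceMonitor keys above; the `release` label is an assumption that depends on how the Prometheus Operator is deployed:

```yaml
# Sketch only: scrape falcosidekick metrics via the Prometheus Operator.
serviceMonitor:
  enabled: true
  interval: "30s"
  additionalLabels:
    release: kube-prometheus-stack   # assumed Prometheus Operator release label
```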
|
@ -587,6 +672,7 @@ The following table lists the main configurable parameters of the Falcosidekick
|
|||
| webui.enabled | bool | `false` | enable Falcosidekick-UI |
|
||||
| webui.existingSecret | string | `""` | Existing secret with configuration |
|
||||
| webui.externalRedis.enabled | bool | `false` | Enable or disable the usage of an external Redis. Is mutually exclusive with webui.redis.enabled. |
|
||||
| webui.externalRedis.password | string | `""` | Set the password of the external Redis |
|
||||
| webui.externalRedis.port | int | `6379` | The port of the external Redis database with RediSearch > v2 |
|
||||
| webui.externalRedis.url | string | `""` | The URL of the external Redis database with RediSearch > v2 |
|
||||
| webui.image.pullPolicy | string | `"IfNotPresent"` | The web UI image pull policy |
|
||||
|
@ -596,7 +682,14 @@ The following table lists the main configurable parameters of the Falcosidekick
|
|||
| webui.ingress.annotations | object | `{}` | Web UI ingress annotations |
|
||||
| webui.ingress.enabled | bool | `false` | Whether to create the Web UI ingress |
|
||||
| webui.ingress.hosts | list | `[{"host":"falcosidekick-ui.local","paths":[{"path":"/"}]}]` | Web UI ingress hosts configuration |
|
||||
| webui.ingress.ingressClassName | string | `""` | ingress class name |
|
||||
| webui.ingress.tls | list | `[]` | Web UI ingress TLS configuration |
|
||||
| webui.initContainer | object | `{"image":{"registry":"docker.io","repository":"redis/redis-stack","tag":"7.2.0-v11"},"resources":{},"securityContext":{}}` | Web UI wait-redis initContainer |
|
||||
| webui.initContainer.image.registry | string | `"docker.io"` | wait-redis initContainer image registry to pull from |
|
||||
| webui.initContainer.image.repository | string | `"redis/redis-stack"` | wait-redis initContainer image repository to pull from |
|
||||
| webui.initContainer.image.tag | string | `"7.2.0-v11"` | wait-redis initContainer image tag to pull |
|
||||
| webui.initContainer.resources | object | `{}` | wait-redis initContainer resources |
|
||||
| webui.initContainer.securityContext | object | `{}` | wait-redis initContainer securityContext |
|
||||
| webui.loglevel | string | `"info"` | Log level ("debug", "info", "warning", "error") |
|
||||
| webui.nodeSelector | object | `{}` | Web UI nodeSelector field |
|
||||
| webui.podAnnotations | object | `{}` | additional annotations on the Web UI pods |
|
||||
|
@ -604,12 +697,15 @@ The following table lists the main configurable parameters of the Falcosidekick
|
|||
| webui.podSecurityContext | object | `{"fsGroup":1234,"runAsUser":1234}` | Web UI pod securityContext |
|
||||
| webui.priorityClassName | string | `""` | Name of the priority class to be used by the Web UI pods, priority class needs to be created beforehand |
|
||||
| webui.redis.affinity | object | `{}` | Affinity for the Web UI Redis pods |
|
||||
| webui.redis.customAnnotations | object | `{}` | custom annotations to add to all resources |
|
||||
| webui.redis.customConfig | object | `{}` | List of Custom config overrides for Redis |
|
||||
| webui.redis.customLabels | object | `{}` | custom labels to add to all resources |
|
||||
| webui.redis.enabled | bool | `true` | Is mutually exclusive with webui.externalRedis.enabled |
|
||||
| webui.redis.existingSecret | string | `""` | Existing secret with configuration |
|
||||
| webui.redis.image.pullPolicy | string | `"IfNotPresent"` | The web UI image pull policy |
|
||||
| webui.redis.image.registry | string | `"docker.io"` | The web UI Redis image registry to pull from |
|
||||
| webui.redis.image.repository | string | `"redis/redis-stack"` | The web UI Redis image repository to pull from |
|
||||
| webui.redis.image.tag | string | `"6.2.6-v3"` | The web UI Redis image tag to pull from |
|
||||
| webui.redis.image.tag | string | `"7.2.0-v11"` | The web UI Redis image tag to pull from |
|
||||
| webui.redis.nodeSelector | object | `{}` | Web UI Redis nodeSelector field |
|
||||
| webui.redis.password | string | `""` | Set a password for Redis |
|
||||
| webui.redis.podAnnotations | object | `{}` | additional annotations on the pods |
|
||||
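A hedged values.yaml sketch combining the Web UI Redis keys above: disable the bundled Redis and point at an external one; the URL and password are placeholders:

```yaml
# Sketch only: Web UI backed by an external Redis with RediSearch >= v2.
webui:
  enabled: true
  redis:
    enabled: false                   # mutually exclusive with externalRedis
  externalRedis:
    enabled: true
    url: "redis.example.local"       # placeholder host
    port: 6379
    password: "<redis-password>"     # placeholder secret
```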
|
|
File diff suppressed because it is too large
|
@ -0,0 +1,714 @@
|
|||
{
|
||||
"annotations": {
|
||||
"list": [
|
||||
{
|
||||
"builtIn": 1,
|
||||
"datasource": {
|
||||
"type": "grafana",
|
||||
"uid": "-- Grafana --"
|
||||
},
|
||||
"enable": true,
|
||||
"hide": true,
|
||||
"iconColor": "rgba(0, 211, 255, 1)",
|
||||
"name": "Annotations & Alerts",
|
||||
"type": "dashboard"
|
||||
}
|
||||
]
|
||||
},
|
||||
"editable": true,
|
||||
"fiscalYearStartMonth": 0,
|
||||
"graphTooltip": 0,
|
||||
"id": 5,
|
||||
"links": [],
|
||||
"panels": [
|
||||
{
|
||||
"datasource": {
|
||||
"default": false,
|
||||
"type": "loki",
|
||||
"uid": "${datasource}"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "palette-classic"
|
||||
},
|
||||
"custom": {
|
||||
"hideFrom": {
|
||||
"legend": false,
|
||||
"tooltip": false,
|
||||
"viz": false
|
||||
}
|
||||
},
|
||||
"mappings": []
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 0
|
||||
},
|
||||
"id": 1,
|
||||
"options": {
|
||||
"legend": {
|
||||
"displayMode": "list",
|
||||
"placement": "bottom",
|
||||
"showLegend": true
|
||||
},
|
||||
"pieType": "pie",
|
||||
"reduceOptions": {
|
||||
"calcs": [
|
||||
"sum"
|
||||
],
|
||||
"fields": "",
|
||||
"values": false
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "11.2.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "loki",
|
||||
"uid": "${datasource}"
|
||||
},
|
||||
"editorMode": "builder",
|
||||
"expr": "count by(priority) (rate({priority=~\".+\"} | logfmt | k8s_ns =~ `$namespace` | priority =~ `$priority` [$__auto]))",
|
||||
"legendFormat": "{{priority}}",
|
||||
"queryType": "range",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Priority counts",
|
||||
"type": "piechart"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"default": false,
|
||||
"type": "loki",
|
||||
"uid": "${datasource}"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "palette-classic"
|
||||
},
|
||||
"custom": {
|
||||
"hideFrom": {
|
||||
"legend": false,
|
||||
"tooltip": false,
|
||||
"viz": false
|
||||
}
|
||||
},
|
||||
"mappings": []
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 0
|
||||
},
|
||||
"id": 2,
|
||||
"options": {
|
||||
"displayLabels": [
|
||||
"value",
|
||||
"percent"
|
||||
],
|
||||
"legend": {
|
||||
"calcs": [],
|
||||
"displayMode": "list",
|
||||
"placement": "bottom",
|
||||
"showLegend": true,
|
||||
"values": []
|
||||
},
|
||||
"pieType": "pie",
|
||||
"reduceOptions": {
|
||||
"calcs": [
|
||||
"sum"
|
||||
],
|
||||
"fields": "",
|
||||
"values": false
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "11.2.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "loki",
|
||||
"uid": "${datasource}"
|
||||
},
|
||||
"editorMode": "builder",
|
||||
"expr": "count by(rule) (rate({priority=~\".+\", rule!=\"Falco internal: metrics snapshot\"} | logfmt | k8s_ns =~ `$namespace` | priority =~ `$priority` [$__auto]))",
|
||||
"legendFormat": "{{priority}}",
|
||||
"queryType": "range",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Rules counts",
|
||||
"type": "piechart"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"default": false,
|
||||
"type": "loki",
|
||||
"uid": "${datasource}"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "thresholds"
|
||||
},
|
||||
"custom": {
|
||||
"align": "left",
|
||||
"cellOptions": {
|
||||
"type": "auto",
|
||||
"wrapText": false
|
||||
},
|
||||
"filterable": true,
|
||||
"inspect": false
|
||||
},
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "Value #A"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "displayName",
|
||||
"value": "Number of Messages"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "Time"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.hidden",
|
||||
"value": true
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "k8s_ns"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 96
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "priority"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 91
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "rule"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 450
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "k8s_pod_name"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 184
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 8
|
||||
},
|
||||
"id": 5,
|
||||
"options": {
|
||||
"cellHeight": "sm",
|
||||
"footer": {
|
||||
"countRows": false,
|
||||
"enablePagination": false,
|
||||
"fields": "",
|
||||
"reducer": [
|
||||
"last"
|
||||
],
|
||||
"show": false
|
||||
},
|
||||
"showHeader": true,
|
||||
"sortBy": [
|
||||
{
|
||||
"desc": false,
|
||||
"displayName": "k8s_pod_name"
|
||||
}
|
||||
]
|
||||
},
|
||||
"pluginVersion": "11.2.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "loki",
|
||||
"uid": "${datasource}"
|
||||
},
|
||||
"editorMode": "builder",
|
||||
"expr": "count by(k8s_pod_name, rule, priority, k8s_ns) (rate({priority=~\".+\"} | logfmt | k8s_ns =~ `$namespace` | priority =~ `$priority` [$__auto]))",
|
||||
"legendFormat": "",
|
||||
"queryType": "instant",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"transformations": [
|
||||
{
|
||||
"id": "sortBy",
|
||||
"options": {
|
||||
"fields": {},
|
||||
"sort": [
|
||||
{
|
||||
"desc": true,
|
||||
"field": "Value #A"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
],
|
||||
"type": "table"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"default": false,
|
||||
"type": "loki",
|
||||
"uid": "${datasource}"
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 7,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 16
|
||||
},
|
||||
"id": 6,
|
||||
"options": {
|
||||
"dedupStrategy": "none",
|
||||
"enableLogDetails": true,
|
||||
"prettifyLogMessage": false,
|
||||
"showCommonLabels": false,
|
||||
"showLabels": false,
|
||||
"showTime": false,
|
||||
"sortOrder": "Descending",
|
||||
"wrapLogMessage": false
|
||||
},
|
||||
"pluginVersion": "11.2.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "loki",
|
||||
"uid": "${datasource}"
|
||||
},
|
||||
"direction": "backward",
|
||||
"editorMode": "builder",
|
||||
"expr": "{priority=~\".+\"} |= `$line_filter` | logfmt | k8s_ns =~ `$namespace` | priority =~ `$priority`",
|
||||
"queryType": "range",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Realtime logs",
|
||||
"type": "logs"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"default": false,
|
||||
"type": "loki",
|
||||
"uid": "${datasource}"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "palette-classic"
|
||||
},
|
||||
"custom": {
|
||||
"axisBorderShow": false,
|
||||
"axisCenteredZero": false,
|
||||
"axisColorMode": "text",
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"barWidthFactor": 0.6,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 100,
|
||||
"gradientMode": "none",
|
||||
"hideFrom": {
|
||||
"legend": false,
|
||||
"tooltip": false,
|
||||
"viz": false
|
||||
},
|
||||
"insertNulls": false,
|
||||
"lineInterpolation": "stepBefore",
|
||||
"lineStyle": {
|
||||
"fill": "solid"
|
||||
},
|
||||
"lineWidth": 1,
|
||||
"pointSize": 4,
|
||||
"scaleDistribution": {
|
||||
"type": "linear"
|
||||
},
|
||||
"showPoints": "auto",
|
||||
"spanNulls": false,
|
||||
"stacking": {
|
||||
"group": "A",
|
||||
"mode": "normal"
|
||||
},
|
||||
"thresholdsStyle": {
|
||||
"mode": "off"
|
||||
}
|
||||
},
|
||||
"fieldMinMax": false,
|
||||
"mappings": [],
|
||||
"min": 0,
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green"
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
},
|
||||
"unit": "none"
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 23
|
||||
},
|
||||
"id": 7,
|
||||
"options": {
|
||||
"legend": {
|
||||
"calcs": [],
|
||||
"displayMode": "list",
|
||||
"placement": "bottom",
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "loki",
|
||||
"uid": "loki"
|
||||
},
|
||||
"editorMode": "builder",
|
||||
"expr": "count by(priority) (rate({priority=~\".+\"} | logfmt | k8s_ns =~ `$namespace` | priority =~ `$priority` [1m]))",
|
||||
"legendFormat": "{{priority}}",
|
||||
"queryType": "range",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Priorities Rates",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"default": false,
|
||||
"type": "loki",
|
||||
"uid": "${datasource}"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "palette-classic"
|
||||
},
|
||||
"custom": {
|
||||
"axisBorderShow": false,
|
||||
"axisCenteredZero": false,
|
||||
"axisColorMode": "text",
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"barWidthFactor": 0.6,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 100,
|
||||
"gradientMode": "none",
|
||||
"hideFrom": {
|
||||
"legend": false,
|
||||
"tooltip": false,
|
||||
"viz": false
|
||||
},
|
||||
"insertNulls": false,
|
||||
"lineInterpolation": "stepBefore",
|
||||
"lineStyle": {
|
||||
"fill": "solid"
|
||||
},
|
||||
"lineWidth": 1,
|
||||
"pointSize": 4,
|
||||
"scaleDistribution": {
|
||||
"type": "linear"
|
||||
},
|
||||
"showPoints": "auto",
|
||||
"spanNulls": false,
|
||||
"stacking": {
|
||||
"group": "A",
|
||||
"mode": "normal"
|
||||
},
|
||||
"thresholdsStyle": {
|
||||
"mode": "off"
|
||||
}
|
||||
},
|
||||
"fieldMinMax": false,
|
||||
"mappings": [],
|
||||
"min": 0,
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green"
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
},
|
||||
"unit": "none"
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 23
|
||||
},
|
||||
"id": 8,
|
||||
"options": {
|
||||
"legend": {
|
||||
"calcs": [],
|
||||
"displayMode": "list",
|
||||
"placement": "bottom",
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "loki",
|
||||
"uid": "loki"
|
||||
},
|
||||
"editorMode": "builder",
|
||||
"expr": "count by(rule) (rate({priority=~\".+\"} | logfmt | k8s_ns =~ `$namespace` | priority =~ `$priority` [1m]))",
|
||||
"legendFormat": "{{priority}}",
|
||||
"queryType": "range",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Rules Rates",
|
||||
"type": "timeseries"
|
||||
}
|
||||
],
|
||||
"refresh": "auto",
|
||||
"schemaVersion": 39,
|
||||
"tags": [],
|
||||
"templating": {
|
||||
"list": [
|
||||
{
|
||||
"allValue": "",
|
||||
"current": {
|
||||
"selected": true,
|
||||
"text": [
|
||||
"arr",
|
||||
"core",
|
||||
"falco",
|
||||
"kube-system",
|
||||
"media",
|
||||
"monitoring",
|
||||
"rook",
|
||||
"rook-cluster",
|
||||
"storage",
|
||||
"utilities",
|
||||
"webs"
|
||||
],
|
||||
"value": [
|
||||
"arr",
|
||||
"core",
|
||||
"falco",
|
||||
"kube-system",
|
||||
"media",
|
||||
"monitoring",
|
||||
"rook",
|
||||
"rook-cluster",
|
||||
"storage",
|
||||
"utilities",
|
||||
"webs"
|
||||
]
|
||||
},
|
||||
"datasource": {
|
||||
"type": "loki",
|
||||
"uid": "${datasource}"
|
||||
},
|
||||
"definition": "",
|
||||
"description": "",
|
||||
"hide": 0,
|
||||
"includeAll": false,
|
||||
"label": "namespace",
|
||||
"multi": true,
|
||||
"name": "namespace",
|
||||
"options": [],
|
||||
"query": {
|
||||
"label": "namespace",
|
||||
"refId": "LokiVariableQueryEditor-VariableQuery",
|
||||
"stream": "",
|
||||
"type": 1
|
||||
},
|
||||
"refresh": 1,
|
||||
"regex": "",
|
||||
"skipUrlSync": false,
|
||||
"sort": 0,
|
||||
"type": "query"
|
||||
},
|
||||
{
|
||||
"current": {
|
||||
"selected": false,
|
||||
"text": "Loki",
|
||||
"value": "loki"
|
||||
},
|
||||
"hide": 0,
|
||||
"includeAll": false,
|
||||
"label": "datasource",
|
||||
"multi": false,
|
||||
"name": "datasource",
|
||||
"options": [],
|
||||
"query": "loki",
|
||||
"queryValue": "",
|
||||
"refresh": 1,
|
||||
"regex": "",
|
||||
"skipUrlSync": false,
|
||||
"type": "datasource"
|
||||
},
|
||||
{
|
||||
"current": {
|
||||
"selected": true,
|
||||
"text": [
|
||||
"Critical"
|
||||
],
|
||||
"value": [
|
||||
"Critical"
|
||||
]
|
||||
},
|
||||
"datasource": {
|
||||
"type": "loki",
|
||||
"uid": "${datasource}"
|
||||
},
|
||||
"definition": "",
|
||||
"hide": 0,
|
||||
"includeAll": true,
|
||||
"label": "priority",
|
||||
"multi": true,
|
||||
"name": "priority",
|
||||
"options": [],
|
||||
"query": {
|
||||
"label": "priority",
|
||||
"refId": "LokiVariableQueryEditor-VariableQuery",
|
||||
"stream": "",
|
||||
"type": 1
|
||||
},
|
||||
"refresh": 1,
|
||||
"regex": "",
|
||||
"skipUrlSync": false,
|
||||
"sort": 0,
|
||||
"type": "query"
|
||||
},
|
||||
{
|
||||
"current": {
|
||||
"selected": false,
|
||||
"text": "",
|
||||
"value": ""
|
||||
},
|
||||
"description": "Text to filter lines",
|
||||
"hide": 0,
|
||||
"label": "line_filter",
|
||||
"name": "line_filter",
|
||||
"options": [
|
||||
{
|
||||
"selected": true,
|
||||
"text": "",
|
||||
"value": ""
|
||||
}
|
||||
],
|
||||
"query": "",
|
||||
"skipUrlSync": false,
|
||||
"type": "textbox"
|
||||
}
|
||||
]
|
||||
},
|
||||
"time": {
|
||||
"from": "now-24h",
|
||||
"to": "now"
|
||||
},
|
||||
"timepicker": {},
|
||||
"timezone": "browser",
|
||||
"title": "Falco logs",
|
||||
"uid": "de6ixj4nl1kowc",
|
||||
"version": 2,
|
||||
"weekStart": ""
|
||||
}
|
|
@ -22,7 +22,7 @@
|
|||
2. Get the URL for Falcosidekick-UI (WebUI) by running these commands:
|
||||
{{- if .Values.webui.ingress.enabled }}
|
||||
{{- range $host := .Values.webui.ingress.hosts }}
|
||||
http{{ if $.Values.webui.ingress.tls }}s{{ end }}://{{ $host.host }}{{ index .paths 0 }}
|
||||
http{{ if $.Values.webui.ingress.tls }}s{{ end }}://{{ $host.host }}{{ index $host.paths 0 }}
|
||||
{{- end }}
|
||||
{{- else if contains "NodePort" .Values.webui.service.type }}
|
||||
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "falcosidekick.fullname" . }}-ui)
|
||||
|
|
|
@ -6,6 +6,13 @@ metadata:
|
|||
labels:
|
||||
{{- include "falcosidekick.labels" . | nindent 4 }}
|
||||
app.kubernetes.io/component: core
|
||||
{{- with .Values.customLabels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
annotations:
|
||||
{{- with .Values.customAnnotations }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "falcosidekick.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
|
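The template change above injects `customLabels` and `customAnnotations` into the resource metadata; a hedged example of the corresponding values (the label and annotation contents are assumptions):

```yaml
# Sketch only: values picked up by the customLabels/customAnnotations blocks.
customLabels:
  app.kubernetes.io/part-of: falco        # assumed label
customAnnotations:
  example.com/owner: platform-team        # assumed annotation
```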
|
|
@ -7,6 +7,13 @@ metadata:
|
|||
labels:
|
||||
{{- include "falcosidekick.labels" . | nindent 4 }}
|
||||
app.kubernetes.io/component: core
|
||||
{{- with .Values.customLabels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
annotations:
|
||||
{{- with .Values.customAnnotations }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
type: Opaque
|
||||
data:
|
||||
{{ $key := .Values.config.tlsserver.serverkey }}
|
||||
|
|
Some files were not shown because too many files have changed in this diff