Compare commits

No commits in common. "master" and "falcosidekick-0.1.31" have entirely different histories.

202 changed files with 8255 additions and 30437 deletions

.circleci/config.yml Normal file

@@ -0,0 +1,39 @@
version: 2.1
jobs:
lint-charts:
docker:
- image: quay.io/helmpack/chart-testing:latest
steps:
- checkout
- run:
name: lint
command: .circleci/lint.sh
release-charts:
docker:
- image: cimg/base:stable
steps:
- run:
name: checkout
command: |
git clone https://${GITHUB_TOKEN}@github.com/${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}.git .
- run:
name: release
command: |
echo "export CR_REPO_URL=https://falcosecurity.github.io/charts" >> $BASH_ENV
echo "export GIT_USERNAME=$CIRCLE_PROJECT_USERNAME" >> $BASH_ENV
echo "export GIT_REPOSITORY_NAME=$CIRCLE_PROJECT_REPONAME" >> $BASH_ENV
.circleci/install_tools.sh
.circleci/release.sh
workflows:
version: 2
release:
jobs:
- "lint-charts"
- release-charts:
context: falco
filters:
tags:
ignore: /.*/
branches:
only: master

.circleci/install_tools.sh Executable file

@@ -0,0 +1,20 @@
#!/usr/bin/env bash
set -o errexit
readonly HELM_VERSION=3.2.1
readonly CHART_RELEASER_VERSION=1.0.0-beta.1
echo "Installing Helm..."
curl -LO "https://get.helm.sh/helm-v$HELM_VERSION-linux-amd64.tar.gz"
sudo mkdir -p "/usr/local/helm-v$HELM_VERSION"
sudo tar -xzf "helm-v$HELM_VERSION-linux-amd64.tar.gz" -C "/usr/local/helm-v$HELM_VERSION"
sudo ln -s "/usr/local/helm-v$HELM_VERSION/linux-amd64/helm" /usr/local/bin/helm
rm -f "helm-v$HELM_VERSION-linux-amd64.tar.gz"
echo "Installing chart-releaser..."
curl -LO "https://github.com/helm/chart-releaser/releases/download/v${CHART_RELEASER_VERSION}/chart-releaser_${CHART_RELEASER_VERSION}_linux_amd64.tar.gz"
sudo mkdir -p "/usr/local/chart-releaser-v$CHART_RELEASER_VERSION"
sudo tar -xzf "chart-releaser_${CHART_RELEASER_VERSION}_linux_amd64.tar.gz" -C "/usr/local/chart-releaser-v$CHART_RELEASER_VERSION"
sudo ln -s "/usr/local/chart-releaser-v$CHART_RELEASER_VERSION/cr" /usr/local/bin/cr
rm -f "chart-releaser_${CHART_RELEASER_VERSION}_linux_amd64.tar.gz"

.circleci/lint.sh Executable file

@@ -0,0 +1,20 @@
#!/usr/bin/env bash
set -o errexit
set -o nounset
set -o pipefail
readonly REPO_ROOT="${REPO_ROOT:-$(git rev-parse --show-toplevel)}"
main() {
cd "$REPO_ROOT" > /dev/null
echo "Linting charts..."
# iterate over all charts
for chart_config in */Chart.yaml; do
helm lint "$(dirname "$chart_config")"
done
}
main
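Assuming a local clone of the repository, the linter can be run directly; `REPO_ROOT` falls back to the git toplevel when unset:

```bash
# Lint every chart in the repo, exactly as CI does.
.circleci/lint.sh
```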

.circleci/release.sh Executable file

@@ -0,0 +1,87 @@
#!/usr/bin/env bash
set -o errexit
set -o nounset
set -o pipefail
: "${GITHUB_TOKEN:?Environment variable GITHUB_TOKEN must be set}"
: "${CR_REPO_URL:?Environment variable CR_REPO_URL must be set}"
: "${GIT_USERNAME:?Environment variable GIT_USERNAME must be set}"
: "${GIT_REPOSITORY_NAME:?Environment variable GIT_REPOSITORY_NAME must be set}"
readonly REPO_ROOT="${REPO_ROOT:-$(git rev-parse --show-toplevel)}"
export CR_TOKEN="$GITHUB_TOKEN"
main() {
pushd "$REPO_ROOT" > /dev/null
echo "Fetching tags..."
git fetch --tags
echo "Fetching charts..."
local changed_charts=()
# iterate over all charts and skip those that already have a tag matching the current version
for chart_config in */Chart.yaml; do
local chart_name=$(awk '/^name: /{print $NF}' "$chart_config")
local chart_ver=$(awk '/^version: /{print $NF}' "$chart_config")
local tag="${chart_name}-${chart_ver}"
if git rev-parse "$tag" >/dev/null 2>&1; then
echo "Chart '$chart_name': tag '$tag' already exists, skipping."
else
echo "Chart '$chart_name': new version '$chart_ver' detected."
changed_charts+=("$chart_name")
fi
done
# preparing dirs
rm -rf .cr-release-packages
mkdir -p .cr-release-packages
rm -rf .cr-index
mkdir -p .cr-index
# only release those charts for which a new version has been detected
if [[ -n "${changed_charts[*]}" ]]; then
for chart in "${changed_charts[@]}"; do
echo "Packaging chart '$chart'..."
package_chart "$chart"
done
release_charts
# the newly created GitHub releases may not be available yet; let's wait a bit to be sure.
sleep 5
update_index
else
echo "Nothing to do. No chart changes detected."
fi
popd > /dev/null
}
package_chart() {
local chart="$1"
helm package "$chart" --destination .cr-release-packages --dependency-update
}
release_charts() {
cr upload -o "$GIT_USERNAME" -r "$GIT_REPOSITORY_NAME"
}
update_index() {
cr index -o "$GIT_USERNAME" -r "$GIT_REPOSITORY_NAME" -c "$CR_REPO_URL"
git config user.email "poiana@users.noreply.github.com"
git config user.name "poiana"
git checkout gh-pages
cp --force .cr-index/index.yaml index.yaml
git add index.yaml
git commit --message="Update index.yaml" --signoff
git push origin gh-pages
}
main
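A hypothetical local invocation of the release script; every value below is a placeholder, not a real credential:

```bash
# Placeholders only: substitute a real token and your own repo coordinates.
export GITHUB_TOKEN="<token-with-repo-scope>"
export CR_REPO_URL="https://falcosecurity.github.io/charts"
export GIT_USERNAME="<github-user-or-org>"
export GIT_REPOSITORY_NAME="charts"
.circleci/release.sh
```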

@@ -35,13 +9,9 @@ Please remove the leading whitespace before the `/kind <>` you uncommented.
> /area falco-chart
> /area falcosidekick-chart
> /area falco-exporter-chart
> /area falco-talon-chart
> /area event-generator-chart
> /area k8s-metacollector-chart
> /area falcosidekick
<!--
Please remove the leading whitespace before the `/area <>` you uncommented.
@@ -61,7 +57,6 @@ Fixes #
**Special notes for your reviewer**:
**Checklist**
<!--
Place an '[x]' (no spaces) in all applicable fields. Please remove unrelated fields.

@@ -1,6 +0,0 @@
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"

@@ -1,35 +0,0 @@
name: Check Helm Docs
on:
pull_request:
jobs:
readme:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Run Helm Docs and check the outcome
run: |
for chart in $(ls ./charts); do
docker run \
--rm \
--workdir=/helm-docs \
--volume "$(pwd):/helm-docs" \
-u $(id -u) \
jnorwood/helm-docs:v1.11.0 \
helm-docs -c ./charts/$chart -t ./README.gotmpl -o ./README.md
done
# Fail the step if helm-docs changed any README (non-empty diff).
git diff --exit-code
- name: Print a comment in case of failure
run: |
echo "The README.md files are not up to date.
Please, run \"make docs\" before pushing."
exit 1
if: |
failure() && github.event.pull_request.head.repo.full_name == github.repository

@@ -1,24 +0,0 @@
name: Links
on:
push:
branches:
- main
- master
pull_request:
jobs:
linkChecker:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 0
- name: Link Checker
uses: lycheeverse/lychee-action@5c4ee84814c983aa7164eaee476f014e53ff3963 #v2.5.0
with:
args: --no-progress './**/*.yml' './**/*.yaml' './**/*.md' './**/*.gotmpl' './**/*.tpl' './**/OWNERS' './**/LICENSE'
token: ${{ secrets.GITHUB_TOKEN }}
fail: true
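The same check can be approximated locally, assuming the `lychee` CLI is installed:

```bash
# Mirror the CI arguments for the most common file types.
lychee --no-progress './**/*.yml' './**/*.yaml' './**/*.md'
```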

@@ -1,66 +0,0 @@
name: Release Charts
on:
push:
branches:
- main
- master
paths:
- "charts/**"
jobs:
release:
runs-on: ubuntu-latest
permissions:
contents: write
packages: write
id-token: write
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 0
- name: Install Cosign
uses: sigstore/cosign-installer@398d4b0eeef1380460a10c8013a76f728fb906ac # v3.9.1
- name: Configure Git
run: |
git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
- name: Set up Helm
uses: azure/setup-helm@b9e51907a09c216f16ebe8536097933489208112 # v4.3.0
- name: Add dependency chart repos
run: |
helm repo add falcosecurity https://falcosecurity.github.io/charts
- name: Run chart-releaser
uses: helm/chart-releaser-action@cae68fefc6b5f367a0275617c9f83181ba54714f # v1.7.0
with:
charts_dir: charts
config: cr.yaml
env:
CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
- name: Login to GitHub Container Registry
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Publish and Sign OCI Charts
run: |
for chart in `find .cr-release-packages -name '*.tgz' -print`; do
helm push ${chart} oci://ghcr.io/${GITHUB_REPOSITORY} |& tee helm-push-output.log
file_name=${chart##*/}
chart_name=${file_name%-*}
digest=$(awk -F "[, ]+" '/Digest/{print $NF}' < helm-push-output.log)
cosign sign "ghcr.io/${GITHUB_REPOSITORY}/${chart_name}@${digest}"
done
env:
COSIGN_YES: true
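A published chart can then be checked with cosign's keyless verification; a sketch, assuming cosign v2 and the GitHub Actions OIDC issuer (the digest is a placeholder):

```bash
# Verify the signature recorded for a chart pushed by this workflow.
cosign verify \
  --certificate-identity-regexp "github.com/falcosecurity/charts" \
  --certificate-oidc-issuer "https://token.actions.githubusercontent.com" \
  ghcr.io/falcosecurity/charts/falco@sha256:<digest>
```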

@@ -1,74 +0,0 @@
name: Test Charts
on:
pull_request:
jobs:
test:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 0
- name: Set up Helm
uses: azure/setup-helm@b9e51907a09c216f16ebe8536097933489208112 # v4.3.0
with:
version: "3.14.0"
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: "3.x"
- name: Set up chart-testing
uses: helm/chart-testing-action@0d28d3144d3a25ea2cc349d6e59901c4ff469b3b # v2.7.0
- name: Run chart-testing (lint)
run: ct lint --config ct.yaml
- name: Run chart-testing (list-changed)
id: list-changed
run: |
changed=$(ct list-changed --config ct.yaml)
if [[ -n "$changed" ]]; then
echo "changed=true" >> $GITHUB_OUTPUT
fi
- name: Create KIND Cluster
if: steps.list-changed.outputs.changed == 'true'
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
with:
config: ./tests/kind-config.yaml
- name: Run chart-testing (install)
if: steps.list-changed.outputs.changed == 'true'
run: ct install --exclude-deprecated --config ct.yaml
go-unit-tests:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 0
- name: Set up Helm
uses: azure/setup-helm@b9e51907a09c216f16ebe8536097933489208112 # v4.3.0
with:
version: "3.10.3"
- name: Update repo deps
run: helm dependency update ./charts/falco
- name: Setup Go
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
with:
go-version: "1.21"
check-latest: true
- name: K8s-metacollector unit tests
run: go test ./charts/k8s-metacollector/tests/unit/...
- name: Falco unit tests
run: go test ./charts/falco/tests/unit/...

.gitignore vendored

@@ -1,6 +0,0 @@
# editor and IDE paraphernalia
.idea
*.swp
*.swo
*~
.vscode

@@ -1,22 +0,0 @@
nats:/host:port
https://yds.serverless.yandexcloud.net/
http:/host:port
https://chat.googleapis.com/v1/spaces/XXXXXX/YYYYYY
https://xxxx/hooks/YYYY
https://cliq.zoho.eu/api/v2/channelsbyname/XXXX/message*
https://outlook.office.com/webhook/XXXXXX/IncomingWebhook/YYYYYY
https://outlook.office.com/webhook/XXXXXX/IncomingWebhook/YYYYYY
https://discord.com/api/webhooks/xxxxxxxxxx
http://kafkarest:8082/topics/test
https://api.spyderbat.com/
https://hooks.slack.com/services/XXXX/YYYY/ZZZZ
http://\{domain*
https://github.com/falcosecurity/falcosidekick/tree/master/deploy/helm/falcosidekick
http://some.url/some/path/
https://localhost:32765/k8s-audit
https://some.url/some/path/
http://localhost:8765/versions
https://environmentid.live.dynatrace.com/api
https://yourdomain/e/ENVIRONMENTID/api
http://falco-talon:2803
https://http-intake.logs.datadoghq.com/

@@ -1,40 +0,0 @@
DOCS_IMAGE_VERSION="v1.11.0"
LINT_IMAGE_VERSION="v3.8.0"
# Charts' paths, relative to the current directory.
CHARTS := $(wildcard ./charts/*)
CHARTS_NAMES := $(notdir $(CHARTS))
.PHONY: lint
lint: helm-deps-update $(addprefix lint-, $(CHARTS_NAMES))
lint-%:
@docker run \
-it \
-e HOME=/home/ct \
--mount type=tmpfs,destination=/home/ct \
--workdir=/data \
--volume $$(pwd):/data \
-u $$(id -u) \
quay.io/helmpack/chart-testing:$(LINT_IMAGE_VERSION) \
ct lint --config ./ct.yaml --charts ./charts/$*
.PHONY: docs
docs: $(addprefix docs-, $(CHARTS_NAMES))
docs-%:
@docker run \
--rm \
--workdir=/helm-docs \
--volume "$$(pwd):/helm-docs" \
-u $$(id -u) \
jnorwood/helm-docs:$(DOCS_IMAGE_VERSION) \
helm-docs -c ./charts/$* -t ./README.gotmpl -o ./README.md
.PHONY: helm-deps-update
helm-deps-update: $(addprefix helm-deps-update-, $(CHARTS_NAMES))
helm-deps-update-%:
helm dependency update ./charts/$*
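The wildcard-derived pattern rules expose per-chart targets; for example, for a chart living at `./charts/falco` (Docker must be running for `lint` and `docs`):

```bash
make lint-falco              # ct lint for the falco chart only
make docs-falco              # regenerate charts/falco/README.md with helm-docs
make helm-deps-update-falco  # refresh the chart's dependencies
```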

OWNERS

@@ -1,13 +1,16 @@
approvers:
- leogr
- Issif
- cpanato
- alacuku
- ekoops
reviewers:
- bencer
emeritus_approvers:
- leodido
- fntlnz
- kris-nova
- leogr
- nibalizer
- Issif
reviewers:
- leodido
- fntlnz
- kris-nova
- leogr
- nibalizer
- nestorsalceda
- bencer
- Issif

@@ -1,10 +1,8 @@
# Falco Helm Charts
[![Falco Core Repository](https://github.com/falcosecurity/evolution/blob/main/repos/badges/falco-core-blue.svg)](https://github.com/falcosecurity/evolution/blob/main/REPOSITORIES.md#core-scope) [![Stable](https://img.shields.io/badge/status-stable-brightgreen?style=for-the-badge)](https://github.com/falcosecurity/evolution/blob/main/REPOSITORIES.md#stable) [![License](https://img.shields.io/github/license/falcosecurity/charts?style=for-the-badge)](./LICENSE)
This GitHub project is the source for our [Helm chart repository](https://v3.helm.sh/docs/topics/chart_repository/).
This GitHub project is the source for the [Falco](https://github.com/falcosecurity/falco) Helm chart repository that you can use to [deploy](https://falco.org/docs/getting-started/deployment/) Falco in your Kubernetes infrastructure.
The purpose of this repository is to provide a place for maintaining and contributing Charts related to the Falco project, with CI processes in place for managing the releasing of Charts into [our Helm Chart Repository](https://falcosecurity.github.io/charts).
The purpose of this repository is to provide a place for maintaining and contributing Charts related to the Falco project, with CI processes in place for managing the releasing of Charts into [our Helm Chart Repository]((https://falcosecurity.github.io/charts)).
For more information about installing and using Helm, see the
[Helm Docs](https://helm.sh/docs/).
@@ -12,21 +10,18 @@ For more information about installing and using Helm, see the
## Repository Structure
This GitHub repository contains the source for the packaged and versioned charts released to [https://falcosecurity.github.io/charts](https://falcosecurity.github.io/charts) (our Helm Chart Repository).
We also publish the charts as OCI images, hosted on [GitHub Packages](https://github.com/orgs/falcosecurity/packages?repo_name=charts).
The Charts in this repository are organized into folders: each directory that contains a `Chart.yaml` is a chart.
The Charts in the `master` branch (with a corresponding [GitHub release](https://github.com/falcosecurity/charts/releases)) match the latest packaged Charts in [our Helm Chart Repository](https://falcosecurity.github.io/charts), though there may be previous versions of a Chart available in that Chart Repository.
The Charts in the `master` branch (with a corresponding [GitHub release](https://github.com/falcosecurity/charts/releases)) match the latest packaged Charts in [our Helm Chart Repository]((https://falcosecurity.github.io/charts)), though there may be previous versions of a Chart available in that Chart Repository.
## Charts
Charts currently available are listed below.
- [falco](./charts/falco)
- [falcosidekick](./charts/falcosidekick)
- [event-generator](./charts/event-generator)
- [k8s-metacollector](./charts/k8s-metacollector)
- [falco-talon](./charts/falco-talon)
- [falco](falco)
- [falco-exporter](falco-exporter)
- [falcosidekick](falcosidekick)
## Usage
@@ -41,19 +36,4 @@ helm repo update
### Installing a chart
Please refer to the instructions provided by the Chart you want to install. For installing Falco via Helm, the documentation is [here](https://github.com/falcosecurity/charts/tree/master/charts/falco#adding-falcosecurity-repository).
## Contributing
We are glad to receive your contributions. To help you in the process, we have prepared a [CONTRIBUTING.md](https://github.com/falcosecurity/.github/blob/master/CONTRIBUTING.md), which includes detailed information on contributing to `falcosecurity` projects. Furthermore, we implemented a mechanism to automatically release and publish our charts whenever a PR is merged (if you are curious how this process works, you can find more details in our [release.md](release.md)).
So, we ask you to follow these simple steps when making your PR:
- The [DCO](https://github.com/falcosecurity/.github/blob/master/CONTRIBUTING.md#developer-certificate-of-origin) is required to contribute to a `falcosecurity` project. So ensure that all your commits have been signed off. We will not be able to merge the PR if a commit is not signed off.
- Bump the version number of the chart by modifying the `version` value in the chart's `Chart.yaml` file. This is particularly important, as it allows our CI to release a new chart version. If the version has not been increased, we will not be able to merge the PR.
- Add a new section in the chart's `CHANGELOG.md` file with the new version number of the chart.
- If your changes affect any chart variables, please update the chart's `README.gotmpl` file accordingly and run `make docs` in the main folder.
Finally, when opening your PR, please fill in the provided PR template, including the final checklist of items to indicate that all the steps above have been performed.
If you have any questions, please feel free to contact us via [GitHub issues](https://github.com/falcosecurity/charts/issues).
Please refer to the instructions provided by the Chart you want to install. For installing Falco via Helm, the documentation is [here](https://github.com/falcosecurity/charts/tree/master/falco#adding-falcosecurity-repository).

@@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

@@ -1,49 +0,0 @@
# Change Log
This file documents all notable changes to the `event-generator` Helm Chart. The release
numbering uses [semantic versioning](http://semver.org).
## v0.3.4
* Pass `--all` flag to event-generator binary to allow disabled rules to run, e.g. the k8saudit ruleset.
## v0.3.3
* Update README.md.
## v0.3.2
* no change to the chart itself. Updated README.md and makefile.
## v0.3.1
* noop change just to test the ci
## v0.3.0
## Major Changes
* Support configuration of revisionHistoryLimit of the deployment
## v0.2.0
## Major Changes
* Changing the grpc socket path from `unix:///var/run/falco/falco.sock` to `unix:///run/falco/falco.sock`. Please note that this change is potentially a breaking change if deploying the event generator alongside Falco < 0.33.0.
### Minor Changes
* Bump event-generator to 0.10.0
## v0.1.1
### Minor Changes
* Adding `-sleep` flag to the `pod-template.yaml`
## v0.1.0
### Major Changes
* Initial release of event-generator Helm Chart

@@ -1,123 +0,0 @@
# Event-generator
[event-generator](https://github.com/falcosecurity/event-generator) is a tool designed to generate events for both syscalls and k8s audit. The tool can be used to check whether Falco is working properly, by performing a variety of suspect actions that trigger security events. The event-generator implements a [minimalistic framework](https://github.com/falcosecurity/event-generator/tree/master/events) which makes it easy to implement new actions.
## Introduction
This chart helps deploy the event-generator in a Kubernetes cluster in order to test an already deployed Falco instance.
## Adding `falcosecurity` repository
Before installing the chart, add the `falcosecurity` charts repository:
```bash
helm repo add falcosecurity https://falcosecurity.github.io/charts
helm repo update
```
## Installing the Chart
To install the chart with default values and release name `event-generator` run:
```bash
helm install event-generator falcosecurity/event-generator
```
After a few seconds, event-generator should be running in the `default` namespace.
In order to install the event-generator in a custom namespace run:
```bash
# change the name of the namespace to fit your requirements.
kubectl create ns "ns-event-generator"
helm install event-generator falcosecurity/event-generator --namespace "ns-event-generator"
```
When the event-generator is installed with the default values of the `values.yaml` file, it is deployed as a k8s job that executes the `run` command and generates activity only for k8s audit.
For more info, check the next section.
> **Tip**: List all releases using `helm list`; a release is a name used to track a specific deployment.
### Commands, actions and options
The event-generator tool accepts two commands: `run` and `test`. The first just generates activity; the latter, which is more sophisticated, also checks that Falco triggers the expected rule for each generated activity. Both accept an argument that determines the actions to be performed:
```bash
event-generator run/test [regexp]
```
Without arguments, all actions are performed; otherwise, only those actions matching the given regular expression are run. If we want to `test` just the actions related to k8s, the following command does the trick:
```bash
event-generator test ^k8saudit
```
The list of the supported actions can be found [here](https://github.com/falcosecurity/event-generator#list-actions).
Before diving into how this Helm chart deploys and manages instances of the event-generator in Kubernetes, there are two more options worth covering (see the example after this list):
+ `--loop` to run actions in a loop
+ `--sleep` to set the length of time to wait before running an action (defaults to 1s)
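For example, a long-running generator that pauses between actions could be launched like this (the regexp and sleep value are illustrative):

```bash
# Run only the syscall actions, forever, waiting 500ms between actions.
event-generator run "^syscall" --loop --sleep 500ms
```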
### Deployment modes in k8s
Depending on the commands, actions and options configured, the event-generator can be deployed as a k8s `job` or `deployment`. If the `config.loop` value is set, a `deployment` is used, since it is a long-running process; otherwise, a `job` is used.
A configuration like the one below, set in the `values.yaml` file, will deploy the event-generator as a `deployment` with the `run` command passed to it, and will generate activity only for syscalls:
```yaml
config:
# -- The event-generator accepts two commands (run, test):
# run: runs actions.
# test: runs and tests actions.
# For more info see: https://github.com/falcosecurity/event-generator
command: run
# -- Regular expression used to select the actions to be run.
actions: "^syscall"
# -- Runs in a loop the actions.
# If set to "true" the event-generator is deployed using a k8s deployment otherwise a k8s job.
loop: true
# -- The length of time to wait before running an action. Non-zero values should contain
# a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means no sleep. (default 100ms)
sleep: ""
grpc:
# -- Set it to true if you are deploying in "test" mode.
enabled: false
# -- Path to the Falco grpc socket.
bindAddress: "unix:///var/run/falco/falco.sock"
```
The following configuration will use a k8s `job` since we want to perform the k8s activity once and check that Falco reacts properly to those actions:
```yaml
config:
# -- The event-generator accepts two commands (run, test):
# run: runs actions.
# test: runs and tests actions.
# For more info see: https://github.com/falcosecurity/event-generator
command: test
# -- Regular expression used to select the actions to be run.
actions: "^k8saudit"
# -- Runs in a loop the actions.
# If set to "true" the event-generator is deployed using a k8s deployment otherwise a k8s job.
loop: false
# -- The length of time to wait before running an action. Non-zero values should contain
# a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means no sleep. (default 100ms)
sleep: ""
grpc:
# -- Set it to true if you are deploying in "test" mode.
enabled: true
# -- Path to the Falco grpc socket.
bindAddress: "unix:///var/run/falco/falco.sock"
```
Note that **`grpc.enabled` must be set to true when running with the `test` command. Be sure that Falco exposes the gRPC socket and emits output to it**.
## Uninstalling the Chart
To uninstall the `event-generator` release:
```bash
helm uninstall event-generator
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
The following table lists the main configurable parameters of the {{ template "chart.name" . }} chart v{{ template "chart.version" . }} and their default values. See `values.yaml` for the full list.
{{ template "chart.valuesSection" . }}

@@ -1,145 +0,0 @@
# Event-generator
[event-generator](https://github.com/falcosecurity/event-generator) is a tool designed to generate events for both syscalls and k8s audit. The tool can be used to check whether Falco is working properly, by performing a variety of suspect actions that trigger security events. The event-generator implements a [minimalistic framework](https://github.com/falcosecurity/event-generator/tree/master/events) which makes it easy to implement new actions.
## Introduction
This chart helps deploy the event-generator in a Kubernetes cluster in order to test an already deployed Falco instance.
## Adding `falcosecurity` repository
Before installing the chart, add the `falcosecurity` charts repository:
```bash
helm repo add falcosecurity https://falcosecurity.github.io/charts
helm repo update
```
## Installing the Chart
To install the chart with default values and release name `event-generator` run:
```bash
helm install event-generator falcosecurity/event-generator
```
After a few seconds, event-generator should be running in the `default` namespace.
In order to install the event-generator in a custom namespace run:
```bash
# change the name of the namespace to fit your requirements.
kubectl create ns "ns-event-generator"
helm install event-generator falcosecurity/event-generator --namespace "ns-event-generator"
```
When the event-generator is installed with the default values of the `values.yaml` file, it is deployed as a k8s job that executes the `run` command and generates activity only for k8s audit.
For more info, check the next section.
> **Tip**: List all releases using `helm list`; a release is a name used to track a specific deployment.
### Commands, actions and options
The event-generator tool accepts two commands: `run` and `test`. The first just generates activity; the latter, which is more sophisticated, also checks that Falco triggers the expected rule for each generated activity. Both accept an argument that determines the actions to be performed:
```bash
event-generator run/test [regexp]
```
Without arguments, all actions are performed; otherwise, only those actions matching the given regular expression are run. If we want to `test` just the actions related to k8s, the following command does the trick:
```bash
event-generator test ^k8saudit
```
The list of the supported actions can be found [here](https://github.com/falcosecurity/event-generator#list-actions).
Before diving into how this Helm chart deploys and manages instances of the event-generator in Kubernetes, there are two more options worth covering:
+ `--loop` to run actions in a loop
+ `--sleep` to set the length of time to wait before running an action (defaults to 1s)
### Deployment modes in k8s
Depending on the commands, actions and options configured, the event-generator can be deployed as a k8s `job` or `deployment`. If the `config.loop` value is set, a `deployment` is used, since it is a long-running process; otherwise, a `job` is used.
A configuration like the one below, set in the `values.yaml` file, will deploy the event-generator as a `deployment` with the `run` command passed to it, and will generate activity only for syscalls:
```yaml
config:
# -- The event-generator accepts two commands (run, test):
# run: runs actions.
# test: runs and tests actions.
# For more info see: https://github.com/falcosecurity/event-generator
command: run
# -- Regular expression used to select the actions to be run.
actions: "^syscall"
# -- Runs in a loop the actions.
# If set to "true" the event-generator is deployed using a k8s deployment otherwise a k8s job.
loop: true
# -- The length of time to wait before running an action. Non-zero values should contain
# a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means no sleep. (default 100ms)
sleep: ""
grpc:
# -- Set it to true if you are deploying in "test" mode.
enabled: false
# -- Path to the Falco grpc socket.
bindAddress: "unix:///var/run/falco/falco.sock"
```
The following configuration will use a k8s `job` since we want to perform the k8s activity once and check that Falco reacts properly to those actions:
```yaml
config:
# -- The event-generator accepts two commands (run, test):
# run: runs actions.
# test: runs and tests actions.
# For more info see: https://github.com/falcosecurity/event-generator
command: test
# -- Regular expression used to select the actions to be run.
actions: "^k8saudit"
# -- Runs in a loop the actions.
# If set to "true" the event-generator is deployed using a k8s deployment otherwise a k8s job.
loop: false
# -- The length of time to wait before running an action. Non-zero values should contain
# a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means no sleep. (default 100ms)
sleep: ""
grpc:
# -- Set it to true if you are deploying in "test" mode.
enabled: true
# -- Path to the Falco grpc socket.
bindAddress: "unix:///var/run/falco/falco.sock"
```
Note that **`grpc.enabled` must be set to true when running with the `test` command. Be sure that Falco exposes the gRPC socket and emits output to it**.
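For instance, a test-mode install matching the configuration above might look like this (a sketch; the namespace is illustrative):

```bash
# Deploys a k8s job that runs the k8saudit actions once and verifies the alerts.
helm install event-generator falcosecurity/event-generator \
  --namespace ns-event-generator \
  --set config.command=test \
  --set config.actions="^k8saudit" \
  --set config.loop=false \
  --set config.grpc.enabled=true
```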
## Uninstalling the Chart
To uninstall the `event-generator` release:
```bash
helm uninstall event-generator
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
The following table lists the main configurable parameters of the event-generator chart v0.3.4 and their default values. See `values.yaml` for the full list.
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| affinity | object | `{}` | Affinity, like the nodeSelector but with more expressive syntax. |
| config.actions | string | `"^syscall"` | Regular expression used to select the actions to be run. |
| config.command | string | `"run"` | The event-generator accepts two commands (run, test): run: runs actions. test: runs and tests actions. For more info see: https://github.com/falcosecurity/event-generator. |
| config.grpc.bindAddress | string | `"unix:///run/falco/falco.sock"` | Path to the Falco grpc socket. |
| config.grpc.enabled | bool | `false` | Set it to true if you are deploying in "test" mode. |
| config.loop | bool | `true` | Runs in a loop the actions. If set to "true" the event-generator is deployed using a k8s deployment otherwise a k8s job. |
| config.sleep | string | `""` | The length of time to wait before running an action. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means no sleep. (default 100ms) |
| fullnameOverride | string | `""` | Used to override the chart full name. |
| image | object | `{"pullPolicy":"IfNotPresent","repository":"falcosecurity/event-generator","tag":"latest"}` | Parameters of the event-generator image. |
| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the event-generator image |
| image.repository | string | `"falcosecurity/event-generator"` | Repository from where the image is pulled. |
| image.tag | string | `"latest"` | Images' tag to select a development/custom version of event-generator instead of a release. Overrides the image tag whose default is the chart appVersion. |
| imagePullSecrets | list | `[]` | Secrets used to pull the image from a private repository. |
| nameOverride | string | `""` | Used to override the chart name. |
| nodeSelector | object | `{}` | Selectors to choose a given node where to run the pods. |
| podAnnotations | object | `{}` | Annotations to be added to the pod. |
| podSecurityContext | object | `{}` | Security context for the pod. |
| replicasCount | int | `1` | Number of replicas of the event-generator (meaningful when installed as a deployment). |
| securityContext | object | `{}` | Security context for the containers. |
| tolerations | list | `[]` | Tolerations to allow the pods to be scheduled on nodes whose taints the pod tolerates. |

@@ -1,69 +0,0 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "event-generator.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "event-generator.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "event-generator.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "event-generator.labels" -}}
helm.sh/chart: {{ include "event-generator.chart" . }}
{{ include "event-generator.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "event-generator.selectorLabels" -}}
app.kubernetes.io/name: {{ include "event-generator.name" . | quote }}
app.kubernetes.io/instance: {{ .Release.Name | quote }}
app.kubernetes.io/part-of: {{ include "event-generator.name" . | quote }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "event-generator.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "event-generator.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
{{- define "grpc.unixSocketDir" -}}
{{- if and .Values.config.grpc.enabled .Values.config.grpc.bindAddress (hasPrefix "unix://" .Values.config.grpc.bindAddress) -}}
{{- .Values.config.grpc.bindAddress | trimPrefix "unix://" | dir -}}
{{- end -}}
{{- end -}}
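As an illustration, with the default `config.grpc.bindAddress` of `unix:///run/falco/falco.sock`, this helper yields `/run/falco`, which the pod template mounts as a hostPath volume; the rendered manifests can be inspected locally:

```bash
# Render the chart and show the hostPath volume derived by the helper.
helm template event-generator falcosecurity/event-generator \
  --set config.grpc.enabled=true | grep -B1 -A2 hostPath
```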

@@ -1,18 +0,0 @@
{{- if .Values.config.loop -}}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "event-generator.fullname" . }}
labels:
{{- include "event-generator.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicasCount }}
{{- if .Values.revisionHistoryLimit }}
revisionHistoryLimit: {{ .Values.revisionHistoryLimit }}
{{- end }}
selector:
matchLabels:
{{- include "event-generator.selectorLabels" . | nindent 6 }}
template:
{{- include "event-generator.podTemplate" . | nindent 4 }}
{{- end -}}

@@ -1,12 +0,0 @@
{{- if not .Values.config.loop -}}
apiVersion: batch/v1
kind: Job
metadata:
name: {{ include "event-generator.fullname" . }}
labels:
{{- include "event-generator.labels" . | nindent 4 }}
spec:
backoffLimit: 1
template:
{{- include "event-generator.podTemplate" . | nindent 4 }}
{{- end -}}

@@ -1,70 +0,0 @@
{{- define "event-generator.podTemplate" -}}
metadata:
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
{{- include "event-generator.selectorLabels" . | nindent 4 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 4 }}
{{- end }}
serviceAccountName: {{ include "event-generator.fullname" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 4 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 8 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
- /bin/event-generator
- {{ .Values.config.command }}
- --all
{{- if .Values.config.actions }}
- {{ .Values.config.actions }}
{{- end }}
{{- if .Values.config.loop }}
- --loop
{{- end }}
{{- if .Values.config.sleep }}
- --sleep={{- .Values.config.sleep }}
{{- end }}
{{- if .Values.config.grpc.enabled }}
- --grpc-unix-socket={{- .Values.config.grpc.bindAddress }}
{{- end }}
env:
- name: FALCO_EVENT_GENERATOR_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- if .Values.config.grpc.enabled }}
volumeMounts:
- mountPath: {{ include "grpc.unixSocketDir" . }}
name: unix-socket-dir
{{- end }}
{{- if .Values.config.grpc.enabled }}
volumes:
- hostPath:
path: {{ include "grpc.unixSocketDir" . }}
name: unix-socket-dir
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if not .Values.config.loop }}
restartPolicy: Never
{{- end }}
{{- end -}}

@@ -1,76 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "event-generator.fullname" . }}
labels:
{{- include "event-generator.labels" . | nindent 4 }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "event-generator.fullname" . }}
labels:
{{- include "event-generator.labels" . | nindent 4 }}
rules:
- apiGroups:
- ""
resources:
- configmaps
- services
- serviceaccounts
- pods
verbs:
- list
- get
- create
- delete
- apiGroups:
- apps
- extensions
resources:
- deployments
verbs:
- list
- get
- create
- delete
- apiGroups:
- rbac.authorization.k8s.io
resources:
- roles
- rolebindings
verbs:
- get
- list
- create
- delete
# These are only so the event generator can create roles that have these properties.
# It will result in a falco alert for the rules "ClusterRole With Wildcard Created", "ClusterRole With Pod Exec Created"
- apiGroups:
- ""
resources:
- pods/exec
verbs:
- get
- apiGroups:
- ""
resources:
- '*'
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "event-generator.fullname" . }}
labels:
{{- include "event-generator.labels" . | nindent 4 }}
subjects:
- kind: ServiceAccount
name: {{ include "event-generator.fullname" . }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: {{ include "event-generator.fullname" . }}
apiGroup: rbac.authorization.k8s.io
---

@@ -1,71 +0,0 @@
# Default values for event-generator.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# -- Number of replicas of the event-generator (meaningful when installed as a deployment).
replicasCount: 1
# -- Number of old revisions to retain to allow rollback (if not set, the Kubernetes default of 10 applies)
# revisionHistoryLimit: 1
image:
# -- Repository from where the image is pulled.
repository: falcosecurity/event-generator
# -- Pull policy for the event-generator image
pullPolicy: IfNotPresent
# -- Images' tag to select a development/custom version of event-generator instead of a release.
# Overrides the image tag whose default is the chart appVersion.
tag: "latest"
# -- Secrets used to pull the image from a private repository.
imagePullSecrets: []
# -- Used to override the chart name.
nameOverride: ""
# -- Used to override the chart full name.
fullnameOverride: ""
# -- Annotations to be added to the pod.
podAnnotations: {}
# -- Security context for the pod.
podSecurityContext: {}
# fsGroup: 2000
# -- Security context for the containers.
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
# -- Selectors to choose a given node where to run the pods.
nodeSelector: {}
# -- Tolerations to allow the pods to be scheduled on nodes whose taints the pod tolerates.
tolerations: []
# -- Affinity, like the nodeSelector but with more expressive syntax.
affinity: {}
config:
# -- The event-generator accepts two commands (run, test):
# run: runs actions.
# test: runs and tests actions.
# For more info see: https://github.com/falcosecurity/event-generator.
command: run
# -- Regular expression used to select the actions to be run.
actions: "^syscall"
# -- Runs in a loop the actions.
# If set to "true" the event-generator is deployed using a k8s deployment otherwise a k8s job.
loop: true
# -- The length of time to wait before running an action. Non-zero values should contain
# a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means no sleep. (default 100ms)
sleep: ""
grpc:
# -- Set it to true if you are deploying in "test" mode.
enabled: false
# -- Path to the Falco grpc socket.
bindAddress: "unix:///run/falco/falco.sock"

@@ -1,39 +0,0 @@
# Change Log
This file documents all notable changes to the Falco Talon Helm Chart. The release
numbering uses [semantic versioning](http://semver.org).
## 0.3.0 - 2024-02-07
- bump up version to `v0.3.0`
- fix missing usage of the `imagePullSecrets`
## 0.2.3 - 2024-12-18
- add a Grafana dashboard for the Prometheus metrics
## 0.2.1 - 2024-12-09
- bump up version to `v0.2.1` for bug fixes
## 0.2.0 - 2024-11-26
- configure pod to not rollout on configmap change
- configure pod to rollout on secret change
- add config.rulesOverride allowing users to override config rules
## 0.1.3 - 2024-11-08
- change the key for the range over the rules files
## 0.1.2 - 2024-10-14
- remove all refs to the previous org
## 0.1.1 - 2024-10-01
- Use version `0.1.1`
- Fix wrong port for the `serviceMonitor`
## 0.1.0 - 2024-09-05
- First release

@@ -1,18 +0,0 @@
apiVersion: v1
appVersion: 0.3.0
description: React to the events from Falco
name: falco-talon
version: 0.3.0
keywords:
- falco
- monitoring
- security
- response-engine
home: https://github.com/falcosecurity/falco-talon
sources:
- https://github.com/falcosecurity/falco-talon
maintainers:
- name: Issif
email: issif+github@gadz.org
- name: IgorEulalio
email: igoreulalio.ie@gmail.com

@@ -1,76 +0,0 @@
# Falco Talon
![release](https://flat.badgen.net/github/release/falcosecurity/falco-talon/latest?color=green) ![last commit](https://flat.badgen.net/github/last-commit/falcosecurity/falco-talon) ![licence](https://flat.badgen.net/badge/license/Apache2.0/blue) ![docker pulls](https://flat.badgen.net/docker/pulls/issif/falco-talon?icon=docker)
## Description
`Falco Talon` is a Response Engine for managing threats in your Kubernetes clusters. It enhances the solutions proposed by the Falco community with a no-code, tailor-made solution. With simple rules, you can react to `events` from [`Falco`](https://falco.org) in milliseconds.
## Architecture
`Falco Talon` can receive the `events` from [`Falco`](https://falco.org) or [`Falcosidekick`](https://github.com/falcosecurity/falcosidekick):
```mermaid
flowchart LR
falco
falcosidekick
falco-talon
falco -- event --> falcosidekick
falco -- event --> falco-talon
falcosidekick -- event --> falco-talon
kubernetes -- context --> falco-talon
falco-talon -- action --> aws
falco-talon -- output --> minio
falco-talon -- action --> kubernetes
falco-talon -- notification --> slack
```
## Documentation
The full documentation is available on its own website: [https://docs.falco-talon.org/docs](https://docs.falco-talon.org/docs).
## Installation
```shell
helm repo add falcosecurity https://falcosecurity.github.io/charts
helm repo update
helm install falco-talon falcosecurity/falco-talon -n falco --create-namespace -f values.yaml
```
### Update the rules
Update `rules.yaml` then:
```
helm upgrade falco-talon falcosecurity/falco-talon -n falco -f values.yaml
```
### Uninstall Falco Talon
```
helm delete falco-talon -n falco
```
## Configuration
{{ template "chart.valuesSection" . }}
## Connect Falcosidekick
Once you have installed `Falco Talon` with Helm, you need to connect `Falcosidekick` by adding the flag `--set falcosidekick.config.webhook.address=http://falco-talon:2803`
```shell
helm upgrade -i falco falcosecurity/falco --namespace falco \
--create-namespace \
--set tty=true \
--set falcosidekick.enabled=true \
--set falcosidekick.config.talon.address=http://falco-talon:2803
```
## License
Falco Talon is licensed to you under the **Apache 2.0** open source license.
## Author
Thomas Labarussias (https://github.com/Issif)

@@ -1,184 +0,0 @@
# Falco Talon
![release](https://flat.badgen.net/github/release/falcosecurity/falco-talon/latest?color=green) ![last commit](https://flat.badgen.net/github/last-commit/falcosecurity/falco-talon) ![licence](https://flat.badgen.net/badge/license/Apache2.0/blue) ![docker pulls](https://flat.badgen.net/docker/pulls/issif/falco-talon?icon=docker)
## Description
`Falco Talon` is a Response Engine for managing threats in your Kubernetes clusters. It enhances the solutions proposed by the Falco community with a no-code, tailor-made solution. With simple rules, you can react to `events` from [`Falco`](https://falco.org) in milliseconds.
## Architecture
`Falco Talon` can receive the `events` from [`Falco`](https://falco.org) or [`Falcosidekick`](https://github.com/falcosecurity/falcosidekick):
```mermaid
flowchart LR
falco
falcosidekick
falco-talon
falco -- event --> falcosidekick
falco -- event --> falco-talon
falcosidekick -- event --> falco-talon
kubernetes -- context --> falco-talon
falco-talon -- action --> aws
falco-talon -- output --> minio
falco-talon -- action --> kubernetes
falco-talon -- notification --> slack
```
## Documentation
The full documentation is available on its own website: [https://docs.falco-talon.org/docs](https://docs.falco-talon.org/docs).
## Installation
```shell
helm repo add falcosecurity https://falcosecurity.github.io/charts
helm repo update
helm install falco-talon falcosecurity/falco-talon -n falco --create-namespace -f values.yaml
```
### Update the rules
Update `rules.yaml` then:
```
helm upgrade falco-talon falcosecurity/falco-talon -n falco -f values.yaml
```
### Uninstall Falco Talon
```
helm delete falco-talon -n falco
```
## Configuration
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| affinity | object | `{}` | affinity |
| config | object | `{"aws":{"accesKey":"","externalId":"","region":"","roleArn":"","secretKey":""},"deduplication":{"leaderElection":true,"timeWindowSeconds":5},"defaultNotifiers":["k8sevents"],"listenAddress":"0.0.0.0","listenPort":2803,"minio":{"accessKey":"","endpoint":"","secretKey":"","useSsl":false},"notifiers":{"elasticsearch":{"createIndexTemplate":true,"numberOfReplicas":1,"numberOfShards":1,"url":""},"loki":{"apiKey":"","customHeaders":[],"hostPort":"","tenant":"","user":""},"slack":{"footer":"https://github.com/falcosecurity/falco-talon","format":"long","icon":"https://upload.wikimedia.org/wikipedia/commons/2/26/Circaetus_gallicus_claw.jpg","username":"Falco Talon","webhookUrl":""},"smtp":{"format":"html","from":"","hostPort":"","password":"","tls":false,"to":"","user":""},"webhook":{"url":""}},"otel":{"collectorEndpoint":"","collectorPort":4317,"collectorUseInsecureGrpc":false,"metricsEnabled":false,"tracesEnabled":false},"printAllEvents":false,"rulesOverride":"- action: Terminate Pod\n actionner: kubernetes:terminate\n parameters:\n ignore_daemonsets: true\n ignore_statefulsets: true\n grace_period_seconds: 20\n","watchRules":true}` | config of Falco Talon (See https://docs.falco-talon.org/docs/configuration/) |
| config.aws | object | `{"accesKey":"","externalId":"","region":"","roleArn":"","secretKey":""}` | aws |
| config.aws.accesKey | string | `""` | access key (if not specified, default access_key from provider credential chain will be used) |
| config.aws.externalId | string | `""` | external id |
| config.aws.region | string | `""` | region (if not specified, default region from provider credential chain will be used) |
| config.aws.roleArn | string | `""` | role arn |
| config.aws.secretKey | string | `""` | secret key (if not specified, default secret_key from provider credential chain will be used) |
| config.deduplication | object | `{"leaderElection":true,"timeWindowSeconds":5}` | deduplication of the Falco events |
| config.deduplication.leaderElection | bool | `true` | enable the leader election for cluster mode |
| config.deduplication.timeWindowSeconds | int | `5` | duration in seconds for the deduplication time window |
| config.defaultNotifiers | list | `["k8sevents"]` | default notifiers for all rules |
| config.listenAddress | string | `"0.0.0.0"` | listen address |
| config.listenPort | int | `2803` | listen port |
| config.minio | object | `{"accessKey":"","endpoint":"","secretKey":"","useSsl":false}` | minio |
| config.minio.accessKey | string | `""` | access key |
| config.minio.endpoint | string | `""` | endpoint |
| config.minio.secretKey | string | `""` | secret key |
| config.minio.useSsl | bool | `false` | use ssl |
| config.notifiers | object | `{"elasticsearch":{"createIndexTemplate":true,"numberOfReplicas":1,"numberOfShards":1,"url":""},"loki":{"apiKey":"","customHeaders":[],"hostPort":"","tenant":"","user":""},"slack":{"footer":"https://github.com/falcosecurity/falco-talon","format":"long","icon":"https://upload.wikimedia.org/wikipedia/commons/2/26/Circaetus_gallicus_claw.jpg","username":"Falco Talon","webhookUrl":""},"smtp":{"format":"html","from":"","hostPort":"","password":"","tls":false,"to":"","user":""},"webhook":{"url":""}}` | notifiers (See https://docs.falco-talon.org/docs/notifiers/list/ for the settings) |
| config.notifiers.elasticsearch | object | `{"createIndexTemplate":true,"numberOfReplicas":1,"numberOfShards":1,"url":""}` | elasticsearch |
| config.notifiers.elasticsearch.createIndexTemplate | bool | `true` | create the index template |
| config.notifiers.elasticsearch.numberOfReplicas | int | `1` | number of replicas |
| config.notifiers.elasticsearch.numberOfShards | int | `1` | number of shards |
| config.notifiers.elasticsearch.url | string | `""` | url |
| config.notifiers.loki | object | `{"apiKey":"","customHeaders":[],"hostPort":"","tenant":"","user":""}` | loki |
| config.notifiers.loki.apiKey | string | `""` | api key |
| config.notifiers.loki.customHeaders | list | `[]` | custom headers |
| config.notifiers.loki.hostPort | string | `""` | host:port |
| config.notifiers.loki.tenant | string | `""` | tenant |
| config.notifiers.loki.user | string | `""` | user |
| config.notifiers.slack | object | `{"footer":"https://github.com/falcosecurity/falco-talon","format":"long","icon":"https://upload.wikimedia.org/wikipedia/commons/2/26/Circaetus_gallicus_claw.jpg","username":"Falco Talon","webhookUrl":""}` | slack |
| config.notifiers.slack.footer | string | `"https://github.com/falcosecurity/falco-talon"` | footer |
| config.notifiers.slack.format | string | `"long"` | format |
| config.notifiers.slack.icon | string | `"https://upload.wikimedia.org/wikipedia/commons/2/26/Circaetus_gallicus_claw.jpg"` | icon |
| config.notifiers.slack.username | string | `"Falco Talon"` | username |
| config.notifiers.slack.webhookUrl | string | `""` | webhook url |
| config.notifiers.smtp | object | `{"format":"html","from":"","hostPort":"","password":"","tls":false,"to":"","user":""}` | smtp |
| config.notifiers.smtp.format | string | `"html"` | format |
| config.notifiers.smtp.from | string | `""` | from |
| config.notifiers.smtp.hostPort | string | `""` | host:port |
| config.notifiers.smtp.password | string | `""` | password |
| config.notifiers.smtp.tls | bool | `false` | enable tls |
| config.notifiers.smtp.to | string | `""` | to |
| config.notifiers.smtp.user | string | `""` | user |
| config.notifiers.webhook | object | `{"url":""}` | webhook |
| config.notifiers.webhook.url | string | `""` | url |
| config.otel | object | `{"collectorEndpoint":"","collectorPort":4317,"collectorUseInsecureGrpc":false,"metricsEnabled":false,"tracesEnabled":false}` | open telemetry parameters |
| config.otel.collectorEndpoint | string | `""` | collector endpoint |
| config.otel.collectorPort | int | `4317` | collector port |
| config.otel.collectorUseInsecureGrpc | bool | `false` | use insecure grpc |
| config.otel.metricsEnabled | bool | `false` | enable otel metrics |
| config.otel.tracesEnabled | bool | `false` | enable otel traces |
| config.printAllEvents | bool | `false` | print in stdout all received events, not only those which match a rule |
| config.watchRules | bool | `true` | auto reload the rules when the files change |
| extraEnv | list | `[{"name":"LOG_LEVEL","value":"warning"}]` | extra env |
| grafana | object | `{"dashboards":{"configMaps":{"talon":{"folder":"","name":"falco-talon-grafana-dashboard","namespace":""}},"enabled":false}}` | grafana contains the configuration related to grafana. |
| grafana.dashboards | object | `{"configMaps":{"talon":{"folder":"","name":"falco-talon-grafana-dashboard","namespace":""}},"enabled":false}` | dashboards contains configuration for grafana dashboards. |
| grafana.dashboards.configMaps | object | `{"talon":{"folder":"","name":"falco-talon-grafana-dashboard","namespace":""}}` | configmaps to be deployed that contain a grafana dashboard. |
| grafana.dashboards.configMaps.talon | object | `{"folder":"","name":"falco-talon-grafana-dashboard","namespace":""}` | falco-talon contains the configuration for falco talon's dashboard. |
| grafana.dashboards.configMaps.talon.folder | string | `""` | folder where the dashboard is stored by grafana. |
| grafana.dashboards.configMaps.talon.name | string | `"falco-talon-grafana-dashboard"` | name specifies the name for the configmap. |
| grafana.dashboards.configMaps.talon.namespace | string | `""` | namespace specifies the namespace for the configmap. |
| grafana.dashboards.enabled | bool | `false` | enabled specifies whether the dashboards should be deployed. |
| image | object | `{"pullPolicy":"Always","registry":"falco.docker.scarf.sh","repository":"falcosecurity/falco-talon","tag":""}` | image parameters |
| image.pullPolicy | string | `"Always"` | The image pull policy |
| image.registry | string | `"falco.docker.scarf.sh"` | The image registry to pull from |
| image.repository | string | `"falcosecurity/falco-talon"` | The image repository to pull from |
| image.tag | string | `""` | Override the image tag to pull |
| imagePullSecrets | list | `[]` | one or more secrets to be used when pulling images |
| ingress | object | `{"annotations":{},"enabled":false,"hosts":[{"host":"falco-talon.local","paths":[{"path":"/"}]}],"tls":[]}` | ingress parameters |
| ingress.annotations | object | `{}` | annotations of the ingress |
| ingress.enabled | bool | `false` | enable the ingress |
| ingress.hosts | list | `[{"host":"falco-talon.local","paths":[{"path":"/"}]}]` | hosts |
| ingress.tls | list | `[]` | tls |
| nameOverride | string | `""` | override name |
| nodeSelector | object | `{}` | node selector |
| podAnnotations | object | `{}` | pod annotations |
| podSecurityContext | object | `{"fsGroup":1234,"runAsUser":1234}` | pod security context |
| podSecurityContext.fsGroup | int | `1234` | group |
| podSecurityContext.runAsUser | int | `1234` | user id |
| podSecurityPolicy | object | `{"create":false}` | pod security policy |
| podSecurityPolicy.create | bool | `false` | enable the creation of the PSP |
| priorityClassName | string | `""` | priority class name |
| rbac | object | `{"caliconetworkpolicies":["get","update","patch","create"],"ciliumnetworkpolicies":["get","update","patch","create"],"clusterroles":["get","delete"],"configmaps":["get","delete"],"daemonsets":["get","delete"],"deployments":["get","delete"],"events":["get","update","patch","create"],"leases":["get","update","patch","watch","create"],"namespaces":["get","delete"],"networkpolicies":["get","update","patch","create"],"nodes":["get","update","patch","watch","create"],"pods":["get","update","patch","delete","list"],"podsEphemeralcontainers":["patch","create"],"podsEviction":["get","create"],"podsExec":["get","create"],"podsLog":["get"],"replicasets":["get","delete"],"roles":["get","delete"],"secrets":["get","delete"],"serviceAccount":{"create":true,"name":""},"statefulsets":["get","delete"]}` | rbac |
| rbac.serviceAccount.create | bool | `true` | create the service account. If create is false, name is required |
| rbac.serviceAccount.name | string | `""` | name of the service account |
| replicaCount | int | `2` | number of running pods |
| resources | object | `{}` | resources |
| service | object | `{"annotations":{},"port":2803,"type":"ClusterIP"}` | service parameters |
| service.annotations | object | `{}` | annotations of the service |
| service.port | int | `2803` | port of the service |
| service.type | string | `"ClusterIP"` | type of service |
| serviceMonitor | object | `{"additionalLabels":{},"enabled":false,"interval":"30s","path":"/metrics","port":"http","relabelings":[],"scheme":"http","scrapeTimeout":"10s","targetLabels":[],"tlsConfig":{}}` | serviceMonitor holds the configuration for the ServiceMonitor CRD. |
| serviceMonitor.additionalLabels | object | `{}` | additionalLabels specifies labels to be added on the Service Monitor. |
| serviceMonitor.enabled | bool | `false` | enable the deployment of a Service Monitor for the Prometheus Operator. |
| serviceMonitor.interval | string | `"30s"` | interval specifies the time interval at which Prometheus should scrape metrics from the service. |
| serviceMonitor.path | string | `"/metrics"` | path at which the metrics are exposed |
| serviceMonitor.port | string | `"http"` | portname at which the metrics are exposed |
| serviceMonitor.relabelings | list | `[]` | relabelings configures the relabeling rules to apply to the targets' metadata labels. |
| serviceMonitor.scheme | string | `"http"` | scheme specifies the network protocol used by the metrics endpoint. In this case HTTP. |
| serviceMonitor.scrapeTimeout | string | `"10s"` | scrapeTimeout determines the maximum time Prometheus should wait for a target to respond to a scrape request. If the target does not respond within the specified timeout, Prometheus considers the scrape as failed for that target. |
| serviceMonitor.targetLabels | list | `[]` | targetLabels defines the labels which are transferred from the associated Kubernetes service object onto the ingested metrics. |
| serviceMonitor.tlsConfig | object | `{}` | tlsConfig specifies TLS (Transport Layer Security) configuration for secure communication when scraping metrics from a service. It allows you to define the details of the TLS connection, such as CA certificate, client certificate, and client key. Currently, Falco Talon does not support TLS configuration for the metrics endpoint. |
| tolerations | list | `[]` | tolerations |
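As a concrete illustration, several of the values documented above can be overridden at install time. A minimal sketch, assuming the chart is published as `falcosecurity/falco-talon` in the falcosecurity Helm repo (release and namespace names are illustrative):
```shell
# Install the chart and override a few of the documented values.
helm install falco-talon falcosecurity/falco-talon \
  --create-namespace \
  --namespace falco \
  --set replicaCount=2 \
  --set serviceMonitor.enabled=true \
  --set grafana.dashboards.enabled=true
```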
## Connect Falcosidekick
Once you have installed `Falco Talon` with Helm, you need to connect `Falcosidekick` by adding the flag `--set falcosidekick.config.talon.address=http://falco-talon:2803` when installing or upgrading Falco:
```shell
helm upgrade -i falco falcosecurity/falco --namespace falco \
--create-namespace \
--set tty=true \
--set falcosidekick.enabled=true \
--set falcosidekick.config.talon.address=http://falco-talon:2803
```
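Equivalently, the same settings can be kept in a values file instead of `--set` flags. A minimal sketch (the file name is hypothetical):
```yaml
# talon-values.yaml (hypothetical file name),
# passed with: helm upgrade -i falco falcosecurity/falco -f talon-values.yaml
tty: true
falcosidekick:
  enabled: true
  config:
    talon:
      address: http://falco-talon:2803
```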
## License
Falco Talon is licensed to you under the **Apache 2.0** open source license.
## Author
Thomas Labarussias (https://github.com/Issif)


@ -1,8 +0,0 @@
- action: Terminate Pod
actionner: kubernetes:terminate
- action: Label Pod as Suspicious
actionner: kubernetes:label
parameters:
labels:
analysis/status: "suspicious"


@ -1,73 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "falco-talon.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "falco-talon.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for ingress.
*/}}
{{- define "falco-talon.ingress.apiVersion" -}}
{{- if and (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19-0" .Capabilities.KubeVersion.Version) -}}
{{- print "networking.k8s.io/v1" -}}
{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" -}}
{{- print "networking.k8s.io/v1beta1" -}}
{{- else -}}
{{- print "extensions/v1beta1" -}}
{{- end -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "falco-talon.labels" -}}
helm.sh/chart: {{ include "falco-talon.chart" . }}
app.kubernetes.io/part-of: {{ include "falco-talon.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{ include "falco-talon.selectorLabels" . }}
{{- if .Values.image.tag }}
app.kubernetes.io/version: {{ .Values.image.tag }}
{{- else }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
{{- end }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "falco-talon.selectorLabels" -}}
app.kubernetes.io/name: {{ include "falco-talon.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Return if ingress is stable.
*/}}
{{- define "falco-talon.ingress.isStable" -}}
{{- eq (include "falco-talon.ingress.apiVersion" .) "networking.k8s.io/v1" -}}
{{- end -}}
{{/*
Return if ingress supports pathType.
*/}}
{{- define "falco-talon.ingress.supportsPathType" -}}
{{- or (eq (include "falco-talon.ingress.isStable" .) "true") (and (eq (include "falco-talon.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) -}}
{{- end -}}
{{/*
Validate if either serviceAccount create is set to true or serviceAccount name is passed
*/}}
{{- define "falco-talon.validateServiceAccount" -}}
{{- if and (not .Values.rbac.serviceAccount.create) (not .Values.rbac.serviceAccount.name) -}}
{{- fail ".Values.rbac.serviceAccount.create is set to false and .Values.rbac.serviceAccount.name is not provided or is provided as empty string." -}}
{{- end -}}
{{- end -}}


@ -1,18 +0,0 @@
{{- if .Values.podSecurityPolicy.create }}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ template "falco-talon.name" .}}
labels:
{{- include "falco-talon.labels" . | nindent 4 }}
rules:
- apiGroups:
- policy
resources:
- podsecuritypolicies
resourceNames:
- {{ template "falco-talon.name" . }}
verbs:
- use
{{- end }}


@ -1,22 +0,0 @@
{{- if .Values.grafana.dashboards.enabled -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Values.grafana.dashboards.configMaps.talon.name }}
{{ if .Values.grafana.dashboards.configMaps.talon.namespace }}
namespace: {{ .Values.grafana.dashboards.configMaps.talon.namespace }}
{{- else -}}
namespace: {{ .Release.Namespace }}
{{- end }}
labels:
{{- include "falco-talon.labels" . | nindent 4 }}
grafana_dashboard: "1"
annotations:
{{- if .Values.grafana.dashboards.configMaps.talon.folder }}
k8s-sidecar-target-directory: /tmp/dashboards/{{ .Values.grafana.dashboards.configMaps.talon.folder }}
grafana_dashboard_folder: {{ .Values.grafana.dashboards.configMaps.talon.folder }}
{{- end }}
data:
falco-talon-grafana-dashboard.json: |-
{{- .Files.Get "dashboards/falco-talon-grafana-dashboard.json" | nindent 4 }}
{{- end -}}


@ -1,13 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "falco-talon.name" . }}-rules
namespace: {{ .Release.Namespace }}
labels:
{{- include "falco-talon.labels" . | nindent 4 }}
data:
rules.yaml: |-
{{ $.Files.Get "rules.yaml" | nindent 4 }}
{{- if .Values.config.rulesOverride }}
{{ .Values.config.rulesOverride | nindent 4 }}
{{- end }}


@ -1,101 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "falco-talon.name" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "falco-talon.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "falco-talon.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
labels:
app.kubernetes.io/name: {{ include "falco-talon.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
annotations:
secret-checksum: {{ (lookup "v1" "Secret" .Release.Namespace (printf "%s-config" (include "falco-talon.name" .))).data | toJson | sha256sum }}
{{- if .Values.podAnnotations }}
{{ toYaml .Values.podAnnotations | indent 8 }}
{{- end }}
spec:
serviceAccountName: {{ include "falco-talon.name" . }}
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{- range .Values.imagePullSecrets }}
- name: {{ . }}
{{- end }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: "{{ .Values.priorityClassName }}"
{{- end }}
securityContext:
runAsUser: {{ .Values.podSecurityContext.runAsUser }}
fsGroup: {{ .Values.podSecurityContext.fsGroup }}
restartPolicy: Always
containers:
- name: {{ .Chart.Name }}
{{- if .Values.image.registry }}
image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}"
{{- else }}
image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}"
{{- end }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
args: ["server", "-c", "/etc/falco-talon/config.yaml", "-r", "/etc/falco-talon/rules.yaml"]
ports:
- name: http
containerPort: 2803
protocol: TCP
- name: nats
containerPort: 4222
protocol: TCP
livenessProbe:
httpGet:
path: /healthz
port: http
initialDelaySeconds: 10
periodSeconds: 5
readinessProbe:
httpGet:
path: /healthz
port: http
initialDelaySeconds: 10
periodSeconds: 5
{{- if .Values.extraEnv }}
env:
{{- toYaml .Values.extraEnv | nindent 12 }}
{{- end }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
volumeMounts:
- name: "config"
mountPath: "/etc/falco-talon/config.yaml"
subPath: config.yaml
readOnly: true
- name: "rules"
mountPath: "/etc/falco-talon/rules.yaml"
subPath: rules.yaml
readOnly: true
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
volumes:
- name: "rules"
configMap:
name: "{{ include "falco-talon.name" . }}-rules"
- name: "config"
secret:
secretName: "{{ include "falco-talon.name" . }}-config"


@ -1,50 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $name := include "falco-talon.name" . -}}
{{- $ingressApiIsStable := eq (include "falco-talon.ingress.isStable" .) "true" -}}
{{- $ingressSupportsPathType := eq (include "falco-talon.ingress.supportsPathType" .) "true" -}}
---
apiVersion: {{ include "falco-talon.ingress.apiVersion" . }}
kind: Ingress
metadata:
name: {{ $name }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "falco-talon.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ .path }}
{{- if $ingressSupportsPathType }}
pathType: {{ default "ImplementationSpecific" .pathType }}
{{- end }}
backend:
{{- if $ingressApiIsStable }}
service:
name: {{ $name }}
port:
name: http
{{- else }}
serviceName: {{ $name }}
servicePort: http
{{- end }}
{{- end }}
{{- end }}
{{- end }}


@ -1,32 +0,0 @@
{{- if .Values.podSecurityPolicy.create}}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ template "falco-talon.name" . }}
labels:
{{- include "falco-talon.labels" . | nindent 4 }}
spec:
privileged: false
allowPrivilegeEscalation: false
hostNetwork: false
readOnlyRootFilesystem: true
requiredDropCapabilities:
- ALL
fsGroup:
ranges:
- max: 65535
min: 1
rule: MustRunAs
runAsUser:
rule: MustRunAsNonRoot
seLinux:
rule: RunAsAny
supplementalGroups:
ranges:
- max: 65535
min: 1
rule: MustRunAs
volumes:
- configMap
- secret
{{- end }}


@ -1,216 +0,0 @@
{{- include "falco-talon.validateServiceAccount" . -}}
---
{{- if .Values.rbac.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "falco-talon.name" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "falco-talon.labels" . | nindent 4 }}
{{- end }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "falco-talon.name" . }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ include "falco-talon.name" . }}
helm.sh/chart: {{ include "falco-talon.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
rules:
{{- if .Values.rbac.namespaces }}
- apiGroups:
- ""
resources:
- namespaces
verbs:
{{ toYaml .Values.rbac.namespaces | indent 6 }}
{{- end }}
{{- if .Values.rbac.pods }}
- apiGroups:
- ""
resources:
- pods
verbs:
{{ toYaml .Values.rbac.pods | indent 6 }}
{{- end }}
{{- if .Values.rbac.podsEphemeralcontainers }}
- apiGroups:
- ""
resources:
- pods/ephemeralcontainers
verbs:
{{ toYaml .Values.rbac.podsEphemeralcontainers | indent 6 }}
{{- end }}
{{- if .Values.rbac.nodes }}
- apiGroups:
- ""
resources:
- nodes
verbs:
{{ toYaml .Values.rbac.nodes | indent 6 }}
{{- end }}
{{- if .Values.rbac.podsLog }}
- apiGroups:
- ""
resources:
- pods/log
verbs:
{{ toYaml .Values.rbac.podsLog | indent 6 }}
{{- end }}
{{- if .Values.rbac.podsExec }}
- apiGroups:
- ""
resources:
- pods/exec
verbs:
{{ toYaml .Values.rbac.podsExec | indent 6 }}
{{- end }}
{{- if .Values.rbac.podsEviction }}
- apiGroups:
- ""
resources:
- pods/eviction
verbs:
{{ toYaml .Values.rbac.podsEviction | indent 6 }}
{{- end }}
{{- if .Values.rbac.events }}
- apiGroups:
- ""
resources:
- events
verbs:
{{ toYaml .Values.rbac.events | indent 6 }}
{{- end }}
{{- if .Values.rbac.daemonsets }}
- apiGroups:
- "apps"
resources:
- daemonsets
verbs:
{{ toYaml .Values.rbac.daemonsets | indent 6 }}
{{- end }}
{{- if .Values.rbac.deployments }}
- apiGroups:
- "apps"
resources:
- deployments
verbs:
{{ toYaml .Values.rbac.deployments | indent 6 }}
{{- end }}
{{- if .Values.rbac.replicasets }}
- apiGroups:
- "apps"
resources:
- replicasets
verbs:
{{ toYaml .Values.rbac.replicasets | indent 6 }}
{{- end }}
{{- if .Values.rbac.statefulsets }}
- apiGroups:
- "apps"
resources:
- statefulsets
verbs:
{{ toYaml .Values.rbac.statefulsets | indent 6 }}
{{- end }}
{{- if .Values.rbac.networkpolicies }}
- apiGroups:
- "networking.k8s.io"
resources:
- networkpolicies
verbs:
{{ toYaml .Values.rbac.networkpolicies | indent 6 }}
{{- end }}
{{- if .Values.rbac.caliconetworkpolicies }}
- apiGroups:
- "projectcalico.org"
resources:
- caliconetworkpolicies
verbs:
{{ toYaml .Values.rbac.caliconetworkpolicies | indent 6 }}
{{- end }}
{{- if .Values.rbac.ciliumnetworkpolicies }}
- apiGroups:
- "cilium.io"
resources:
- ciliumnetworkpolicies
verbs:
{{ toYaml .Values.rbac.ciliumnetworkpolicies | indent 6 }}
{{- end }}
{{- if .Values.rbac.roles }}
- apiGroups:
- "rbac.authorization.k8s.io"
resources:
- roles
verbs:
{{ toYaml .Values.rbac.roles | indent 6 }}
{{- end }}
{{- if .Values.rbac.clusterroles }}
- apiGroups:
- "rbac.authorization.k8s.io"
resources:
- clusterroles
verbs:
{{ toYaml .Values.rbac.clusterroles | indent 6 }}
{{- end }}
{{- if .Values.rbac.configmaps }}
- apiGroups:
- ""
resources:
- configmaps
verbs:
{{ toYaml .Values.rbac.configmaps | indent 6 }}
{{- end }}
{{- if .Values.rbac.secrets }}
- apiGroups:
- ""
resources:
- secrets
verbs:
{{ toYaml .Values.rbac.secrets | indent 6 }}
{{- end }}
{{- if .Values.rbac.leases }}
- apiGroups:
- "coordination.k8s.io"
resources:
- leases
verbs:
{{ toYaml .Values.rbac.leases | indent 6 }}
{{- end }}
{{- if .Values.podSecurityPolicy.create }}
- apiGroups:
- policy
resources:
- podsecuritypolicies
resourceNames:
- {{ template "falco-talon.name" . }}
verbs:
- use
{{- end }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "falco-talon.name" . }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ include "falco-talon.name" . }}
helm.sh/chart: {{ include "falco-talon.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ include "falco-talon.name" . }}
subjects:
- kind: ServiceAccount
{{- if .Values.rbac.serviceAccount.create }}
name: {{ include "falco-talon.name" . }}
{{- else }}
name: {{ .Values.rbac.serviceAccount.name }}
{{- end }}
namespace: {{ .Release.Namespace }}


@ -1,71 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: {{ include "falco-talon.name" . }}-config
labels:
{{- include "falco-talon.labels" . | nindent 4 }}
stringData:
config.yaml: |
listen_address: {{ default "0.0.0.0" .Values.config.listenAddress }}
listen_port: {{ default 2803 .Values.config.listenPort }}
watch_rules: {{ default true .Values.config.watchRules }}
print_all_events: {{ default false .Values.config.printAllEvents }}
deduplication:
leader_election: {{ default true .Values.config.deduplication.leaderElection }}
time_window_seconds: {{ default 5 .Values.config.deduplication.timeWindowSeconds }}
default_notifiers:
{{- range .Values.config.defaultNotifiers }}
- {{ . -}}
{{ end }}
otel:
traces_enabled: {{ default false .Values.config.otel.tracesEnabled }}
metrics_enabled: {{ default false .Values.config.otel.metricsEnabled }}
collector_port: {{ default 4317 .Values.config.otel.collectorPort }}
collector_endpoint: {{ .Values.config.otel.collectorEndpoint }}
collector_use_insecure_grpc: {{ default false .Values.config.otel.collectorUseInsecureGrpc }}
notifiers:
slack:
webhook_url: {{ .Values.config.notifiers.slack.webhookUrl }}
icon: {{ .Values.config.notifiers.slack.icon }}
username: {{ .Values.config.notifiers.slack.username }}
footer: {{ .Values.config.notifiers.slack.footer }}
format: {{ .Values.config.notifiers.slack.format }}
webhook:
url: {{ .Values.config.notifiers.webhook.url }}
smtp:
host_port: {{ .Values.config.notifiers.smtp.hostPort }}
from: {{ .Values.config.notifiers.smtp.from }}
to: {{ .Values.config.notifiers.smtp.to }}
user: {{ .Values.config.notifiers.smtp.user }}
password: {{ .Values.config.notifiers.smtp.password }}
format: {{ .Values.config.notifiers.smtp.format }}
tls: {{ .Values.config.notifiers.smtp.tls }}
loki:
host_port: {{ .Values.config.notifiers.loki.hostPort }}
user: {{ .Values.config.notifiers.loki.user }}
api_key: {{ .Values.config.notifiers.loki.apiKey }}
tenant: {{ .Values.config.notifiers.loki.tenant }}
custom_headers:
{{- range .Values.config.notifiers.loki.customHeaders }}
- {{ . -}}
{{ end }}
elasticsearch:
url: {{ .Values.config.notifiers.elasticsearch.url }}
create_index_template: {{ .Values.config.notifiers.elasticsearch.createIndexTemplate }}
number_of_shards: {{ .Values.config.notifiers.elasticsearch.numberOfShards }}
number_of_replicas: {{ .Values.config.notifiers.elasticsearch.numberOfReplicas }}
aws:
role_arn: {{ .Values.config.aws.roleArn }}
external_id: {{ .Values.config.aws.externalId }}
region: {{ .Values.config.aws.region }}
access_key: {{ .Values.config.aws.accessKey }}
secret_key: {{ .Values.config.aws.secretKey }}
minio:
endpoint: {{ .Values.config.minio.endpoint }}
access_key: {{ .Values.config.minio.accessKey }}
secret_key: {{ .Values.config.minio.secretKey }}
use_ssl: {{ .Values.config.minio.useSsl }}


@ -1,44 +0,0 @@
{{- if .Values.serviceMonitor.enabled }}
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
{{- include "falco-talon.labels" . | nindent 4 }}
{{- with .Values.serviceMonitor.additionalLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
name: {{ include "falco-talon.name" . }}
namespace: {{ .Release.Namespace }}
spec:
endpoints:
- port: {{ .Values.serviceMonitor.port }}
{{- with .Values.serviceMonitor.interval }}
interval: {{ . }}
{{- end }}
{{- with .Values.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ . }}
{{- end }}
honorLabels: true
path: {{ .Values.serviceMonitor.path }}
scheme: {{ .Values.serviceMonitor.scheme }}
{{- with .Values.serviceMonitor.tlsConfig }}
tlsConfig:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.serviceMonitor.relabelings }}
relabelings:
{{- toYaml . | nindent 6 }}
{{- end }}
jobLabel: "{{ .Release.Name }}"
selector:
matchLabels:
{{- include "falco-talon.selectorLabels" . | nindent 6 }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
{{- with .Values.serviceMonitor.targetLabels }}
targetLabels:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}


@ -1,21 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
name: {{ include "falco-talon.name" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "falco-talon.labels" . | nindent 4 }}
{{- with .Values.service.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
{{- include "falco-talon.selectorLabels" . | nindent 4 }}


@ -1,309 +0,0 @@
# Default values for falco-talon.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# -- number of running pods
replicaCount: 2
# -- image parameters
image:
# -- The image registry to pull from
registry: falco.docker.scarf.sh
# -- The image repository to pull from
repository: falcosecurity/falco-talon
# -- Override the image tag to pull
tag: ""
# -- The image pull policy
pullPolicy: Always
# -- pod security policy
podSecurityPolicy:
# -- enable the creation of the PSP
create: false
# -- pod security context
podSecurityContext:
# -- user id
runAsUser: 1234
# -- group
fsGroup: 1234
# -- one or more secrets to be used when pulling images
imagePullSecrets: []
# - registrySecretName
# -- override name
nameOverride: ""
# -- extra env
extraEnv:
- name: LOG_LEVEL
value: warning
# - name: AWS_REGION # Specify if running on EKS, ECS or EC2
# value: us-east-1
# -- priority class name
priorityClassName: ""
# -- pod annotations
podAnnotations: {}
# -- service parameters
service:
# -- type of service
type: ClusterIP
# -- port of the service
port: 2803
# -- annotations of the service
annotations: {}
# networking.gke.io/load-balancer-type: Internal
# -- ingress parameters
ingress:
# -- enable the ingress
enabled: false
# -- annotations of the ingress
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
# -- hosts
hosts:
- host: falco-talon.local
paths:
- path: /
# -- pathType (e.g. ImplementationSpecific, Prefix, .. etc.)
# pathType: Prefix
# -- tls
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
# -- resources
resources: {}
# -- limits
# limits:
# # -- cpu limit
# cpu: 100m
# # -- memory limit
# memory: 128Mi
# -- requests
# requests:
# # -- cpu request
# cpu: 100m
# # -- memory request
# memory: 128Mi
# -- node selector
nodeSelector: {}
# -- tolerations
tolerations: []
# -- affinity
affinity: {}
# -- rbac
rbac:
serviceAccount:
# -- create the service account. If create is false, name is required
create: true
# -- name of the service account
name: ""
namespaces: ["get", "delete"]
pods: ["get", "update", "patch", "delete", "list"]
podsEphemeralcontainers: ["patch", "create"]
nodes: ["get", "update", "patch", "watch", "create"]
podsLog: ["get"]
podsExec: ["get", "create"]
podsEviction: ["get", "create"]
events: ["get", "update", "patch", "create"]
daemonsets: ["get", "delete"]
deployments: ["get", "delete"]
replicasets: ["get", "delete"]
statefulsets: ["get", "delete"]
networkpolicies: ["get", "update", "patch", "create"]
caliconetworkpolicies: ["get", "update", "patch", "create"]
ciliumnetworkpolicies: ["get", "update", "patch", "create"]
roles: ["get", "delete"]
clusterroles: ["get", "delete"]
configmaps: ["get", "delete"]
secrets: ["get", "delete"]
leases: ["get", "update", "patch", "watch", "create"]
# -- config of Falco Talon (See https://docs.falco-talon.org/docs/configuration/)
config:
# -- listen address
listenAddress: 0.0.0.0
# -- listen port
listenPort: 2803
# -- default notifiers for all rules
defaultNotifiers:
# - slack
- k8sevents
# -- auto reload the rules when the files change
watchRules: true
# -- deduplication of the Falco events
deduplication:
# -- enable the leader election for cluster mode
leaderElection: true
# -- duration in seconds for the deduplication time window
timeWindowSeconds: 5
# -- print in stdout all received events, not only those which match a rule
printAllEvents: false
# User-defined additional rules for rules_override.yaml
rulesOverride: |
- action: Terminate Pod
actionner: kubernetes:terminate
parameters:
ignore_daemonsets: true
ignore_statefulsets: true
grace_period_seconds: 20
# -- open telemetry parameters
otel:
# -- enable otel traces
tracesEnabled: false
# -- enable otel metrics
metricsEnabled: false
# -- collector port
collectorPort: 4317
# -- collector endpoint
collectorEndpoint: ""
# -- use insecure grpc
collectorUseInsecureGrpc: false
# -- notifiers (See https://docs.falco-talon.org/docs/notifiers/list/ for the settings)
notifiers:
# -- slack
slack:
# -- webhook url
webhookUrl: ""
# -- icon
icon: "https://upload.wikimedia.org/wikipedia/commons/2/26/Circaetus_gallicus_claw.jpg"
# -- username
username: "Falco Talon"
# -- footer
footer: "https://github.com/falcosecurity/falco-talon"
# -- format
format: "long"
# -- webhook
webhook:
# -- url
url: ""
# -- smtp
smtp:
# -- host:port
hostPort: ""
# -- from
from: ""
# -- to
to: ""
# -- user
user: ""
# -- password
password: ""
# -- format
format: "html"
# -- enable tls
tls: false
# -- loki
loki:
# -- host:port
hostPort: ""
# -- user
user: ""
# -- api key
apiKey: ""
# -- tenant
tenant: ""
# -- custom headers
customHeaders: []
# -- elasticsearch
elasticsearch:
# -- url
url: ""
# -- create the index template
createIndexTemplate: true
# -- number of shards
numberOfShards: 1
# -- number of replicas
numberOfReplicas: 1
# -- aws
aws:
# -- role arn
roleArn: ""
# -- external id
externalId: ""
# -- region (if not specified, default region from provider credential chain will be used)
region: ""
# -- access key (if not specified, default access_key from provider credential chain will be used)
accessKey: ""
# -- secret key (if not specified, default secret_key from provider credential chain will be used)
secretKey: ""
# -- minio
minio:
# -- endpoint
endpoint: ""
# -- access key
accessKey: ""
# -- secret key
secretKey: ""
# -- use ssl
useSsl: false
# -- serviceMonitor holds the configuration for the ServiceMonitor CRD.
serviceMonitor:
# -- enable the deployment of a Service Monitor for the Prometheus Operator.
enabled: false
# -- portname at which the metrics are exposed
port: http
# -- path at which the metrics are exposed
path: /metrics
# -- additionalLabels specifies labels to be added on the Service Monitor.
additionalLabels: {}
# -- interval specifies the time interval at which Prometheus should scrape metrics from the service.
interval: "30s"
# -- scheme specifies the network protocol used by the metrics endpoint. In this case HTTP.
scheme: http
# -- scrapeTimeout determines the maximum time Prometheus should wait for a target to respond to a scrape request.
# If the target does not respond within the specified timeout, Prometheus considers the scrape as failed for
# that target.
scrapeTimeout: "10s"
# -- relabelings configures the relabeling rules to apply to the targets' metadata labels.
relabelings: []
# -- targetLabels defines the labels which are transferred from the associated Kubernetes service object onto the ingested metrics.
targetLabels: []
# -- tlsConfig specifies TLS (Transport Layer Security) configuration for secure communication when
# scraping metrics from a service. It allows you to define the details of the TLS connection, such as
# CA certificate, client certificate, and client key. Currently, Falco Talon does not support
# TLS configuration for the metrics endpoint.
tlsConfig: {}
# insecureSkipVerify: false
# caFile: /path/to/ca.crt
# certFile: /path/to/client.crt
# keyFile: /path/to/client.key
# -- grafana contains the configuration related to grafana.
grafana:
# -- dashboards contains configuration for grafana dashboards.
dashboards:
# -- enabled specifies whether the dashboards should be deployed.
enabled: false
# -- configMaps to be deployed that contain a grafana dashboard.
configMaps:
# -- falco-talon contains the configuration for falco talon's dashboard.
talon:
# -- name specifies the name for the configmap.
name: falco-talon-grafana-dashboard
# -- namespace specifies the namespace for the configmap.
namespace: ""
# -- folder where the dashboard is stored by grafana.
folder: ""


@ -1,4 +0,0 @@
requirements.lock
Chart.lock
charts
.vscode


@ -1,247 +0,0 @@
# Helm chart Breaking Changes
- [6.0.0](#600)
- [Falco Talon configuration changes](#falco-talon-configuration-changes)
- [5.0.0](#500)
- [Default Falco Image](#default-falco-image)
- [4.0.0](#400)
- [Drivers](#drivers)
- [K8s Collector](#k8s-collector)
- [Plugins](#plugins)
- [3.0.0](#300)
- [Falcoctl](#falcoctl-support)
- [Rulesfiles](#rulesfiles)
- [Falco Images](#drop-support-for-falcosecurityfalco-image)
- [Driver Loader Init Container](#driver-loader-simplified-logic)
## 6.0.0
### Falco Talon configuration changes
The following backward-incompatible changes have been made to `values.yaml`:
- `falcotalon` configuration has been renamed to `falco-talon`
- `falcotalon.enabled` has been renamed to `responseActions.enabled`
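For example, an upgrade that previously enabled Talon via `falcotalon.enabled` would now use the new key. A minimal sketch (all other flags unchanged):
```bash=
helm upgrade falco falcosecurity/falco \
    --namespace=falco \
    --reuse-values \
    --set responseActions.enabled=true
```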
## 5.0.0
### Default Falco Image
**Starting with version 5.0.0, the Helm chart now uses the default Falco container image, which is a distroless image without any additional tools installed.**
Previously, the chart used the `debian` image with several tools included to avoid breaking changes during upgrades. The new image is more secure and lightweight, but it does not include these tools.
If you rely on some tool—for example, when using the `program_output` feature—you can manually override the `image.tag` value to use a different image flavor. For instance, setting `image.tag` to `0.41.0-debian` will restore access to the tools available in the Debian-based image.
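A minimal sketch of such an override, keeping the rest of the release configuration untouched:
```bash=
helm upgrade falco falcosecurity/falco \
    --namespace=falco \
    --reuse-values \
    --set image.tag=0.41.0-debian
```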
## 4.0.0
### Drivers
The `driver` section has been reworked based on the following PR: https://github.com/falcosecurity/falco/pull/2413.
It is an attempt to make driver configuration in Falco uniform.
It also groups the configuration based on the driver type.
Some of the drivers have been renamed:
* the kernel module has been renamed from `module` to `kmod`;
* the ebpf probe has not been changed. It's still `ebpf`;
* the modern ebpf probe has been renamed from `modern-bpf` to `modern_ebpf`.
The `gvisor` configuration has been moved under the `driver` section since it is considered a driver on its own.
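A `values.yaml` fragment using the new layout could look like the following sketch (the `kind` names are the renamed ones listed above; `gvisor` now lives under `driver`):
```yaml=
driver:
  enabled: true
  # One of: kmod, ebpf, modern_ebpf (formerly: module, ebpf, modern-bpf).
  kind: modern_ebpf
  gvisor:
    enabled: false
```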
### K8s Collector
The old Kubernetes client has been removed in Falco 0.37.0. For more info, check out this issue: https://github.com/falcosecurity/falco/issues/2973#issuecomment-1877803422.
The [k8s-metacollector](https://github.com/falcosecurity/k8s-metacollector) and [k8s-meta](https://github.com/falcosecurity/plugins/tree/master/plugins/k8smeta) substitute
the old implementation.
The following resources, previously needed by Falco to connect to the API server, are no longer required and have been removed from the chart:
* service account;
* cluster role;
* cluster role binding.
When `collectors.kubernetes` is enabled, the chart deploys the [k8s-metacollector](https://github.com/falcosecurity/k8s-metacollector) and configures Falco to load the
[k8s-meta](https://github.com/falcosecurity/plugins/tree/master/plugins/k8smeta) plugin.
By default, `collectors.kubernetes.enabled` is off; for more info, see the following issue: https://github.com/falcosecurity/falco/issues/2995.
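Opting in is a single switch; a minimal sketch:
```bash=
helm upgrade falco falcosecurity/falco \
    --namespace=falco \
    --reuse-values \
    --set collectors.kubernetes.enabled=true
```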
### Plugins
The Falco docker image no longer ships the plugins: https://github.com/falcosecurity/falco/pull/2997.
For this reason, `resolveDeps` is now enabled in the relevant values files (i.e. `values-k8saudit.yaml`).
When installing *rulesfile* artifacts, `falcoctl` will try to resolve their dependencies and install the required plugins.
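A values fragment showing the relevant switch, sketched after the preset files mentioned above (the `refs` entry mirrors the falcoctl example later in this document):
```yaml=
falcoctl:
  config:
    artifact:
      install:
        # Resolve and install the plugins required by the rulesfiles below.
        resolveDeps: true
        refs: [k8saudit-rules:0.5]
```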
## 3.0.0
The new chart deploys new *k8s* resources and new configuration variables have been added to the `values.yaml` file. People upgrading the chart from `v2.x.y` have to port their configuration variables to the new `values.yaml` file used by the `v3.0.0` chart.
If you still want to use the old values, because you do not want to take advantage of the new and shiny **falcoctl** tool, then just run:
```bash=
helm upgrade falco falcosecurity/falco \
--namespace=falco \
--reuse-values \
--set falcoctl.artifact.install.enabled=false \
--set falcoctl.artifact.follow.enabled=false
```
This way you will upgrade Falco to `v0.34.0`.
**NOTE**: The new version of Falco itself, installed by the chart, does not introduce breaking changes. You can port your previous Falco configuration to the new `values.yaml` by copy-pasting it.
### Falcoctl support
[Falcoctl](https://github.com/falcosecurity/falcoctl) is a new tool created to automate operations when deploying Falco.
Before `v3.0.0` of the charts, *rulesfiles* and *plugins* were shipped bundled in the Falco docker image. This precluded updating the *rulesfiles* and *plugins* until a new version of Falco was released: operators had to manually update the *rulesfiles* or add new *plugins* to Falco. The process was cumbersome and error-prone. Operators had to create their own Falco docker images with the new plugins baked into them, or wait for a new Falco release.
Starting from the `v3.0.0` chart release, we add support for **falcoctl** in the charts. Deploying it alongside Falco allows you to:
- *install* artifacts of the Falco ecosystem (i.e. plugins and rulesfiles at the time of writing);
- *follow* those artifacts (only *rulesfile* artifacts are recommended) to keep them up to date with the latest releases of the Falcosecurity organization. This allows, for instance, updating rules that detect new vulnerabilities or security issues without the need to redeploy Falco.
The chart deploys *falcoctl* using an *init container* and/or a *sidecar container*. The former installs artifacts and makes them available to Falco at start-up time; the latter runs alongside Falco and updates the local artifacts when new updates are detected.
Based on your deployment scenario:
1. Falco without *plugins* and you just want to upgrade to the new Falco version:
```bash=
helm upgrade falco falcosecurity/falco \
--namespace=falco \
--reuse-values \
--set falcoctl.artifact.install.enabled=false \
--set falcoctl.artifact.follow.enabled=false
```
When upgrading an existing release, *helm* uses the new chart version. Since we added new template files and changed the values schema (added new parameters), we explicitly disable the **falcoctl** tool. By doing so, the command will reuse the existing configuration but will deploy Falco version `0.34.0`.
2. Falco without *plugins* and you want to automatically get new *falco-rules* as soon as they are released:
```bash=
helm upgrade falco falcosecurity/falco \
--namespace=falco
```
Helm first applies the values coming from the new chart version, then overrides them using the values of the previous release. The outcome is a new release of Falco that:
* uses the previous configuration;
* runs Falco version `0.34.0`;
* uses **falcoctl** to install and automatically update the [*falco-rules*](https://github.com/falcosecurity/rules/);
* checks for new updates every 6h (default value).
3. Falco with *plugins* and you want just to upgrade Falco:
```bash=
helm upgrade falco falcosecurity/falco \
--namespace=falco \
--reuse-values \
--set falcoctl.artifact.install.enabled=false \
--set falcoctl.artifact.follow.enabled=false
```
Very similar to scenario `1.`
4. Falco with plugins and you want to use **falcoctl** to download the plugins' *rulesfiles*:
* Save **falcoctl** configuration to file:
```yaml=
cat << EOF > ./falcoctl-values.yaml
####################
# falcoctl config #
####################
falcoctl:
image:
# -- The image pull policy.
pullPolicy: IfNotPresent
# -- The image registry to pull from.
registry: docker.io
# -- The image repository to pull from.
repository: falcosecurity/falcoctl
# -- Overrides the image tag whose default is the chart appVersion.
tag: "main"
artifact:
# -- Runs "falcoctl artifact install" command as an init container. It is used to install artfacts before
# Falco starts. It provides them to Falco by using an emptyDir volume.
install:
enabled: true
# -- Extra environment variables that will be passed to the falcoctl-artifact-install init container.
env: {}
# -- Arguments to pass to the falcoctl-artifact-install init container.
args: ["--verbose"]
# -- Resources requests and limits for the falcoctl-artifact-install init container.
resources: {}
# -- Security context for the falcoctl init container.
securityContext: {}
# -- Runs "falcoctl artifact follow" command as a sidecar container. It is used to automatically check for
# updates given a list of artifacts. If an update is found it downloads and installs it in a shared folder (emptyDir)
# that is accessible by Falco. Rulesfiles are automatically detected and loaded by Falco once they are installed in the
# correct folder by falcoctl. To prevent new versions of artifacts from breaking Falco, the tool checks if it is compatible
# with the running version of Falco before installing it.
follow:
enabled: true
# -- Extra environment variables that will be passed to the falcoctl-artifact-follow sidecar container.
env: {}
# -- Arguments to pass to the falcoctl-artifact-follow sidecar container.
args: ["--verbose"]
# -- Resources requests and limits for the falcoctl-artifact-follow sidecar container.
resources: {}
# -- Security context for the falcoctl-artifact-follow sidecar container.
securityContext: {}
# -- Configuration file of the falcoctl tool. It is saved in a configmap and mounted on the falcoctl containers.
config:
# -- List of indexes that falcoctl downloads and uses to locate and download artifacts. For more info see:
# https://github.com/falcosecurity/falcoctl/blob/main/proposals/20220916-rules-and-plugin-distribution.md#index-file-overview
indexes:
- name: falcosecurity
url: https://falcosecurity.github.io/falcoctl/index.yaml
# -- Configuration used by the artifact commands.
artifact:
# -- List of artifact types that falcoctl will handle. If a configured ref resolves to an artifact whose type is not contained
# in the list, falcoctl will refuse to download and install that artifact.
allowedTypes:
- rulesfile
install:
# -- Do not resolve the dependencies for artifacts. By default this is true, but for our use case we disable it.
resolveDeps: false
# -- List of artifacts to be installed by the falcoctl init container.
refs: [k8saudit-rules:0.5]
# -- Directory where the *rulesfiles* are saved. The path is relative to the container, which in this case is an emptyDir
# mounted also by the Falco pod.
rulesfilesDir: /rulesfiles
# -- Same as the one above but for the *plugins*.
pluginsDir: /plugins
follow:
# -- List of artifacts to be followed by the falcoctl sidecar container.
refs: [k8saudit-rules:0.5]
# -- Directory where the *rulesfiles* are saved. The path is relative to the container, which in this case is an emptyDir
# mounted also by the Falco pod.
rulesfilesDir: /rulesfiles
# -- Same as the one above but for the *plugins*.
pluginsDir: /plugins
EOF
```
* Set `falcoctl.artifact.install.enabled=true` to install the *rulesfiles* of the loaded plugins. Configure **falcoctl** to install the *rulesfiles* of the plugins you are loading with Falco. For example, if you are loading the **k8saudit** plugin, then you need to set `falcoctl.config.artifact.install.refs=[k8saudit-rules:0.5]`. When Falco is deployed, the **falcoctl** init container will download the specified artifacts based on their tags.
* Set `falcoctl.artifact.follow.enabled=true` to keep the *rulesfiles* of the loaded plugins up to date.
* Proceed to upgrade your Falco release by running:
```bash=
helm upgrade falco falcosecurity/falco \
--namespace=falco \
--reuse-values \
--values=./falcoctl-values.yaml
```
5. Falco with **multiple sources** enabled (syscalls + plugins):
1. Upgrading Falco to the new version:
```bash=
helm upgrade falco falcosecurity/falco \
--namespace=falco \
--reuse-values \
--set falcoctl.artifact.install.enabled=false \
--set falcoctl.artifact.follow.enabled=false
```
2. Upgrading Falco and leveraging **falcoctl** for rules and plugins. Refer to point 4. for **falcoctl** configuration.
### Rulesfiles
Starting from `v3.0.0`, the chart drops the bundled **rulesfiles**. Previous versions used to create a configmap containing the following **rulesfiles**:
* application_rules.yaml
* aws_cloudtrail_rules.yaml
* falco_rules.local.yaml
* falco_rules.yaml
* k8s_audit_rules.yaml
The reason why we are dropping them is pretty simple: the files are already shipped within the Falco image, so bundling them again brings no benefit. On the other hand, we had to manually update those files for each Falco release.
For users out there, do not worry, we have you covered. As said before, the **rulesfiles** are already shipped inside the Falco image. Still, this solution has some drawbacks: users have to wait for the next release of Falco to get the latest version of those **rulesfiles**, or they can manually update them by using the [custom rules](./README.md#loading-custom-rules).
We came up with a better solution and that is **falcoctl**. Users can configure the **falcoctl** tool to fetch and install the latest **rulesfiles** as provided by the *falcosecurity* organization. For more info, please check the **falcoctl** section.
**NOTE**: if any user (wrongly) used to customize those files before deploying Falco, please switch to using the [custom rules](./README.md#loading-custom-rules).
### Drop support for `falcosecurity/falco` image
Starting from version `v2.0.0` of the chart, the `falcosecurity/falco-no-driver` image is the default. We still supported the `falcosecurity/falco` image in `v2.0.0`, but in `v2.2.0` the chart broke when using the `falcosecurity/falco` image. For more info please check out the following issue: https://github.com/falcosecurity/charts/issues/419
#### Driver-loader simplified logic
There is only one switch to **enable/disable** the driver-loader init container: `driver.loader.enabled=true`. This simplification comes as a direct consequence of dropping support for the `falcosecurity/falco` image. For more info: https://github.com/falcosecurity/charts/issues/418
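For instance, when drivers are already provisioned on the nodes, the init container can be skipped entirely. A minimal sketch:
```bash=
helm install falco falcosecurity/falco \
    --create-namespace \
    --namespace falco \
    --set driver.kind=kmod \
    --set driver.loader.enabled=false
```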

File diff suppressed because it is too large.

@ -1,32 +0,0 @@
apiVersion: v2
name: falco
version: 6.2.2
appVersion: "0.41.3"
description: Falco
keywords:
- monitoring
- security
- alerting
- metric
- troubleshooting
- run-time
home: https://falco.org
icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/falco/horizontal/color/falco-horizontal-color.svg
sources:
- https://github.com/falcosecurity/falco
maintainers:
- name: The Falco Authors
email: cncf-falco-dev@lists.cncf.io
dependencies:
- name: falcosidekick
version: "0.9.*"
condition: falcosidekick.enabled
repository: https://falcosecurity.github.io/charts
- name: k8s-metacollector
version: 0.1.*
repository: https://falcosecurity.github.io/charts
condition: collectors.kubernetes.enabled
- name: falco-talon
version: 0.3.*
repository: https://falcosecurity.github.io/charts
condition: responseActions.enabled


@ -1,2 +0,0 @@
emeritus_approvers:
- bencer


@ -1,592 +0,0 @@
# Falco
[Falco](https://falco.org) is a *Cloud Native Runtime Security* tool designed to detect anomalous activity in your applications. You can use Falco to monitor runtime security of your Kubernetes applications and internal components.
## Introduction
The deployment of Falco in a Kubernetes cluster is managed through a **Helm chart**. This chart manages the lifecycle of Falco in a cluster by handling all the k8s objects needed by Falco to be seamlessly integrated in your environment. Based on the configuration in the [values.yaml](./values.yaml) file, the chart will render and install the required k8s objects. Keep in mind that Falco can be deployed in your cluster using a `daemonset` or a `deployment`. See the next sections for more info.
## Attention
Before installing Falco in a Kubernetes cluster, a user should check that the kernel version used in the nodes is supported by the community. Also, before reporting any issue with Falco (missing kernel image, CrashLoopBackOff and similar), make sure to read [about the driver](#about-the-driver) section and adjust your setup as required.
## Adding `falcosecurity` repository
Before installing the chart, add the `falcosecurity` charts repository:
```bash
helm repo add falcosecurity https://falcosecurity.github.io/charts
helm repo update
```
## Installing the Chart
To install the chart with the release name `falco` in namespace `falco` run:
```bash
helm install falco falcosecurity/falco \
--create-namespace \
--namespace falco
```
After a few minutes Falco instances should be running on all your nodes. The status of Falco pods can be inspected through *kubectl*:
```bash
kubectl get pods -n falco -o wide
```
If everything went smoothly, you should observe an output similar to the following, indicating that all Falco instances are up and running in your cluster:
```bash
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
falco-57w7q 1/1 Running 0 3m12s 10.244.0.1 control-plane <none> <none>
falco-h4596 1/1 Running 0 3m12s 10.244.1.2 worker-node-1 <none> <none>
falco-kb55h 1/1 Running 0 3m12s 10.244.2.3 worker-node-2 <none> <none>
```
The cluster in our example has three nodes, one *control-plane* node and two *worker* nodes. The default configuration in [values.yaml](./values.yaml) of our helm chart deploys Falco using a `daemonset`. That's the reason why we have one Falco pod on each node.
> **Tip**: List the Falco releases using `helm list -n falco`; a release is a name used to track a specific deployment.
### Falco, Event Sources and Kubernetes
Starting from Falco 0.31.0 the [new plugin system](https://falco.org/docs/plugins/) is stable and production ready. The **plugin system** can be seen as the next step in the evolution of Falco. Historically, Falco monitored system events from the **kernel**, trying to detect malicious behaviors on Linux systems. It also had the capability to process k8s Audit Logs to detect suspicious activities in Kubernetes clusters. Since Falco 0.32.0 all the code related to the k8s Audit Logs was removed from Falco and ported to a [plugin](https://github.com/falcosecurity/plugins/tree/master/plugins/k8saudit). Currently, Falco supports different event sources coming from **plugins** or **drivers** (system events).
Note that **a Falco instance can handle multiple event sources in parallel**. You can deploy Falco leveraging **drivers** for syscall events while at the same time loading **plugins**. A step-by-step guide on how to deploy Falco with multiple sources can be found [here](https://falco.org/docs/getting-started/learning-environments/#falco-with-multiple-sources).
#### About Drivers
Falco needs a **driver** to analyze the system workload and pass security events to userspace. The supported drivers are:
* [Modern eBPF probe](https://falco.org/docs/concepts/event-sources/kernel/#modern-ebpf-probe)
* [Kernel module](https://falco.org/docs/concepts/event-sources/kernel/#kernel-module)
* [Legacy eBPF probe](https://falco.org/docs/concepts/event-sources/kernel/#legacy-ebpf-probe)
The driver must be loaded on the node where Falco is running. Falco now prefers the **Modern eBPF probe** by default. When using **falcoctl** with `driver.kind=auto`, it will automatically choose the best driver for your system. Specifically, it first attempts to use the Modern eBPF probe (which is shipped directly within the Falco binary) and will fall back to the _kernel module_ or the _original eBPF probe_ if the necessary BPF features are not available.
##### Pre-built drivers
The [kernel-crawler](https://github.com/falcosecurity/kernel-crawler) automatically discovers kernel versions and flavors. Currently, it runs weekly. We have a site where users can check for the discovered kernel flavors and versions, [example for Amazon Linux 2](https://falcosecurity.github.io/kernel-crawler/?arch=x86_64&target=AmazonLinux2).
The discovery of a kernel version by the [kernel-crawler](https://falcosecurity.github.io/kernel-crawler/) does not imply that pre-built kernel modules and bpf probes are available. That is because once kernel-crawler has discovered new kernel versions, the drivers need to be built by jobs running on our [Driver Build Grid infra](https://github.com/falcosecurity/test-infra#dbg). Please keep in mind that the building process is best-effort. Users can check the existence of prebuilt modules at the following [link](https://download.falco.org/driver/site/index.html?lib=3.0.1%2Bdriver&target=all&arch=all&kind=all).
##### Building the driver on the fly (fallback)
If a prebuilt driver is not available for your distribution/kernel, users can build the driver themselves, or install the kernel headers on the nodes and the init container (falco-driver-loader) will try to build the driver on the fly.
Falco needs **kernel headers** installed on the host as a prerequisite to build the driver on the fly correctly. You can find instructions for installing the kernel headers for your system under the [Install section](https://falco.org/docs/getting-started/installation/) of the official documentation.
##### Selecting a different driver loader image
Note that since Falco 0.36.0 and Helm chart version 3.7.0 the driver loader image has been updated to be compatible with newer kernels (5.x and above), meaning that if you have an older kernel version and are trying to build the kernel module, you may experience issues. In that case you can use the `falco-driver-loader-legacy` image to get the previous version of the toolchain. To do so, set the appropriate value, i.e. `--set driver.loader.initContainer.image.repository=falcosecurity/falco-driver-loader-legacy`.
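A minimal sketch of such an installation:
```bash
helm install falco falcosecurity/falco \
  --create-namespace \
  --namespace falco \
  --set driver.kind=kmod \
  --set driver.loader.initContainer.image.repository=falcosecurity/falco-driver-loader-legacy
```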
#### About Plugins
[Plugins](https://falco.org/docs/plugins/) are used to extend Falco to support new **data sources**. The current **plugin framework** supports *plugins* with the following *capabilities*:
* Event sourcing capability;
* Field extraction capability;
Plugin capabilities are *composable*: we can have a single plugin with both capabilities, or we can load two different plugins, each with one capability, one plugin as a source of events and another as an extractor. A good example of this is the combination of the [Kubernetes Audit Events](https://github.com/falcosecurity/plugins/tree/master/plugins/k8saudit) and [Falcosecurity Json](https://github.com/falcosecurity/plugins/tree/master/plugins/json) *plugins*. By deploying them both we have support for the **K8s Audit Logs** in Falco.
Note that **the driver is not required when using plugins**.
#### About gVisor
gVisor is an application kernel, written in Go, that implements a substantial portion of the Linux system call interface. It provides an additional layer of isolation between running applications and the host operating system. For more information please consult the [official docs](https://gvisor.dev/docs/). In version `0.32.1`, Falco first introduced support for gVisor by leveraging the stream of system call information coming from gVisor.
Falco requires the version of [runsc](https://gvisor.dev/docs/user_guide/install/) to be equal to or above `20220704.0`. The following snippet shows the gVisor configuration variables found in [values.yaml](./values.yaml):
```yaml
driver:
gvisor:
enabled: true
runsc:
path: /home/containerd/usr/local/sbin
root: /run/containerd/runsc
config: /run/containerd/runsc/config.toml
```
Falco uses the [runsc](https://gvisor.dev/docs/user_guide/install/) binary to interact with sandboxed containers. The following variables need to be set:
* `runsc.path`: absolute path of the `runsc` binary in the k8s nodes;
* `runsc.root`: absolute path of the root directory of the `runsc` container runtime. It is of vital importance for Falco since `runsc` stores there the information of the workloads handled by it;
* `runsc.config`: absolute path of the `runsc` configuration file, used by Falco to set its configuration and make `gVisor` aware of its presence.
If you want to know more about how Falco uses those configuration paths, please have a look at the `falco.gvisor.initContainer` helper in [helpers.tpl](./templates/_helpers.tpl).
A preset `values.yaml` file [values-gvisor-gke.yaml](./values-gvisor-gke.yaml) is provided and can be used as it is to deploy Falco with gVisor support in a [GKE](https://cloud.google.com/kubernetes-engine/docs/how-to/sandbox-pods) cluster. It is also a good starting point for custom deployments.
##### Example: running Falco on GKE, with or without gVisor-enabled pods
If you use GKE with k8s version at least `1.24.4-gke.1800` or `1.25.0-gke.200` with gVisor sandboxed pods, you can install a Falco instance to monitor them with, e.g.:
```
helm install falco-gvisor falcosecurity/falco \
--create-namespace \
--namespace falco-gvisor \
-f https://raw.githubusercontent.com/falcosecurity/charts/master/charts/falco/values-gvisor-gke.yaml
```
Note that the instance of Falco above will only monitor gVisor sandboxed workloads on gVisor-enabled node pools. If you also need to monitor regular workloads on regular node pools you can use the eBPF driver as usual:
```
helm install falco falcosecurity/falco \
--create-namespace \
--namespace falco \
--set driver.kind=ebpf
```
The two instances of Falco will operate independently and can be installed, uninstalled or configured as needed. If you were already monitoring your regular node pools with eBPF you don't need to reinstall it.
##### Falco+gVisor additional resources
An exhaustive blog post about Falco and gVisor can be found on the [Falco blog](https://falco.org/blog/intro-gvisor-falco/).
If you need help setting up gVisor in your environment, please have a look at the [gVisor official docs](https://gvisor.dev/docs/user_guide/quick_start/kubernetes/).
### About Falco Artifacts
Historically, **rules files** and **plugins** used to be shipped inside the Falco docker image and/or inside the chart. Starting from version `v3.0.0` of the chart, the [**falcoctl tool**](https://github.com/falcosecurity/falcoctl) can be used to install/update **rules files** and **plugins**. When referring to such objects we will use the term **artifact**. For more info please check out the following [proposal](https://github.com/falcosecurity/falcoctl/blob/main/proposals/20220916-rules-and-plugin-distribution.md).
The default configuration of the chart for new installations is to use the **falcoctl** tool to handle **artifacts**. The chart will deploy two new containers alongside the Falco one:
* `falcoctl-artifact-install` an init container that makes sure to install the configured **artifacts** before the Falco container starts;
* `falcoctl-artifact-follow` a sidecar container that periodically checks for new artifacts (currently only *falco-rules*) and downloads them;
For more info on how to enable/disable and configure the **falcoctl** tool, check out the config values [here](./README.md#Configuration) and the [upgrading notes](./BREAKING-CHANGES.md#300).
### Deploying Falco in Kubernetes
After clarifying the different [**event sources**](#falco-event-sources-and-kubernetes) and how they are consumed by Falco using the **drivers** and the **plugins**, let us now discuss how Falco is deployed in Kubernetes.
The chart deploys Falco using a `daemonset` or a `deployment` depending on the **event sources**.
#### Daemonset
When using the [drivers](#about-the-driver), Falco is typically deployed as a `DaemonSet`. By using a DaemonSet, Kubernetes ensures that a Falco instance is running on each node even as new nodes are added to your cluster. This makes it a perfect fit for monitoring across the entire cluster.
By default, with `driver.kind=auto`, the correct driver will be automatically selected for each node. This is accomplished through the **driver loader** (implemented by `falcoctl`), which generates a new Falco configuration file and picks the right engine driver (Modern eBPF, kmod, or legacy eBPF) based on the underlying environment. If you prefer to manually force a specific driver, see the other available options below.
**Kernel module**
To run Falco with the [kernel module](https://falco.org/docs/concepts/event-sources/kernel/#kernel-module) you just need to set `driver.kind=kmod` as shown in the following snippet:
```bash
helm install falco falcosecurity/falco \
--create-namespace \
--namespace falco \
--set driver.kind=kmod
```
**Legacy eBPF probe**
To run Falco with the [eBPF probe](http://falco.org/docs/concepts/event-sources/kernel/#legacy-ebpf-probe) you just need to set `driver.kind=ebpf` as shown in the following snippet:
```bash
helm install falco falcosecurity/falco \
--create-namespace \
--namespace falco \
--set driver.kind=ebpf
```
There are other configurations related to the eBPF probe, for more info please check the [values.yaml](./values.yaml) file. After you have made your changes to the configuration file you just need to run:
```bash
helm install falco falcosecurity/falco \
--create-namespace \
--namespace "your-custom-name-space" \
-f "path-to-custom-values.yaml-file"
```
**Modern eBPF probe**
To run Falco with the [modern eBPF probe](https://falco.org/docs/concepts/event-sources/kernel/#modern-ebpf-probe) you just need to set `driver.kind=modern_ebpf` as shown in the following snippet:
```bash
helm install falco falcosecurity/falco \
--create-namespace \
--namespace falco \
--set driver.kind=modern_ebpf
```
#### Deployment
In the scenario where Falco uses **plugins** as data sources, the best option is to deploy it as a k8s `deployment`. **Plugins** can be of two types: the ones that follow the **push model** and the ones that follow the **pull model**. A plugin that adopts the first model expects to receive data from a remote source at a given endpoint: it just exposes an endpoint and waits for data to be posted there. For example, the [Kubernetes Audit Events](https://github.com/falcosecurity/plugins/tree/master/plugins/k8saudit) plugin expects the data to be sent by the *k8s api server* when configured to do so. On the other hand, plugins that abide by the **pull model** retrieve the data from a given remote service.
The following points explain why a k8s `deployment` is suitable when deploying Falco with plugins (a minimal install sketch follows this list):
* it needs to be reachable when ingesting logs directly from remote services;
* it needs only one active replica, otherwise events would be sent/received to/from different Falco instances.
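A minimal sketch of such an installation, using the values shown in the Kubernetes Audit Log section below:
```bash
helm install falco falcosecurity/falco \
--create-namespace \
--namespace falco \
--set controller.kind=deployment \
--set controller.deployment.replicas=1
```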
## Uninstalling the Chart
To uninstall a Falco release from your Kubernetes cluster, always use helm. It will take care of removing all the components deployed by the chart and cleaning up your environment. The following command will remove a release called `falco` in the namespace `falco`:
```bash
helm uninstall falco --namespace falco
```
## Showing logs generated by Falco container
There are many reasons why we might need to inspect the messages emitted by the Falco container. When deployed in Kubernetes, the Falco logs can be inspected through:
```bash
kubectl logs -n falco falco-pod-name
```
where `falco-pod-name` is the name of the Falco pod running in your cluster.
The command described above will display only the logs emitted up to the moment you run it. The `-f` flag comes in handy when we are doing live testing or debugging and want to see the Falco logs as soon as they are emitted. The following command:
```bash
kubectl logs -f -n falco falco-pod-name
```
The `-f (--follow)` flag follows the logs and live-streams them to your terminal; it is really useful when you are debugging a new rule and want to make sure that it is triggered when some actions are performed in the system.
If we need to access the logs of a previous Falco run, we can do that by adding the `-p (--previous)` flag:
```bash
kubectl logs -p -n falco falco-pod-name
```
A scenario where we need the `-p (--previous)` flag is when a Falco pod has restarted and we want to check what went wrong.
### Enabling real time logs
By default, the Falco output is buffered. When live-streaming logs, we will therefore notice delays between the event happening (and the rule triggering) and the corresponding log output.
In order for the logs to be emitted without delay, you need to set `.Values.tty=true` in the [values.yaml](./values.yaml) file.
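For example, assuming an existing release named `falco`, real-time logs can be enabled in place with:
```bash
helm upgrade falco falcosecurity/falco \
--namespace falco \
--reuse-values \
--set tty=true
```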
## K8s-metacollector
Starting from Falco `0.37` the old [k8s-client](https://github.com/falcosecurity/falco/issues/2973) has been removed.
A new component named [k8s-metacollector](https://github.com/falcosecurity/k8s-metacollector) replaces it.
The *k8s-metacollector* is a self-contained module that can be deployed within a Kubernetes cluster to perform the task of gathering metadata
from various Kubernetes resources and subsequently transmitting this collected metadata to designated subscribers.
Kubernetes resources for which metadata will be collected and sent to Falco:
* pods;
* namespaces;
* deployments;
* replicationcontrollers;
* replicasets;
* services;
### Plugin
Since the *k8s-metacollector* is standalone and deployed in the cluster as a deployment, Falco instances need to connect to it
in order to retrieve the `metadata`. This is where the [k8smeta](https://github.com/falcosecurity/plugins/tree/master/plugins/k8smeta) plugin comes in.
The plugin gathers details about Kubernetes resources from the *k8s-metacollector*. It then stores this information
in tables and provides access to Falco upon request. The plugin specifically acquires data for the node where the
associated Falco instance is deployed, resulting in node-level granularity.
### Exported Fields: Old and New
The old [k8s-client](https://github.com/falcosecurity/falco/issues/2973) used to populate the
[k8s](https://falco.org/docs/reference/rules/supported-fields/#field-class-k8s) fields. The **k8s** field class is still
available in Falco, for compatibility reasons, but most of the fields will return `N/A`. The following fields are still
usable and will return meaningful data when the `container runtime collectors` are enabled:
* k8s.pod.name;
* k8s.pod.id;
* k8s.pod.label;
* k8s.pod.labels;
* k8s.pod.ip;
* k8s.pod.cni.json;
* k8s.pod.namespace.name;
The [k8smeta](https://github.com/falcosecurity/plugins/tree/master/plugins/k8smeta) plugin exports a whole new
[field class](https://github.com/falcosecurity/plugins/tree/master/plugins/k8smeta#supported-fields). Note that the new
`k8smeta.*` fields are usable only when the **k8smeta** plugin is loaded in Falco.
### Enabling the k8s-metacollector
The following command will deploy Falco + k8s-metacollector + k8smeta:
```bash
helm install falco falcosecurity/falco \
--namespace falco \
--create-namespace \
--set collectors.kubernetes.enabled=true
```
## Loading custom rules
Falco ships with a nice default ruleset. It is a good starting point, but sooner or later we will need to add custom rules that fit our needs.
So the question is: how can we load custom rules into our Falco deployment?
We are going to create a file that contains the custom rules so that we can keep it in a Git repository.
```bash
cat custom-rules.yaml
```
The file looks like this:
```yaml
customRules:
rules-traefik.yaml: |-
- macro: traefik_consider_syscalls
condition: (evt.num < 0)
- macro: app_traefik
condition: container and container.image startswith "traefik"
# Restricting listening ports to selected set
- list: traefik_allowed_inbound_ports_tcp
items: [443, 80, 8080]
- rule: Unexpected inbound tcp connection traefik
desc: Detect inbound traffic to traefik using tcp on a port outside of expected set
condition: inbound and evt.rawres >= 0 and not fd.sport in (traefik_allowed_inbound_ports_tcp) and app_traefik
output: Inbound network connection to traefik on unexpected port (command=%proc.cmdline pid=%proc.pid connection=%fd.name sport=%fd.sport user=%user.name %container.info image=%container.image)
priority: NOTICE
# Restricting spawned processes to selected set
- list: traefik_allowed_processes
items: ["traefik"]
- rule: Unexpected spawned process traefik
desc: Detect a process started in a traefik container outside of an expected set
condition: spawned_process and not proc.name in (traefik_allowed_processes) and app_traefik
output: Unexpected process spawned in traefik container (command=%proc.cmdline pid=%proc.pid user=%user.name %container.info image=%container.image)
priority: NOTICE
```
So the next step is to use the `custom-rules.yaml` file when installing the Falco Helm chart.
```bash
helm install falco -f custom-rules.yaml falcosecurity/falco
```
We will then see something like this in our logs:
```bash
Tue Jun 5 15:08:57 2018: Loading rules from file /etc/falco/rules.d/rules-traefik.yaml:
```
This means that our Falco installation has loaded the rules and is ready to help us.
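When iterating on the ruleset, the same file can be reapplied to an existing release; a sketch assuming the release is named `falco`:
```bash
helm upgrade falco falcosecurity/falco -f custom-rules.yaml
```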
## Kubernetes Audit Log
The Kubernetes Audit Log is now supported via the built-in [k8saudit](https://github.com/falcosecurity/plugins/tree/master/plugins/k8saudit) plugin. It is entirely up to you to set up the [webhook backend](https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/#webhook-backend) of the Kubernetes API server to forward Audit Log events to the Falco listening port.
The following snippet shows how to deploy Falco with the [k8saudit](https://github.com/falcosecurity/plugins/tree/master/plugins/k8saudit) plugin:
```yaml
# -- Disable the drivers since we want to deploy only the k8saudit plugin.
driver:
enabled: false
# -- Disable the collectors, no syscall events to enrich with metadata.
collectors:
enabled: false
# -- Deploy Falco as a deployment. One instance of Falco is enough. Anyway the number of replicas is configurable.
controller:
kind: deployment
deployment:
# -- Number of replicas when installing Falco using a deployment. Change it if you really know what you are doing.
# For more info check the section on Plugins in the README.md file.
replicas: 1
falcoctl:
artifact:
install:
# -- Enable the init container. We do not recommend installing (or following) plugins for security reasons since they are executable objects.
enabled: true
follow:
# -- Enable the sidecar container. We do not support it yet for plugins. It is used only for rules feeds such as k8saudit-rules.
enabled: true
config:
artifact:
install:
# -- Resolve the dependencies for artifacts.
resolveDeps: true
# -- List of artifacts to be installed by the falcoctl init container.
# Only rulesfile, the plugin will be installed as a dependency.
refs: [k8saudit-rules:0.5]
follow:
# -- List of artifacts to be followed by the falcoctl sidecar container.
refs: [k8saudit-rules:0.5]
services:
- name: k8saudit-webhook
type: NodePort
ports:
- port: 9765 # See plugin open_params
nodePort: 30007
protocol: TCP
falco:
rules_files:
- /etc/falco/k8s_audit_rules.yaml
- /etc/falco/rules.d
plugins:
- name: k8saudit
library_path: libk8saudit.so
init_config: ""
# maxEventBytes: 1048576
# sslCertificate: /etc/falco/falco.pem
open_params: "http://:9765/k8s-audit"
- name: json
library_path: libjson.so
init_config: ""
# Plugins that Falco will load. Note: the same plugins are installed by the falcoctl-artifact-install init container.
load_plugins: [k8saudit, json]
```
Here is the explanation of the above configuration:
* disable the drivers by setting `driver.enabled=false`;
* disable the collectors by setting `collectors.enabled=false`;
* deploy Falco using a k8s *deployment* by setting `controller.kind=deployment`;
* make our Falco instance reachable by the `k8s api-server` by configuring a service for it in `services`;
* enable the `falcoctl-artifact-install` init container;
* configure `falcoctl-artifact-install` to install the required plugins;
* enable the `falcoctl-artifact-follow` sidecar container to keep the rules files up to date;
* load the correct ruleset for our plugin in `falco.rules_files`;
* configure the plugins to be loaded, in this case the `k8saudit` and `json` ones;
* and finally add them to `load_plugins` so that they are loaded by Falco.
The configuration can be found in the [values-k8saudit.yaml](./values-k8saudit.yaml) file, ready to be used:
```bash
#make sure the falco namespace exists
helm install falco falcosecurity/falco \
--create-namespace \
--namespace falco \
-f ./values-k8saudit.yaml
```
After a few minutes, a Falco instance should be running on your cluster. The status of the Falco pod can be inspected through *kubectl*:
```bash
kubectl get pods -n falco -o wide
```
If everything went smoothly, you should observe an output similar to the following, indicating that the Falco instance is up and running:
```bash
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
falco-64484d9579-qckms 1/1 Running 0 101s 10.244.2.2 worker-node-2 <none> <none>
```
Furthermore, you can check the Falco logs through *kubectl logs*:
```bash
kubectl logs -n falco falco-64484d9579-qckms
```
In the logs you should have something similar to the following, indicating that Falco has loaded the required plugins:
```bash
Fri Jul 8 16:07:24 2022: Falco version 0.32.0 (driver version 39ae7d40496793cf3d3e7890c9bbdc202263836b)
Fri Jul 8 16:07:24 2022: Falco initialized with configuration file /etc/falco/falco.yaml
Fri Jul 8 16:07:24 2022: Loading plugin (k8saudit) from file /usr/share/falco/plugins/libk8saudit.so
Fri Jul 8 16:07:24 2022: Loading plugin (json) from file /usr/share/falco/plugins/libjson.so
Fri Jul 8 16:07:24 2022: Loading rules from file /etc/falco/k8s_audit_rules.yaml:
Fri Jul 8 16:07:24 2022: Starting internal webserver, listening on port 8765
```
*Note that the support for the dynamic backend (also known as the `AuditSink` object) has been deprecated from Kubernetes and removed from this chart.*
### Manual setup with NodePort on kOps
Using `kops edit cluster`, ensure these options are present, then run `kops update cluster` and `kops rolling-update cluster`:
```yaml
spec:
kubeAPIServer:
auditLogMaxBackups: 1
auditLogMaxSize: 10
auditLogPath: /var/log/k8s-audit.log
auditPolicyFile: /srv/kubernetes/assets/audit-policy.yaml
auditWebhookBatchMaxWait: 5s
auditWebhookConfigFile: /srv/kubernetes/assets/webhook-config.yaml
fileAssets:
- content: |
# content of the webserver CA certificate
# remove this fileAsset and certificate-authority from webhook-config if using http
name: audit-ca.pem
roles:
- Master
- content: |
apiVersion: v1
kind: Config
clusters:
- name: falco
cluster:
# remove 'certificate-authority' when using 'http'
certificate-authority: /srv/kubernetes/assets/audit-ca.pem
server: https://localhost:32765/k8s-audit
contexts:
- context:
cluster: falco
user: ""
name: default-context
current-context: default-context
preferences: {}
users: []
name: webhook-config.yaml
roles:
- Master
- content: |
# ... paste audit-policy.yaml here ...
# https://raw.githubusercontent.com/falcosecurity/plugins/master/plugins/k8saudit/configs/audit-policy.yaml
name: audit-policy.yaml
roles:
- Master
```
## Enabling gRPC
The Falco gRPC server and the Falco gRPC Outputs APIs are not enabled by default.
Moreover, Falco supports running a gRPC server with two main binding types:
- Over a local **Unix socket** with no authentication
- Over the **network** with mandatory mutual TLS authentication (mTLS)
### gRPC over unix socket (default)
The preferred way to use the gRPC server is over a Unix socket.
To install Falco with gRPC enabled over a **unix socket**, you have to:
```shell
helm install falco falcosecurity/falco \
--create-namespace \
--namespace falco \
--set falco.grpc.enabled=true \
--set falco.grpc_output.enabled=true
```
### gRPC over network
The gRPC server over the network can only be used with mutual authentication between the clients and the server using TLS certificates.
How to generate the certificates is [documented here](https://falco.org/docs/grpc/#generate-valid-ca).
To install Falco with gRPC enabled over the **network**, you have to:
```shell
helm install falco falcosecurity/falco \
--create-namespace \
--namespace falco \
--set falco.grpc.enabled=true \
--set falco.grpc_output.enabled=true \
--set falco.grpc.unixSocketPath="" \
--set-file certs.server.key=/path/to/server.key \
--set-file certs.server.crt=/path/to/server.crt \
--set-file certs.ca.crt=/path/to/ca.crt
```
## Enable http_output
HTTP output enables Falco to send events through HTTP(S) via the following configuration:
```shell
helm install falco falcosecurity/falco \
--create-namespace \
--namespace falco \
--set falco.http_output.enabled=true \
--set falco.http_output.url="http://some.url/some/path/" \
--set falco.json_output=true \
--set falco.json_include_output_property=true
```
Additionally, you can enable mTLS communication and load HTTP client cryptographic material via:
```shell
helm install falco falcosecurity/falco \
--create-namespace \
--namespace falco \
--set falco.http_output.enabled=true \
--set falco.http_output.url="https://some.url/some/path/" \
--set falco.json_output=true \
--set falco.json_include_output_property=true \
--set falco.http_output.mtls=true \
--set falco.http_output.client_cert="/etc/falco/certs/client/client.crt" \
--set falco.http_output.client_key="/etc/falco/certs/client/client.key" \
--set falco.http_output.ca_cert="/etc/falco/certs/client/ca.crt" \
--set-file certs.client.key="/path/to/client.key",certs.client.crt="/path/to/client.crt",certs.ca.crt="/path/to/cacert.crt"
```
Or, instead of directly setting the files via `--set-file`, you can mount an existing volume with the `certs.existingClientSecret` value.
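As a hypothetical sketch, such a secret could be created out-of-band and then referenced by the chart; the secret name `falco-client-certs` is an assumption for illustration, and the key names (`client.key`, `client.crt`, `ca.crt`) match the in-pod paths used in the `falco.http_output.*` options above:
```bash
# create the secret out-of-band (name and file paths are illustrative)
kubectl create secret generic falco-client-certs \
--namespace falco \
--from-file=client.key=/path/to/client.key \
--from-file=client.crt=/path/to/client.crt \
--from-file=ca.crt=/path/to/cacert.crt

# then reference it instead of the --set-file options,
# keeping the falco.http_output.* settings shown above
helm install falco falcosecurity/falco \
--create-namespace \
--namespace falco \
--set certs.existingClientSecret=falco-client-certs
```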
## Deploy Falcosidekick with Falco
[`Falcosidekick`](https://github.com/falcosecurity/falcosidekick) can be installed with `Falco` by setting `--set falcosidekick.enabled=true`. This setting automatically configures all options of `Falco` for working with `Falcosidekick`.
All values for the configuration of `Falcosidekick` are available by prefixing them with `falcosidekick.`. The full list of available values is [here](https://github.com/falcosecurity/charts/tree/master/charts/falcosidekick#configuration).
For example, to enable the deployment of [`Falcosidekick-UI`](https://github.com/falcosecurity/falcosidekick-ui), add `--set falcosidekick.enabled=true --set falcosidekick.webui.enabled=true`.
If you use a proxy in your cluster, the requests between `Falco` and `Falcosidekick` might be captured; to avoid that, use the full FQDN of `Falcosidekick` by setting `--set falcosidekick.fullfqdn=true`.
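Putting these options together, a typical sketch that deploys Falco, Falcosidekick and its UI looks like:
```bash
helm install falco falcosecurity/falco \
--create-namespace \
--namespace falco \
--set falcosidekick.enabled=true \
--set falcosidekick.webui.enabled=true
```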
## Configuration
The following table lists the main configurable parameters of the {{ template "chart.name" . }} chart v{{ template "chart.version" . }} and their default values. See [values.yaml](./values.yaml) for full list.
{{ template "chart.valuesSection" . }}
@ -1,47 +0,0 @@
{{- if eq .Values.controller.kind "daemonset" }}
Falco agents are spinning up on each node in your cluster. After a few
seconds, they are going to start monitoring your containers looking for
security issues.
{{printf "\n" }}
{{- end}}
{{- if .Values.integrations }}
WARNING: The following integrations have been deprecated and removed
- gcscc
- natsOutput
- snsOutput
- pubsubOutput
Consider using falcosidekick (https://github.com/falcosecurity/falcosidekick) as a replacement.
{{- else }}
No further action should be required.
{{- end }}
{{printf "\n" }}
{{- if not .Values.falcosidekick.enabled }}
Tip:
You can easily forward Falco events to Slack, Kafka, AWS Lambda and more with falcosidekick.
Full list of outputs: https://github.com/falcosecurity/charts/tree/master/charts/falcosidekick.
You can enable its deployment with `--set falcosidekick.enabled=true` or in your values.yaml.
See: https://github.com/falcosecurity/charts/blob/master/charts/falcosidekick/values.yaml for configuration values.
{{- end}}
{{- if (has .Values.driver.kind (list "module" "modern-bpf")) -}}
{{- println }}
WARNING(drivers):
{{- printf "\nThe driver kind: \"%s\" is an alias and might be removed in the future.\n" .Values.driver.kind -}}
{{- $driver := "" -}}
{{- if eq .Values.driver.kind "module" -}}
{{- $driver = "kmod" -}}
{{- else if eq .Values.driver.kind "modern-bpf" -}}
{{- $driver = "modern_ebpf" -}}
{{- end -}}
{{- printf "Please use \"%s\" instead." $driver}}
{{- end -}}
{{- if and (not (empty .Values.falco.load_plugins)) (or .Values.falcoctl.artifact.follow.enabled .Values.falcoctl.artifact.install.enabled) }}
NOTICE:
{{ printf "It seems you are loading the following plugins %v, please make sure to install them by specifying the correct reference to falcoctl.config.artifact.install.refs: %v" .Values.falco.load_plugins .Values.falcoctl.config.artifact.install.refs -}}
{{ printf "Ignore this notice if the value of falcoctl.config.artifact.install.refs is correct already." -}}
{{- end }}
@ -1,561 +0,0 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "falco.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "falco.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "falco.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Allow the release namespace to be overridden
*/}}
{{- define "falco.namespace" -}}
{{- default .Release.Namespace .Values.namespaceOverride -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "falco.labels" -}}
helm.sh/chart: {{ include "falco.chart" . }}
{{ include "falco.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "falco.selectorLabels" -}}
app.kubernetes.io/name: {{ include "falco.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Renders a value that contains template.
Usage:
{{ include "falco.renderTemplate" ( dict "value" .Values.path.to.the.Value "context" $) }}
*/}}
{{- define "falco.renderTemplate" -}}
{{- if typeIs "string" .value }}
{{- tpl .value .context }}
{{- else }}
{{- tpl (.value | toYaml) .context }}
{{- end }}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "falco.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "falco.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
{{/*
Return the proper Falco image name
*/}}
{{- define "falco.image" -}}
{{- with .Values.image.registry -}}
{{- . }}/
{{- end -}}
{{- .Values.image.repository }}:
{{- .Values.image.tag | default (printf "%s" .Chart.AppVersion) -}}
{{- end -}}
{{/*
Return the proper Falco driver loader image name
*/}}
{{- define "falco.driverLoader.image" -}}
{{- with .Values.driver.loader.initContainer.image.registry -}}
{{- . }}/
{{- end -}}
{{- .Values.driver.loader.initContainer.image.repository }}:
{{- .Values.driver.loader.initContainer.image.tag | default .Chart.AppVersion -}}
{{- end -}}
{{/*
Return the proper Falcoctl image name
*/}}
{{- define "falcoctl.image" -}}
{{ printf "%s/%s:%s" .Values.falcoctl.image.registry .Values.falcoctl.image.repository .Values.falcoctl.image.tag }}
{{- end -}}
{{/*
Extract the unixSocket's directory path
*/}}
{{- define "falco.unixSocketDir" -}}
{{- if and .Values.falco.grpc.enabled .Values.falco.grpc.bind_address (hasPrefix "unix://" .Values.falco.grpc.bind_address) -}}
{{- .Values.falco.grpc.bind_address | trimPrefix "unix://" | dir -}}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for rbac.
*/}}
{{- define "rbac.apiVersion" -}}
{{- if .Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1" }}
{{- print "rbac.authorization.k8s.io/v1" -}}
{{- else -}}
{{- print "rbac.authorization.k8s.io/v1beta1" -}}
{{- end -}}
{{- end -}}
{{/*
Build http url for falcosidekick.
*/}}
{{- define "falcosidekick.url" -}}
{{- if not .Values.falco.http_output.url -}}
{{- $falcoName := include "falco.fullname" . -}}
{{- $listenPort := .Values.falcosidekick.listenport | default "2801" -}}
{{- if .Values.falcosidekick.fullfqdn -}}
{{- printf "http://%s-falcosidekick.%s.svc.cluster.local:%s" $falcoName .Release.Namespace $listenPort -}}
{{- else -}}
{{- printf "http://%s-falcosidekick:%s" $falcoName $listenPort -}}
{{- end -}}
{{- else -}}
{{- .Values.falco.http_output.url -}}
{{- end -}}
{{- end -}}
{{/*
Set appropriate falco configuration if falcosidekick has been configured.
*/}}
{{- define "falco.falcosidekickConfig" -}}
{{- if .Values.falcosidekick.enabled -}}
{{- $_ := set .Values.falco "json_output" true -}}
{{- $_ := set .Values.falco "json_include_output_property" true -}}
{{- $_ := set .Values.falco.http_output "enabled" true -}}
{{- $_ := set .Values.falco.http_output "url" (include "falcosidekick.url" .) -}}
{{- end -}}
{{- end -}}
{{/*
Get port from .Values.falco.grpc.bind_address.
*/}}
{{- define "grpc.port" -}}
{{- $error := "unable to extract listenPort from .Values.falco.grpc.bind_address. Make sure it is in the correct format" -}}
{{- if and .Values.falco.grpc.enabled .Values.falco.grpc.bind_address (not (hasPrefix "unix://" .Values.falco.grpc.bind_address)) -}}
{{- $tokens := split ":" .Values.falco.grpc.bind_address -}}
{{- if $tokens._1 -}}
{{- $tokens._1 -}}
{{- else -}}
{{- fail $error -}}
{{- end -}}
{{- else -}}
{{- fail $error -}}
{{- end -}}
{{- end -}}
{{/*
Disable the syscall source if some conditions are met.
By default the syscall source is always enabled in falco. If no syscall source is enabled, falco
exits. Here we check that no producers of syscall events have been configured, and if so
we just disable the syscall source.
*/}}
{{- define "falco.configSyscallSource" -}}
{{- $userspaceDisabled := true -}}
{{- $gvisorDisabled := (ne .Values.driver.kind "gvisor") -}}
{{- $driverDisabled := (not .Values.driver.enabled) -}}
{{- if or (has "-u" .Values.extra.args) (has "--userspace" .Values.extra.args) -}}
{{- $userspaceDisabled = false -}}
{{- end -}}
{{- if and $driverDisabled $userspaceDisabled $gvisorDisabled }}
- --disable-source
- syscall
{{- end -}}
{{- end -}}
{{/*
We need the falco binary in order to generate the configuration for gVisor. This init container
is deployed within the Falco pod when gVisor is enabled. The image is the same as the Falco one we are
deploying, and the configuration logic is a bash script passed as an argument on the fly. This solution should
be temporary and will stay here until we move this logic to the falcoctl tool.
*/}}
{{- define "falco.gvisor.initContainer" -}}
- name: {{ .Chart.Name }}-gvisor-init
image: {{ include "falco.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
args:
- /bin/bash
- -c
- |
set -o errexit
set -o nounset
set -o pipefail
root={{ .Values.driver.gvisor.runsc.root }}
config={{ .Values.driver.gvisor.runsc.config }}
echo "* Configuring Falco+gVisor integration...".
# Check if gVisor is configured on the node.
echo "* Checking for /host${config} file..."
if [[ -f /host${config} ]]; then
echo "* Generating the Falco configuration..."
/usr/bin/falco --gvisor-generate-config=${root}/falco.sock > /host${root}/pod-init.json
sed -E -i.orig '/"ignore_missing" : true,/d' /host${root}/pod-init.json
if [[ -z $(grep pod-init-config /host${config}) ]]; then
echo "* Updating the runsc config file /host${config}..."
echo " pod-init-config = \"${root}/pod-init.json\"" >> /host${config}
fi
# Endpoint inside the container is different from outside, add
# "/host" to the endpoint path inside the container.
echo "* Setting the updated Falco configuration to /gvisor-config/pod-init.json..."
sed 's/"endpoint" : "\/run/"endpoint" : "\/host\/run/' /host${root}/pod-init.json > /gvisor-config/pod-init.json
else
echo "* File /host${config} not found."
echo "* Please make sure that the gVisor is configured in the current node and/or the runsc root and config file path are correct"
exit -1
fi
echo "* Falco+gVisor correctly configured."
exit 0
volumeMounts:
- mountPath: /host{{ .Values.driver.gvisor.runsc.path }}
name: runsc-path
readOnly: true
- mountPath: /host{{ .Values.driver.gvisor.runsc.root }}
name: runsc-root
- mountPath: /host{{ .Values.driver.gvisor.runsc.config }}
name: runsc-config
- mountPath: /gvisor-config
name: falco-gvisor-config
{{- end -}}
{{- define "falcoctl.initContainer" -}}
- name: falcoctl-artifact-install
image: {{ include "falcoctl.image" . }}
imagePullPolicy: {{ .Values.falcoctl.image.pullPolicy }}
args:
- artifact
- install
{{- with .Values.falcoctl.artifact.install.args }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.falcoctl.artifact.install.resources }}
resources:
{{- toYaml . | nindent 4 }}
{{- end }}
securityContext:
{{- if .Values.falcoctl.artifact.install.securityContext }}
{{- toYaml .Values.falcoctl.artifact.install.securityContext | nindent 4 }}
{{- end }}
volumeMounts:
- mountPath: {{ .Values.falcoctl.config.artifact.install.pluginsDir }}
name: plugins-install-dir
- mountPath: {{ .Values.falcoctl.config.artifact.install.rulesfilesDir }}
name: rulesfiles-install-dir
- mountPath: /etc/falcoctl
name: falcoctl-config-volume
{{- with .Values.falcoctl.artifact.install.mounts.volumeMounts }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if .Values.falcoctl.artifact.install.env }}
env:
{{- include "falco.renderTemplate" ( dict "value" .Values.falcoctl.artifact.install.env "context" $) | nindent 4 }}
{{- end }}
{{- end -}}
{{- define "falcoctl.sidecar" -}}
- name: falcoctl-artifact-follow
image: {{ include "falcoctl.image" . }}
imagePullPolicy: {{ .Values.falcoctl.image.pullPolicy }}
args:
- artifact
- follow
{{- with .Values.falcoctl.artifact.follow.args }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.falcoctl.artifact.follow.resources }}
resources:
{{- toYaml . | nindent 4 }}
{{- end }}
securityContext:
{{- if .Values.falcoctl.artifact.follow.securityContext }}
{{- toYaml .Values.falcoctl.artifact.follow.securityContext | nindent 4 }}
{{- end }}
volumeMounts:
- mountPath: {{ .Values.falcoctl.config.artifact.follow.pluginsDir }}
name: plugins-install-dir
- mountPath: {{ .Values.falcoctl.config.artifact.follow.rulesfilesDir }}
name: rulesfiles-install-dir
- mountPath: /etc/falcoctl
name: falcoctl-config-volume
{{- with .Values.falcoctl.artifact.follow.mounts.volumeMounts }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if .Values.falcoctl.artifact.follow.env }}
env:
{{- include "falco.renderTemplate" ( dict "value" .Values.falcoctl.artifact.follow.env "context" $) | nindent 4 }}
{{- end }}
{{- end -}}
{{/*
Build configuration for k8smeta plugin and update the relevant variables.
* The configuration that needs to be built up is the initconfig section:
init_config:
collectorPort: 0
collectorHostname: ""
nodeName: ""
The falco chart exposes this configuration through two variables:
* collectors.kubernetes.collectorHostname;
* collectors.kubernetes.collectorPort;
If those two variables are not set, then we take those values from the k8smetacollector subchart.
The hostname is built using the name of the service that exposes the collector endpoints and the
port is directly taken from the service's port that exposes the gRPC endpoint.
We reuse the helpers from the k8smetacollector subchart, by passing down the variables. There is one
hardcoded value, which is the chart name for the k8s-metacollector chart.
* The falcoctl configuration is updated to allow plugin artifacts to be installed. The refs in the install
section are updated by adding the reference for the k8smeta plugin that needs to be installed.
NOTE: It seems that the named templates run during the validation process, and then again during the
render phase. In our case we are setting global variables that persist during the various phases, so
we need to make the helper idempotent.
*/}}
{{- define "k8smeta.configuration" -}}
{{- if and .Values.collectors.kubernetes.enabled .Values.driver.enabled -}}
{{- $hostname := "" -}}
{{- if .Values.collectors.kubernetes.collectorHostname -}}
{{- $hostname = .Values.collectors.kubernetes.collectorHostname -}}
{{- else -}}
{{- $collectorContext := (dict "Release" .Release "Values" (index .Values "k8s-metacollector") "Chart" (dict "Name" "k8s-metacollector")) -}}
{{- $hostname = printf "%s.%s.svc" (include "k8s-metacollector.fullname" $collectorContext) (include "k8s-metacollector.namespace" $collectorContext) -}}
{{- end -}}
{{- $hasConfig := false -}}
{{- range .Values.falco.plugins -}}
{{- if eq (get . "name") "k8smeta" -}}
{{ $hasConfig = true -}}
{{- end -}}
{{- end -}}
{{- if not $hasConfig -}}
{{- $listenPort := default (index .Values "k8s-metacollector" "service" "ports" "broker-grpc" "port") .Values.collectors.kubernetes.collectorPort -}}
{{- $listenPort = int $listenPort -}}
{{- $pluginConfig := dict "name" "k8smeta" "library_path" "libk8smeta.so" "init_config" (dict "collectorHostname" $hostname "collectorPort" $listenPort "nodeName" "${FALCO_K8S_NODE_NAME}" "verbosity" .Values.collectors.kubernetes.verbosity "hostProc" .Values.collectors.kubernetes.hostProc) -}}
{{- $newConfig := append .Values.falco.plugins $pluginConfig -}}
{{- $_ := set .Values.falco "plugins" ($newConfig | uniq) -}}
{{- $loadedPlugins := append .Values.falco.load_plugins "k8smeta" -}}
{{- $_ = set .Values.falco "load_plugins" ($loadedPlugins | uniq) -}}
{{- end -}}
{{- $_ := set .Values.falcoctl.config.artifact.install "refs" ((append .Values.falcoctl.config.artifact.install.refs .Values.collectors.kubernetes.pluginRef) | uniq)}}
{{- $_ = set .Values.falcoctl.config.artifact "allowedTypes" ((append .Values.falcoctl.config.artifact.allowedTypes "plugin") | uniq)}}
{{- end -}}
{{- end -}}
{{/*
Based on the user input it populates the driver configuration in the falco config map.
*/}}
{{- define "falco.engineConfiguration" -}}
{{- if .Values.driver.enabled -}}
{{- $supportedDrivers := list "kmod" "ebpf" "modern_ebpf" "gvisor" "auto" -}}
{{- $aliasDrivers := list "module" "modern-bpf" -}}
{{- if and (not (has .Values.driver.kind $supportedDrivers)) (not (has .Values.driver.kind $aliasDrivers)) -}}
{{- fail (printf "unsupported driver kind: \"%s\". Supported drivers %s, alias %s" .Values.driver.kind $supportedDrivers $aliasDrivers) -}}
{{- end -}}
{{- if or (eq .Values.driver.kind "kmod") (eq .Values.driver.kind "module") -}}
{{- $kmodConfig := dict "kind" "kmod" "kmod" (dict "buf_size_preset" .Values.driver.kmod.bufSizePreset "drop_failed_exit" .Values.driver.kmod.dropFailedExit) -}}
{{- $_ := set .Values.falco "engine" $kmodConfig -}}
{{- else if eq .Values.driver.kind "ebpf" -}}
{{- $ebpfConfig := dict "kind" "ebpf" "ebpf" (dict "buf_size_preset" .Values.driver.ebpf.bufSizePreset "drop_failed_exit" .Values.driver.ebpf.dropFailedExit "probe" .Values.driver.ebpf.path) -}}
{{- $_ := set .Values.falco "engine" $ebpfConfig -}}
{{- else if or (eq .Values.driver.kind "modern_ebpf") (eq .Values.driver.kind "modern-bpf") -}}
{{- $ebpfConfig := dict "kind" "modern_ebpf" "modern_ebpf" (dict "buf_size_preset" .Values.driver.modernEbpf.bufSizePreset "drop_failed_exit" .Values.driver.modernEbpf.dropFailedExit "cpus_for_each_buffer" .Values.driver.modernEbpf.cpusForEachBuffer) -}}
{{- $_ := set .Values.falco "engine" $ebpfConfig -}}
{{- else if eq .Values.driver.kind "gvisor" -}}
{{- $root := printf "/host%s/k8s.io" .Values.driver.gvisor.runsc.root -}}
{{- $gvisorConfig := dict "kind" "gvisor" "gvisor" (dict "config" "/gvisor-config/pod-init.json" "root" $root) -}}
{{- $_ := set .Values.falco "engine" $gvisorConfig -}}
{{- else if eq .Values.driver.kind "auto" -}}
{{- $engineConfig := dict "kind" "modern_ebpf" "kmod" (dict "buf_size_preset" .Values.driver.kmod.bufSizePreset "drop_failed_exit" .Values.driver.kmod.dropFailedExit) "ebpf" (dict "buf_size_preset" .Values.driver.ebpf.bufSizePreset "drop_failed_exit" .Values.driver.ebpf.dropFailedExit "probe" .Values.driver.ebpf.path) "modern_ebpf" (dict "buf_size_preset" .Values.driver.modernEbpf.bufSizePreset "drop_failed_exit" .Values.driver.modernEbpf.dropFailedExit "cpus_for_each_buffer" .Values.driver.modernEbpf.cpusForEachBuffer) -}}
{{- $_ := set .Values.falco "engine" $engineConfig -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
It returns "true" if the driver loader has to be enabled, otherwise false.
*/}}
{{- define "driverLoader.enabled" -}}
{{- if or (eq .Values.driver.kind "modern_ebpf") (eq .Values.driver.kind "modern-bpf") (eq .Values.driver.kind "gvisor") (not .Values.driver.enabled) (not .Values.driver.loader.enabled) -}}
false
{{- else -}}
true
{{- end -}}
{{- end -}}
{{/*
Based on the user input it populates the metrics configuration in the falco config map.
*/}}
{{- define "falco.metricsConfiguration" -}}
{{- if .Values.metrics.enabled -}}
{{- $_ := set .Values.falco.webserver "prometheus_metrics_enabled" true -}}
{{- $_ = set .Values.falco.webserver "enabled" true -}}
{{- $_ = set .Values.falco.metrics "enabled" .Values.metrics.enabled -}}
{{- $_ = set .Values.falco.metrics "interval" .Values.metrics.interval -}}
{{- $_ = set .Values.falco.metrics "output_rule" .Values.metrics.outputRule -}}
{{- $_ = set .Values.falco.metrics "rules_counters_enabled" .Values.metrics.rulesCountersEnabled -}}
{{- $_ = set .Values.falco.metrics "resource_utilization_enabled" .Values.metrics.resourceUtilizationEnabled -}}
{{- $_ = set .Values.falco.metrics "state_counters_enabled" .Values.metrics.stateCountersEnabled -}}
{{- $_ = set .Values.falco.metrics "kernel_event_counters_enabled" .Values.metrics.kernelEventCountersEnabled -}}
{{- $_ = set .Values.falco.metrics "kernel_event_counters_per_cpu_enabled" .Values.metrics.kernelEventCountersPerCPUEnabled -}}
{{- $_ = set .Values.falco.metrics "libbpf_stats_enabled" .Values.metrics.libbpfStatsEnabled -}}
{{- $_ = set .Values.falco.metrics "convert_memory_to_mb" .Values.metrics.convertMemoryToMB -}}
{{- $_ = set .Values.falco.metrics "include_empty_values" .Values.metrics.includeEmptyValues -}}
{{- end -}}
{{- end -}}
{{/*
This helper is used to add the container plugin to the falco configuration.
*/}}
{{ define "falco.containerPlugin" -}}
{{ if and .Values.driver.enabled .Values.collectors.enabled -}}
{{ if and (or .Values.collectors.docker.enabled .Values.collectors.crio.enabled .Values.collectors.containerd.enabled) .Values.collectors.containerEngine.enabled -}}
{{ fail "You can not enable any of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time. Please use the containerEngine configuration since the old configurations are deprecated." }}
{{ else if or .Values.collectors.docker.enabled .Values.collectors.crio.enabled .Values.collectors.containerd.enabled .Values.collectors.containerEngine.enabled -}}
{{ if or .Values.collectors.docker.enabled .Values.collectors.crio.enabled .Values.collectors.containerd.enabled -}}
{{ $_ := set .Values.collectors.containerEngine.engines.docker "enabled" .Values.collectors.docker.enabled -}}
{{ $_ = set .Values.collectors.containerEngine.engines.docker "sockets" (list .Values.collectors.docker.socket) -}}
{{ $_ = set .Values.collectors.containerEngine.engines.containerd "enabled" .Values.collectors.containerd.enabled -}}
{{ $_ = set .Values.collectors.containerEngine.engines.containerd "sockets" (list .Values.collectors.containerd.socket) -}}
{{ $_ = set .Values.collectors.containerEngine.engines.cri "enabled" .Values.collectors.crio.enabled -}}
{{ $_ = set .Values.collectors.containerEngine.engines.cri "sockets" (list .Values.collectors.crio.socket) -}}
{{ $_ = set .Values.collectors.containerEngine.engines.podman "enabled" false -}}
{{ $_ = set .Values.collectors.containerEngine.engines.lxc "enabled" false -}}
{{ $_ = set .Values.collectors.containerEngine.engines.libvirt_lxc "enabled" false -}}
{{ $_ = set .Values.collectors.containerEngine.engines.bpm "enabled" false -}}
{{ end -}}
{{ $hasConfig := false -}}
{{ range .Values.falco.plugins -}}
{{ if eq (get . "name") "container" -}}
{{ $hasConfig = true -}}
{{ end -}}
{{ end -}}
{{ if not $hasConfig -}}
{{ $pluginConfig := dict -}}
{{ with .Values.collectors.containerEngine -}}
{{ $pluginConfig = dict "name" "container" "library_path" "libcontainer.so" "init_config" (dict "label_max_len" .labelMaxLen "with_size" .withSize "hooks" .hooks "engines" .engines) -}}
{{ end -}}
{{ $newConfig := append .Values.falco.plugins $pluginConfig -}}
{{ $_ := set .Values.falco "plugins" ($newConfig | uniq) -}}
{{ $loadedPlugins := append .Values.falco.load_plugins "container" -}}
{{ $_ = set .Values.falco "load_plugins" ($loadedPlugins | uniq) -}}
{{ end -}}
{{ $_ := set .Values.falcoctl.config.artifact.install "refs" ((append .Values.falcoctl.config.artifact.install.refs .Values.collectors.containerEngine.pluginRef) | uniq) -}}
{{ $_ = set .Values.falcoctl.config.artifact "allowedTypes" ((append .Values.falcoctl.config.artifact.allowedTypes "plugin") | uniq) -}}
{{ end -}}
{{ end -}}
{{ end -}}
{{/*
This helper is used to add container plugin volumes to the falco pod.
*/}}
{{- define "falco.containerPluginVolumes" -}}
{{- if and .Values.driver.enabled .Values.collectors.enabled -}}
{{- if and (or .Values.collectors.docker.enabled .Values.collectors.crio.enabled .Values.collectors.containerd.enabled) .Values.collectors.containerEngine.enabled -}}
{{ fail "You can not enable any of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time. Please use the containerEngine configuration since the old configurations are deprecated." }}
{{- end -}}
{{ $volumes := list -}}
{{- if .Values.collectors.docker.enabled -}}
{{ $volumes = append $volumes (dict "name" "docker-socket" "hostPath" (dict "path" .Values.collectors.docker.socket)) -}}
{{- end -}}
{{- if .Values.collectors.crio.enabled -}}
{{ $volumes = append $volumes (dict "name" "crio-socket" "hostPath" (dict "path" .Values.collectors.crio.socket)) -}}
{{- end -}}
{{- if .Values.collectors.containerd.enabled -}}
{{ $volumes = append $volumes (dict "name" "containerd-socket" "hostPath" (dict "path" .Values.collectors.containerd.socket)) -}}
{{- end -}}
{{- if .Values.collectors.containerEngine.enabled -}}
{{- $seenPaths := dict -}}
{{- $idx := 0 -}}
{{- $engineOrder := list "docker" "podman" "containerd" "cri" "lxc" "libvirt_lxc" "bpm" -}}
{{- range $engineName := $engineOrder -}}
{{- $val := index $.Values.collectors.containerEngine.engines $engineName -}}
{{- if and $val $val.enabled -}}
{{- range $index, $socket := $val.sockets -}}
{{- $mountPath := print "/host" $socket -}}
{{- if not (hasKey $seenPaths $mountPath) -}}
{{ $volumes = append $volumes (dict "name" (printf "container-engine-socket-%d" $idx) "hostPath" (dict "path" $socket)) -}}
{{- $idx = add $idx 1 -}}
{{- $_ := set $seenPaths $mountPath true -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- if gt (len $volumes) 0 -}}
{{ toYaml $volumes -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
This helper is used to add container plugin volumeMounts to the falco pod.
*/}}
{{- define "falco.containerPluginVolumeMounts" -}}
{{- if and .Values.driver.enabled .Values.collectors.enabled -}}
{{- if and (or .Values.collectors.docker.enabled .Values.collectors.crio.enabled .Values.collectors.containerd.enabled) .Values.collectors.containerEngine.enabled -}}
{{ fail "You can not enable any of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time. Please use the containerEngine configuration since the old configurations are deprecated." }}
{{- end -}}
{{ $volumeMounts := list -}}
{{- if .Values.collectors.docker.enabled -}}
{{ $volumeMounts = append $volumeMounts (dict "name" "docker-socket" "mountPath" (print "/host" .Values.collectors.docker.socket)) -}}
{{- end -}}
{{- if .Values.collectors.crio.enabled -}}
{{ $volumeMounts = append $volumeMounts (dict "name" "crio-socket" "mountPath" (print "/host" .Values.collectors.crio.socket)) -}}
{{- end -}}
{{- if .Values.collectors.containerd.enabled -}}
{{ $volumeMounts = append $volumeMounts (dict "name" "containerd-socket" "mountPath" (print "/host" .Values.collectors.containerd.socket)) -}}
{{- end -}}
{{- if .Values.collectors.containerEngine.enabled -}}
{{- $seenPaths := dict -}}
{{- $idx := 0 -}}
{{- $engineOrder := list "docker" "podman" "containerd" "cri" "lxc" "libvirt_lxc" "bpm" -}}
{{- range $engineName := $engineOrder -}}
{{- $val := index $.Values.collectors.containerEngine.engines $engineName -}}
{{- if and $val $val.enabled -}}
{{- range $index, $socket := $val.sockets -}}
{{- $mountPath := print "/host" $socket -}}
{{- if not (hasKey $seenPaths $mountPath) -}}
{{ $volumeMounts = append $volumeMounts (dict "name" (printf "container-engine-socket-%d" $idx) "mountPath" $mountPath) -}}
{{- $idx = add $idx 1 -}}
{{- $_ := set $seenPaths $mountPath true -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- if gt (len $volumeMounts) 0 -}}
{{ toYaml ($volumeMounts) }}
{{- end -}}
{{- end -}}
{{- end -}}
@ -1,19 +0,0 @@
{{- with .Values.certs }}
{{- if and .server.key .server.crt .ca.crt }}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "falco.fullname" $ }}-certs
namespace: {{ include "falco.namespace" $ }}
labels:
{{- include "falco.labels" $ | nindent 4 }}
type: Opaque
data:
{{ $key := .server.key }}
server.key: {{ $key | b64enc | quote }}
{{ $crt := .server.crt }}
server.crt: {{ $crt | b64enc | quote }}
falco.pem: {{ print $key $crt | b64enc | quote }}
ca.crt: {{ .ca.crt | b64enc | quote }}
{{- end }}
{{- end }}
@ -1,18 +0,0 @@
{{- if and .Values.certs.client.key .Values.certs.client.crt .Values.certs.ca.crt }}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "falco.fullname" . }}-client-certs
namespace: {{ .Release.Namespace }}
labels:
{{- include "falco.labels" $ | nindent 4 }}
type: Opaque
data:
{{ $key := .Values.certs.client.key }}
client.key: {{ $key | b64enc | quote }}
{{ $crt := .Values.certs.client.crt }}
client.crt: {{ $crt | b64enc | quote }}
falcoclient.pem: {{ print $key $crt | b64enc | quote }}
ca.crt: {{ .Values.certs.ca.crt | b64enc | quote }}
ca.pem: {{ .Values.certs.ca.crt | b64enc | quote }}
{{- end }}
@ -1,15 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "falco.fullname" . }}
namespace: {{ include "falco.namespace" . }}
labels:
{{- include "falco.labels" . | nindent 4 }}
data:
falco.yaml: |-
{{- include "falco.falcosidekickConfig" . }}
{{- include "k8smeta.configuration" . -}}
{{- include "falco.engineConfiguration" . -}}
{{- include "falco.metricsConfiguration" . -}}
{{- include "falco.containerPlugin" . -}}
{{- toYaml .Values.falco | nindent 4 }}
@ -1,26 +0,0 @@
{{- if eq .Values.controller.kind "daemonset" }}
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: {{ include "falco.fullname" . }}
namespace: {{ include "falco.namespace" . }}
labels:
{{- include "falco.labels" . | nindent 4 }}
{{- if .Values.controller.labels }}
{{- toYaml .Values.controller.labels | nindent 4 }}
{{- end }}
{{- if .Values.controller.annotations }}
annotations:
{{ toYaml .Values.controller.annotations | nindent 4 }}
{{- end }}
spec:
selector:
matchLabels:
{{- include "falco.selectorLabels" . | nindent 6 }}
template:
{{- include "falco.podTemplate" . | nindent 4 }}
{{- with .Values.controller.daemonset.updateStrategy }}
updateStrategy:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
@ -1,26 +0,0 @@
{{- if eq .Values.controller.kind "deployment" }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "falco.fullname" . }}
namespace: {{ include "falco.namespace" . }}
labels:
{{- include "falco.labels" . | nindent 4 }}
{{- if .Values.controller.labels }}
{{- toYaml .Values.controller.labels | nindent 4 }}
{{- end }}
{{- if .Values.controller.annotations }}
annotations:
{{ toYaml .Values.controller.annotations | nindent 4 }}
{{- end }}
spec:
replicas: {{ .Values.controller.deployment.replicas }}
{{- if .Values.controller.deployment.revisionHistoryLimit }}
revisionHistoryLimit: {{ .Values.controller.deployment.revisionHistoryLimit }}
{{- end }}
selector:
matchLabels:
{{- include "falco.selectorLabels" . | nindent 6 }}
template:
{{- include "falco.podTemplate" . | nindent 4 }}
{{- end }}
@ -1,22 +0,0 @@
{{- if .Values.grafana.dashboards.enabled -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Values.grafana.dashboards.configMaps.falco.name }}
{{ if .Values.grafana.dashboards.configMaps.falco.namespace }}
namespace: {{ .Values.grafana.dashboards.configMaps.falco.namespace }}
{{- else -}}
namespace: {{ include "falco.namespace" . }}
{{- end }}
labels:
{{- include "falco.labels" . | nindent 4 }}
grafana_dashboard: "1"
{{- if .Values.grafana.dashboards.configMaps.falco.folder }}
annotations:
k8s-sidecar-target-directory: /tmp/dashboards/{{ .Values.grafana.dashboards.configMaps.falco.folder}}
grafana_dashboard_folder: {{ .Values.grafana.dashboards.configMaps.falco.folder }}
{{- end }}
data:
falco-dashboard.json: |-
{{- .Files.Get "dashboards/falco-dashboard.json" | nindent 4 }}
{{- end -}}
@ -1,14 +0,0 @@
{{- if or .Values.falcoctl.artifact.install.enabled .Values.falcoctl.artifact.follow.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "falco.fullname" . }}-falcoctl
namespace: {{ include "falco.namespace" . }}
labels:
{{- include "falco.labels" . | nindent 4 }}
data:
falcoctl.yaml: |-
{{- include "k8smeta.configuration" . -}}
{{- include "falco.containerPlugin" . -}}
{{- toYaml .Values.falcoctl.config | nindent 4 }}
{{- end }}
@ -1,16 +0,0 @@
{{- if and .Values.falco.grpc.enabled .Values.falco.grpc.bind_address (not (hasPrefix "unix://" .Values.falco.grpc.bind_address)) }}
kind: Service
apiVersion: v1
metadata:
name: {{ include "falco.fullname" . }}-grpc
namespace: {{ include "falco.namespace" . }}
labels:
{{- include "falco.labels" . | nindent 4 }}
spec:
clusterIP: None
selector:
{{- include "falco.selectorLabels" . | nindent 4 }}
ports:
- protocol: TCP
port: {{ include "grpc.port" . }}
{{- end }}
@ -1,423 +0,0 @@
{{- define "falco.podTemplate" -}}
metadata:
name: {{ include "falco.fullname" . }}
labels:
{{- include "falco.selectorLabels" . | nindent 4 }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
annotations:
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
checksum/rules: {{ include (print $.Template.BasePath "/rules-configmap.yaml") . | sha256sum }}
{{- if and .Values.certs (not .Values.certs.existingSecret) }}
checksum/certs: {{ include (print $.Template.BasePath "/certs-secret.yaml") . | sha256sum }}
{{- end }}
{{- if .Values.driver.enabled }}
{{- if (or (eq .Values.driver.kind "modern_ebpf") (eq .Values.driver.kind "modern-bpf")) }}
{{- if .Values.driver.modernEbpf.leastPrivileged }}
container.apparmor.security.beta.kubernetes.io/{{ .Chart.Name }}: unconfined
{{- end }}
{{- else if eq .Values.driver.kind "ebpf" }}
{{- if .Values.driver.ebpf.leastPrivileged }}
container.apparmor.security.beta.kubernetes.io/{{ .Chart.Name }}: unconfined
{{- end }}
{{- end }}
{{- end }}
{{- with .Values.podAnnotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if .Values.falco.podHostname }}
hostname: {{ .Values.falco.podHostname }}
{{- end }}
serviceAccountName: {{ include "falco.serviceAccountName" . }}
{{- with .Values.podSecurityContext }}
securityContext:
{{- toYaml . | nindent 4}}
{{- end }}
{{- if .Values.driver.enabled }}
{{- if and (eq .Values.driver.kind "ebpf") .Values.driver.ebpf.hostNetwork }}
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
{{- end }}
{{- end }}
{{- if .Values.podPriorityClassName }}
priorityClassName: {{ .Values.podPriorityClassName }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if eq .Values.driver.kind "gvisor" }}
hostNetwork: true
hostPID: true
dnsPolicy: ClusterFirstWithHostNet
{{- end }}
containers:
- name: {{ .Chart.Name }}
image: {{ include "falco.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
resources:
{{- toYaml .Values.resources | nindent 8 }}
securityContext:
{{- include "falco.securityContext" . | nindent 8 }}
args:
- /usr/bin/falco
{{- include "falco.configSyscallSource" . | indent 8 }}
{{- with .Values.extra.args }}
{{- toYaml . | nindent 8 }}
{{- end }}
env:
- name: HOST_ROOT
value: /host
- name: FALCO_HOSTNAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: FALCO_K8S_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
{{- if .Values.extra.env }}
{{- include "falco.renderTemplate" ( dict "value" .Values.extra.env "context" $) | nindent 8 }}
{{- end }}
tty: {{ .Values.tty }}
{{- if .Values.falco.webserver.enabled }}
ports:
- containerPort: {{ .Values.falco.webserver.listen_port }}
name: web
protocol: TCP
livenessProbe:
initialDelaySeconds: {{ .Values.healthChecks.livenessProbe.initialDelaySeconds }}
timeoutSeconds: {{ .Values.healthChecks.livenessProbe.timeoutSeconds }}
periodSeconds: {{ .Values.healthChecks.livenessProbe.periodSeconds }}
httpGet:
path: {{ .Values.falco.webserver.k8s_healthz_endpoint }}
port: {{ .Values.falco.webserver.listen_port }}
{{- if .Values.falco.webserver.ssl_enabled }}
scheme: HTTPS
{{- end }}
readinessProbe:
initialDelaySeconds: {{ .Values.healthChecks.readinessProbe.initialDelaySeconds }}
timeoutSeconds: {{ .Values.healthChecks.readinessProbe.timeoutSeconds }}
periodSeconds: {{ .Values.healthChecks.readinessProbe.periodSeconds }}
httpGet:
path: {{ .Values.falco.webserver.k8s_healthz_endpoint }}
port: {{ .Values.falco.webserver.listen_port }}
{{- if .Values.falco.webserver.ssl_enabled }}
scheme: HTTPS
{{- end }}
{{- end }}
volumeMounts:
{{- include "falco.containerPluginVolumeMounts" . | nindent 8 -}}
{{- if or .Values.falcoctl.artifact.install.enabled .Values.falcoctl.artifact.follow.enabled }}
{{- if has "rulesfile" .Values.falcoctl.config.artifact.allowedTypes }}
- mountPath: /etc/falco
name: rulesfiles-install-dir
{{- end }}
{{- if has "plugin" .Values.falcoctl.config.artifact.allowedTypes }}
- mountPath: /usr/share/falco/plugins
name: plugins-install-dir
{{- end }}
{{- end }}
{{- if eq (include "driverLoader.enabled" .) "true" }}
- mountPath: /etc/falco/config.d
name: specialized-falco-configs
{{- end }}
- mountPath: /root/.falco
name: root-falco-fs
- mountPath: /host/proc
name: proc-fs
{{- if and .Values.driver.enabled (not .Values.driver.loader.enabled) }}
readOnly: true
- mountPath: /host/boot
name: boot-fs
readOnly: true
- mountPath: /host/lib/modules
name: lib-modules
- mountPath: /host/usr
name: usr-fs
readOnly: true
{{- end }}
{{- if .Values.driver.enabled }}
- mountPath: /host/etc
name: etc-fs
readOnly: true
{{- end -}}
{{- if and .Values.driver.enabled (or (eq .Values.driver.kind "kmod") (eq .Values.driver.kind "module") (eq .Values.driver.kind "auto")) }}
- mountPath: /host/dev
name: dev-fs
readOnly: true
- name: sys-fs
mountPath: /sys/module
{{- end }}
{{- if and .Values.driver.enabled (and (eq .Values.driver.kind "ebpf") (contains "falco-no-driver" .Values.image.repository)) }}
- name: debugfs
mountPath: /sys/kernel/debug
{{- end }}
- mountPath: /etc/falco/falco.yaml
name: falco-yaml
subPath: falco.yaml
{{- if .Values.customRules }}
- mountPath: /etc/falco/rules.d
name: rules-volume
{{- end }}
{{- if or .Values.certs.existingSecret (and .Values.certs.server.key .Values.certs.server.crt .Values.certs.ca.crt) }}
- mountPath: /etc/falco/certs
name: certs-volume
readOnly: true
{{- end }}
{{- if or .Values.certs.existingClientSecret (and .Values.certs.client.key .Values.certs.client.crt .Values.certs.ca.crt) }}
- mountPath: /etc/falco/certs/client
name: client-certs-volume
readOnly: true
{{- end }}
{{- include "falco.unixSocketVolumeMount" . | nindent 8 -}}
{{- with .Values.mounts.volumeMounts }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if eq .Values.driver.kind "gvisor" }}
- mountPath: /usr/local/bin/runsc
name: runsc-path
readOnly: true
- mountPath: /host{{ .Values.driver.gvisor.runsc.root }}
name: runsc-root
- mountPath: /host{{ .Values.driver.gvisor.runsc.config }}
name: runsc-config
- mountPath: /gvisor-config
name: falco-gvisor-config
{{- end }}
{{- if .Values.falcoctl.artifact.follow.enabled }}
{{- include "falcoctl.sidecar" . | nindent 4 }}
{{- end }}
initContainers:
{{- with .Values.extra.initContainers }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if eq .Values.driver.kind "gvisor" }}
{{- include "falco.gvisor.initContainer" . | nindent 4 }}
{{- end }}
{{- if eq (include "driverLoader.enabled" .) "true" }}
{{- include "falco.driverLoader.initContainer" . | nindent 4 }}
{{- end }}
{{- if .Values.falcoctl.artifact.install.enabled }}
{{- include "falcoctl.initContainer" . | nindent 4 }}
{{- end }}
volumes:
{{- include "falco.containerPluginVolumes" . | nindent 4 -}}
{{- if eq (include "driverLoader.enabled" .) "true" }}
- name: specialized-falco-configs
emptyDir: {}
{{- end }}
{{- if or .Values.falcoctl.artifact.install.enabled .Values.falcoctl.artifact.follow.enabled }}
- name: plugins-install-dir
emptyDir: {}
- name: rulesfiles-install-dir
emptyDir: {}
{{- end }}
- name: root-falco-fs
emptyDir: {}
{{- if .Values.driver.enabled }}
- name: boot-fs
hostPath:
path: /boot
- name: lib-modules
hostPath:
path: /lib/modules
- name: usr-fs
hostPath:
path: /usr
- name: etc-fs
hostPath:
path: /etc
{{- end }}
{{- if and .Values.driver.enabled (or (eq .Values.driver.kind "kmod") (eq .Values.driver.kind "module") (eq .Values.driver.kind "auto")) }}
- name: dev-fs
hostPath:
path: /dev
- name: sys-fs
hostPath:
path: /sys/module
{{- end }}
{{- if and .Values.driver.enabled (and (eq .Values.driver.kind "ebpf") (contains "falco-no-driver" .Values.image.repository)) }}
- name: debugfs
hostPath:
path: /sys/kernel/debug
{{- end }}
- name: proc-fs
hostPath:
path: /proc
{{- if eq .Values.driver.kind "gvisor" }}
- name: runsc-path
hostPath:
path: {{ .Values.driver.gvisor.runsc.path }}/runsc
type: File
- name: runsc-root
hostPath:
path: {{ .Values.driver.gvisor.runsc.root }}
- name: runsc-config
hostPath:
path: {{ .Values.driver.gvisor.runsc.config }}
type: File
- name: falco-gvisor-config
emptyDir: {}
{{- end }}
- name: falcoctl-config-volume
configMap:
name: {{ include "falco.fullname" . }}-falcoctl
items:
- key: falcoctl.yaml
path: falcoctl.yaml
- name: falco-yaml
configMap:
name: {{ include "falco.fullname" . }}
items:
- key: falco.yaml
path: falco.yaml
{{- if .Values.customRules }}
- name: rules-volume
configMap:
name: {{ include "falco.fullname" . }}-rules
{{- end }}
{{- if or .Values.certs.existingSecret (and .Values.certs.server.key .Values.certs.server.crt .Values.certs.ca.crt) }}
- name: certs-volume
secret:
{{- if .Values.certs.existingSecret }}
secretName: {{ .Values.certs.existingSecret }}
{{- else }}
secretName: {{ include "falco.fullname" . }}-certs
{{- end }}
{{- end }}
{{- if or .Values.certs.existingClientSecret (and .Values.certs.client.key .Values.certs.client.crt .Values.certs.ca.crt) }}
- name: client-certs-volume
secret:
{{- if .Values.certs.existingClientSecret }}
secretName: {{ .Values.certs.existingClientSecret }}
{{- else }}
secretName: {{ include "falco.fullname" . }}-client-certs
{{- end }}
{{- end }}
{{- include "falco.unixSocketVolume" . | nindent 4 -}}
{{- with .Values.mounts.volumes }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end -}}
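{{- /*
falco.driverLoader.initContainer renders the init container that builds or
downloads the Falco driver (kernel module or eBPF probe) on the node before
the main Falco container starts. It shares /root/.falco and the host
filesystems with the main container and, when driver.kind is "auto", points
falcoctl at the chart's ConfigMap so the detected driver kind can be persisted.
*/ -}}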
{{- define "falco.driverLoader.initContainer" -}}
- name: {{ .Chart.Name }}-driver-loader
image: {{ include "falco.driverLoader.image" . }}
imagePullPolicy: {{ .Values.driver.loader.initContainer.image.pullPolicy }}
args:
{{- with .Values.driver.loader.initContainer.args }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if eq .Values.driver.kind "module" }}
- kmod
{{- else if eq .Values.driver.kind "modern-bpf"}}
- modern_ebpf
{{- else }}
- {{ .Values.driver.kind }}
{{- end }}
{{- with .Values.driver.loader.initContainer.resources }}
resources:
{{- toYaml . | nindent 4 }}
{{- end }}
securityContext:
{{- if .Values.driver.loader.initContainer.securityContext }}
{{- toYaml .Values.driver.loader.initContainer.securityContext | nindent 4 }}
{{- else if (or (eq .Values.driver.kind "kmod") (eq .Values.driver.kind "module") (eq .Values.driver.kind "auto")) }}
privileged: true
{{- end }}
volumeMounts:
- mountPath: /root/.falco
name: root-falco-fs
- mountPath: /host/proc
name: proc-fs
readOnly: true
- mountPath: /host/boot
name: boot-fs
readOnly: true
- mountPath: /host/lib/modules
name: lib-modules
- mountPath: /host/usr
name: usr-fs
readOnly: true
- mountPath: /host/etc
name: etc-fs
readOnly: true
- mountPath: /etc/falco/config.d
name: specialized-falco-configs
env:
- name: HOST_ROOT
value: /host
{{- if .Values.driver.loader.initContainer.env }}
{{- include "falco.renderTemplate" ( dict "value" .Values.driver.loader.initContainer.env "context" $) | nindent 4 }}
{{- end }}
{{- if eq .Values.driver.kind "auto" }}
- name: FALCOCTL_DRIVER_CONFIG_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: FALCOCTL_DRIVER_CONFIG_CONFIGMAP
value: {{ include "falco.fullname" . }}
{{- else }}
- name: FALCOCTL_DRIVER_CONFIG_UPDATE_FALCO
value: "false"
{{- end }}
{{- end -}}
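{{- /*
falco.securityContext computes the security context for the Falco container:
privileged for the kernel module (and as a fallback for the eBPF drivers), or
a reduced capability set when leastPrivileged is enabled. A user-supplied
containerSecurityContext takes precedence over the computed one.
*/ -}}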
{{- define "falco.securityContext" -}}
{{- $securityContext := dict -}}
{{- if .Values.driver.enabled -}}
{{- if (or (eq .Values.driver.kind "kmod") (eq .Values.driver.kind "module") (eq .Values.driver.kind "auto")) -}}
{{- $securityContext := set $securityContext "privileged" true -}}
{{- end -}}
{{- if eq .Values.driver.kind "ebpf" -}}
{{- if .Values.driver.ebpf.leastPrivileged -}}
{{- $securityContext := set $securityContext "capabilities" (dict "add" (list "SYS_ADMIN" "SYS_RESOURCE" "SYS_PTRACE")) -}}
{{- else -}}
{{- $securityContext := set $securityContext "privileged" true -}}
{{- end -}}
{{- end -}}
{{- if (or (eq .Values.driver.kind "modern_ebpf") (eq .Values.driver.kind "modern-bpf")) -}}
{{- if .Values.driver.modernEbpf.leastPrivileged -}}
{{- $securityContext := set $securityContext "capabilities" (dict "add" (list "BPF" "SYS_RESOURCE" "PERFMON" "SYS_PTRACE")) -}}
{{- else -}}
{{- $securityContext := set $securityContext "privileged" true -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- if not (empty (.Values.containerSecurityContext)) -}}
{{- toYaml .Values.containerSecurityContext }}
{{- else -}}
{{- toYaml $securityContext }}
{{- end -}}
{{- end -}}
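{{- /*
falco.unixSocketVolumeMount mounts the directory hosting the gRPC unix socket
when falco.grpc.bind_address is a unix:// address.
*/ -}}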
{{- define "falco.unixSocketVolumeMount" -}}
{{- if and .Values.falco.grpc.enabled .Values.falco.grpc.bind_address (hasPrefix "unix://" .Values.falco.grpc.bind_address) }}
- mountPath: {{ include "falco.unixSocketDir" . }}
name: grpc-socket-dir
{{- end }}
{{- end -}}
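{{- /*
falco.unixSocketVolume declares the matching hostPath volume for the gRPC
unix socket directory.
*/ -}}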
{{- define "falco.unixSocketVolume" -}}
{{- if and .Values.falco.grpc.enabled .Values.falco.grpc.bind_address (hasPrefix "unix://" .Values.falco.grpc.bind_address) }}
- name: grpc-socket-dir
hostPath:
path: {{ include "falco.unixSocketDir" . }}
{{- end }}
{{- end -}}

View File

@ -1,17 +0,0 @@
{{- if and .Values.rbac.create (eq .Values.driver.kind "auto")}}
kind: Role
apiVersion: {{ include "rbac.apiVersion" . }}
metadata:
name: {{ include "falco.fullname" . }}
labels:
{{- include "falco.labels" . | nindent 4 }}
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- update
{{- end }}

View File

@ -1,16 +0,0 @@
{{- if and .Values.rbac.create (eq .Values.driver.kind "auto")}}
kind: RoleBinding
apiVersion: {{ include "rbac.apiVersion" . }}
metadata:
name: {{ include "falco.fullname" . }}
labels:
{{- include "falco.labels" . | nindent 4 }}
subjects:
- kind: ServiceAccount
name: {{ include "falco.serviceAccountName" . }}
namespace: {{ include "falco.namespace" . }}
roleRef:
kind: Role
name: {{ include "falco.fullname" . }}
apiGroup: rbac.authorization.k8s.io
{{- end }}

View File

@ -1,14 +0,0 @@
{{- if .Values.customRules }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "falco.fullname" . }}-rules
namespace: {{ include "falco.namespace" . }}
labels:
{{- include "falco.labels" . | nindent 4 }}
data:
{{- range $file, $content := .Values.customRules }}
{{ $file }}: |-
{{ $content | indent 4}}
{{- end }}
{{- end }}

View File

@ -1,26 +0,0 @@
{{- if and .Values.metrics.enabled .Values.metrics.service.create }}
apiVersion: v1
kind: Service
metadata:
name: {{ include "falco.fullname" . }}-metrics
namespace: {{ include "falco.namespace" . }}
labels:
{{- include "falco.labels" . | nindent 4 }}
{{- with .Values.metrics.service.labels }}
{{ toYaml . | nindent 4 }}
{{- end }}
type: "falco-metrics"
{{- with .Values.metrics.service.annotations }}
annotations:
{{ toYaml . | nindent 4 }}
{{- end }}
spec:
type: {{ .Values.metrics.service.type }}
ports:
- port: {{ .Values.metrics.service.ports.metrics.port }}
targetPort: {{ .Values.metrics.service.ports.metrics.targetPort }}
protocol: {{ .Values.metrics.service.ports.metrics.protocol }}
name: "metrics"
selector:
{{- include "falco.selectorLabels" . | nindent 4 }}
{{- end }}

View File

@ -1,51 +0,0 @@
{{- if .Values.serviceMonitor.create }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ include "falco.fullname" . }}
{{- if .Values.serviceMonitor.namespace }}
namespace: {{ tpl .Values.serviceMonitor.namespace . }}
{{- else }}
namespace: {{ include "falco.namespace" . }}
{{- end }}
labels:
{{- include "falco.labels" . | nindent 4 }}
{{- with .Values.serviceMonitor.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
endpoints:
- port: "{{ .Values.serviceMonitor.endpointPort }}"
{{- with .Values.serviceMonitor.interval }}
interval: {{ . }}
{{- end }}
{{- with .Values.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ . }}
{{- end }}
honorLabels: true
path: {{ .Values.serviceMonitor.path }}
scheme: {{ .Values.serviceMonitor.scheme }}
{{- with .Values.serviceMonitor.tlsConfig }}
tlsConfig:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.serviceMonitor.relabelings }}
relabelings:
{{- toYaml . | nindent 8 }}
{{- end }}
jobLabel: "{{ .Release.Name }}"
selector:
matchLabels:
{{- include "falco.selectorLabels" . | nindent 6 }}
{{- with .Values.serviceMonitor.selector }}
{{- toYaml . | nindent 6 }}
{{- end }}
type: "falco-metrics"
namespaceSelector:
matchNames:
- {{ include "falco.namespace" . }}
{{- with .Values.serviceMonitor.targetLabels }}
targetLabels:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

View File

@ -1,18 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
{{- with .Values.serviceAccount.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 2 }}
{{- end }}
kind: ServiceAccount
metadata:
name: {{ include "falco.serviceAccountName" . }}
namespace: {{ include "falco.namespace" . }}
labels:
{{- include "falco.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

View File

@ -1,18 +0,0 @@
{{- with $dot := . }}
{{- range $service := $dot.Values.services }}
---
apiVersion: v1
kind: Service
metadata:
name: {{ include "falco.fullname" $dot }}-{{ $service.name }}
namespace: {{ include "falco.namespace" $dot }}
labels:
{{- include "falco.labels" $dot | nindent 4 }}
spec:
{{- with $service }}
{{- omit . "name" "selector" | toYaml | nindent 2 }}
{{- end}}
selector:
{{- include "falco.selectorLabels" $dot | nindent 4 }}
{{- end }}
{{- end }}

View File

@ -1,35 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright 2024 The Falco Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package unit
import (
"testing"
"github.com/gruntwork-io/terratest/modules/helm"
"gopkg.in/yaml.v3"
)
// ChartInfo returns the chart's metadata, as reported by "helm show chart".
func ChartInfo(t *testing.T, chartPath string) (map[string]interface{}, error) {
// Get chart info.
output, err := helm.RunHelmCommandAndGetOutputE(t, &helm.Options{}, "show", "chart", chartPath)
if err != nil {
return nil, err
}
chartInfo := map[string]interface{}{}
err = yaml.Unmarshal([]byte(output), &chartInfo)
return chartInfo, err
}

View File

@ -1,29 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright 2024 The Falco Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package unit
const (
// ReleaseName is the name of the release we expect in the rendered resources.
ReleaseName = "rendered-resources"
// PatternK8sMetacollectorFiles is the regex pattern we expect to find in the rendered resources.
PatternK8sMetacollectorFiles = `# Source: falco/charts/k8s-metacollector/templates/([^\n]+)`
// K8sMetaPluginName is the name of the k8smeta plugin we expect in the falco configuration.
K8sMetaPluginName = "k8smeta"
// ContainerPluginName is the name of the container plugin we expect in the falco configuration.
ContainerPluginName = "container"
// ChartPath is the path to the chart.
ChartPath = "../../.."
)

View File

@ -1,13 +0,0 @@
package containerPlugin
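// volumeNames lists every socket volume name the chart may generate for the
// container runtime collectors; the tests use it to pick the plugin's volume
// mounts out of the rendered daemonset.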
var volumeNames = []string{
"docker-socket",
"containerd-socket",
"crio-socket",
"container-engine-socket-0",
"container-engine-socket-1",
"container-engine-socket-2",
"container-engine-socket-3",
"container-engine-socket-4",
"container-engine-socket-5",
}

View File

@ -1,767 +0,0 @@
package containerPlugin
import (
"path/filepath"
"slices"
"testing"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
corev1 "k8s.io/api/core/v1"
"github.com/falcosecurity/charts/charts/falco/tests/unit"
"github.com/gruntwork-io/terratest/modules/helm"
)
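// TestContainerPluginConfiguration renders templates/configmap.yaml with
// different collector values and checks the container plugin entry that ends
// up in falco.yaml, including the per-engine socket configuration.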
func TestContainerPluginConfiguration(t *testing.T) {
t.Parallel()
helmChartPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
testCases := []struct {
name string
values map[string]string
expected func(t *testing.T, config any)
}{
{
"defaultValues",
nil,
func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
initConfig, ok := plugin["init_config"]
require.True(t, ok)
initConfigMap := initConfig.(map[string]interface{})
// Check engines configurations.
engines, ok := initConfigMap["engines"].(map[string]interface{})
require.True(t, ok, "checking if engines section exists")
require.Len(t, engines, 7, "checking number of engines")
var engineConfig ContainerEngineConfig
// Unmarshal the engines configuration.
data, err := yaml.Marshal(engines)
require.NoError(t, err)
err = yaml.Unmarshal(data, &engineConfig)
require.NoError(t, err)
// Check the default values for each engine.
require.True(t, engineConfig.Docker.Enabled)
require.Equal(t, []string{"/var/run/docker.sock"}, engineConfig.Docker.Sockets)
require.True(t, engineConfig.Podman.Enabled)
require.Equal(t, []string{"/run/podman/podman.sock"}, engineConfig.Podman.Sockets)
require.True(t, engineConfig.Containerd.Enabled)
require.Equal(t, []string{"/run/host-containerd/containerd.sock"}, engineConfig.Containerd.Sockets)
require.True(t, engineConfig.CRI.Enabled)
require.Equal(t, []string{"/run/containerd/containerd.sock", "/run/crio/crio.sock", "/run/k3s/containerd/containerd.sock", "/run/host-containerd/containerd.sock"}, engineConfig.CRI.Sockets)
require.True(t, engineConfig.LXC.Enabled)
require.True(t, engineConfig.LibvirtLXC.Enabled)
require.True(t, engineConfig.BPM.Enabled)
},
},
{
name: "changeDockerSocket",
values: map[string]string{
"collectors.containerEngine.engines.docker.enabled": "true",
"collectors.containerEngine.engines.docker.sockets[0]": "/custom/docker.sock",
},
expected: func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
initConfig, ok := plugin["init_config"]
require.True(t, ok)
initConfigMap := initConfig.(map[string]interface{})
engines, ok := initConfigMap["engines"].(map[string]interface{})
require.True(t, ok)
var engineConfig ContainerEngineConfig
data, err := yaml.Marshal(engines)
require.NoError(t, err)
err = yaml.Unmarshal(data, &engineConfig)
require.NoError(t, err)
require.True(t, engineConfig.Docker.Enabled)
require.Equal(t, []string{"/custom/docker.sock"}, engineConfig.Docker.Sockets)
},
},
{
name: "changeCriSocket",
values: map[string]string{
"collectors.containerEngine.engines.cri.enabled": "true",
"collectors.containerEngine.engines.cri.sockets[0]": "/custom/cri.sock",
},
expected: func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
initConfig, ok := plugin["init_config"]
require.True(t, ok)
initConfigMap := initConfig.(map[string]interface{})
engines, ok := initConfigMap["engines"].(map[string]interface{})
require.True(t, ok)
var engineConfig ContainerEngineConfig
data, err := yaml.Marshal(engines)
require.NoError(t, err)
err = yaml.Unmarshal(data, &engineConfig)
require.NoError(t, err)
require.True(t, engineConfig.CRI.Enabled)
require.Equal(t, []string{"/custom/cri.sock"}, engineConfig.CRI.Sockets)
},
},
{
name: "disableDockerSocket",
values: map[string]string{
"collectors.containerEngine.engines.docker.enabled": "false",
},
expected: func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
initConfig, ok := plugin["init_config"]
require.True(t, ok)
initConfigMap := initConfig.(map[string]interface{})
engines, ok := initConfigMap["engines"].(map[string]interface{})
require.True(t, ok)
var engineConfig ContainerEngineConfig
data, err := yaml.Marshal(engines)
require.NoError(t, err)
err = yaml.Unmarshal(data, &engineConfig)
require.NoError(t, err)
require.False(t, engineConfig.Docker.Enabled)
},
},
{
name: "disableCriSocket",
values: map[string]string{
"collectors.containerEngine.engines.cri.enabled": "false",
},
expected: func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
initConfig, ok := plugin["init_config"]
require.True(t, ok)
initConfigMap := initConfig.(map[string]interface{})
engines, ok := initConfigMap["engines"].(map[string]interface{})
require.True(t, ok)
var engineConfig ContainerEngineConfig
data, err := yaml.Marshal(engines)
require.NoError(t, err)
err = yaml.Unmarshal(data, &engineConfig)
require.NoError(t, err)
require.False(t, engineConfig.CRI.Enabled)
},
},
{
name: "changeContainerdSocket",
values: map[string]string{
"collectors.containerEngine.engines.containerd.enabled": "true",
"collectors.containerEngine.engines.containerd.sockets[0]": "/custom/containerd.sock",
},
expected: func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
initConfig, ok := plugin["init_config"]
require.True(t, ok)
initConfigMap := initConfig.(map[string]interface{})
engines, ok := initConfigMap["engines"].(map[string]interface{})
require.True(t, ok)
var engineConfig ContainerEngineConfig
data, err := yaml.Marshal(engines)
require.NoError(t, err)
err = yaml.Unmarshal(data, &engineConfig)
require.NoError(t, err)
require.True(t, engineConfig.Containerd.Enabled)
require.Equal(t, []string{"/custom/containerd.sock"}, engineConfig.Containerd.Sockets)
},
},
{
name: "disableContainerdSocket",
values: map[string]string{
"collectors.containerEngine.engines.containerd.enabled": "false",
},
expected: func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
initConfig, ok := plugin["init_config"]
require.True(t, ok)
initConfigMap := initConfig.(map[string]interface{})
engines, ok := initConfigMap["engines"].(map[string]interface{})
require.True(t, ok)
var engineConfig ContainerEngineConfig
data, err := yaml.Marshal(engines)
require.NoError(t, err)
err = yaml.Unmarshal(data, &engineConfig)
require.NoError(t, err)
require.False(t, engineConfig.Containerd.Enabled)
},
},
{
name: "defaultContainerEngineConfig",
values: map[string]string{},
expected: func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
initConfig, ok := plugin["init_config"]
require.True(t, ok)
initConfigMap := initConfig.(map[string]interface{})
require.Equal(t, float64(100), initConfigMap["label_max_len"])
require.False(t, initConfigMap["with_size"].(bool))
hooks := initConfigMap["hooks"].([]interface{})
require.Len(t, hooks, 1)
require.Contains(t, hooks, "create")
engines := initConfigMap["engines"].(map[string]interface{})
var engineConfig ContainerEngineConfig
data, err := yaml.Marshal(engines)
require.NoError(t, err)
err = yaml.Unmarshal(data, &engineConfig)
require.NoError(t, err)
// Check default engine configurations
require.True(t, engineConfig.Docker.Enabled)
require.Equal(t, []string{"/var/run/docker.sock"}, engineConfig.Docker.Sockets)
require.True(t, engineConfig.Podman.Enabled)
require.Equal(t, []string{"/run/podman/podman.sock"}, engineConfig.Podman.Sockets)
require.True(t, engineConfig.Containerd.Enabled)
require.Equal(t, []string{"/run/host-containerd/containerd.sock"}, engineConfig.Containerd.Sockets)
require.True(t, engineConfig.CRI.Enabled)
require.Equal(t, []string{"/run/containerd/containerd.sock", "/run/crio/crio.sock", "/run/k3s/containerd/containerd.sock", "/run/host-containerd/containerd.sock"}, engineConfig.CRI.Sockets)
require.True(t, engineConfig.LXC.Enabled)
require.True(t, engineConfig.LibvirtLXC.Enabled)
require.True(t, engineConfig.BPM.Enabled)
},
},
{
name: "customContainerEngineConfig",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "false",
"collectors.containerEngine.enabled": "true",
"collectors.containerEngine.labelMaxLen": "200",
"collectors.containerEngine.withSize": "true",
"collectors.containerEngine.hooks[0]": "create",
"collectors.containerEngine.hooks[1]": "start",
"collectors.containerEngine.engines.docker.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "false",
"collectors.containerEngine.engines.containerd.sockets[0]": "/custom/containerd.sock",
"collectors.containerEngine.engines.cri.sockets[0]": "/custom/crio.sock",
"collectors.containerEngine.engines.lxc.enabled": "false",
"collectors.containerEngine.engines.libvirt_lxc.enabled": "false",
"collectors.containerEngine.engines.bpm.enabled": "false",
},
expected: func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
initConfig, ok := plugin["init_config"]
require.True(t, ok)
initConfigMap := initConfig.(map[string]interface{})
require.Equal(t, float64(200), initConfigMap["label_max_len"])
require.True(t, initConfigMap["with_size"].(bool))
hooks := initConfigMap["hooks"].([]interface{})
require.Len(t, hooks, 2)
require.Contains(t, hooks, "create")
require.Contains(t, hooks, "start")
engines := initConfigMap["engines"].(map[string]interface{})
var engineConfig ContainerEngineConfig
data, err := yaml.Marshal(engines)
require.NoError(t, err)
err = yaml.Unmarshal(data, &engineConfig)
require.NoError(t, err)
// Check custom engine configurations
require.False(t, engineConfig.Docker.Enabled)
require.False(t, engineConfig.Podman.Enabled)
require.True(t, engineConfig.Containerd.Enabled)
require.Equal(t, []string{"/custom/containerd.sock"}, engineConfig.Containerd.Sockets)
require.True(t, engineConfig.CRI.Enabled)
require.Equal(t, []string{"/custom/crio.sock"}, engineConfig.CRI.Sockets)
require.False(t, engineConfig.LXC.Enabled)
require.False(t, engineConfig.LibvirtLXC.Enabled)
require.False(t, engineConfig.BPM.Enabled)
},
},
{
name: "customDockerEngineConfigInContainerEngine",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "false",
"collectors.containerEngine.enabled": "true",
"collectors.containerEngine.engines.docker.enabled": "false",
"collectors.containerEngine.engines.docker.sockets[0]": "/custom/docker.sock",
"collectors.containerEngine.engines.docker.sockets[1]": "/custom/docker.sock2",
},
expected: func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
initConfig, ok := plugin["init_config"]
require.True(t, ok)
initConfigMap := initConfig.(map[string]interface{})
engines, ok := initConfigMap["engines"].(map[string]interface{})
require.True(t, ok)
var engineConfig ContainerEngineConfig
data, err := yaml.Marshal(engines)
require.NoError(t, err)
err = yaml.Unmarshal(data, &engineConfig)
require.NoError(t, err)
// Check Docker engine configuration
require.False(t, engineConfig.Docker.Enabled)
require.Equal(t, []string{"/custom/docker.sock", "/custom/docker.sock2"}, engineConfig.Docker.Sockets)
},
},
{
name: "customContainerdEngineConfigInContainerEngine",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "false",
"collectors.containerEngine.enabled": "true",
"collectors.containerEngine.engines.containerd.enabled": "false",
"collectors.containerEngine.engines.containerd.sockets[0]": "/custom/containerd.sock",
"collectors.containerEngine.engines.containerd.sockets[1]": "/custom/containerd.sock2",
},
expected: func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
initConfig, ok := plugin["init_config"]
require.True(t, ok)
initConfigMap := initConfig.(map[string]interface{})
engines, ok := initConfigMap["engines"].(map[string]interface{})
require.True(t, ok)
var engineConfig ContainerEngineConfig
data, err := yaml.Marshal(engines)
require.NoError(t, err)
err = yaml.Unmarshal(data, &engineConfig)
require.NoError(t, err)
// Check Containerd engine configuration
require.False(t, engineConfig.Containerd.Enabled)
require.Equal(t, []string{"/custom/containerd.sock", "/custom/containerd.sock2"}, engineConfig.Containerd.Sockets)
},
},
{
name: "customPodmanEngineConfigInContainerEngine",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "false",
"collectors.containerEngine.enabled": "true",
"collectors.containerEngine.engines.podman.enabled": "true",
"collectors.containerEngine.engines.podman.sockets[0]": "/custom/podman.sock",
"collectors.containerEngine.engines.podman.sockets[1]": "/custom/podman.sock2",
},
expected: func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
initConfig, ok := plugin["init_config"]
require.True(t, ok)
initConfigMap := initConfig.(map[string]interface{})
engines, ok := initConfigMap["engines"].(map[string]interface{})
require.True(t, ok)
var engineConfig ContainerEngineConfig
data, err := yaml.Marshal(engines)
require.NoError(t, err)
err = yaml.Unmarshal(data, &engineConfig)
require.NoError(t, err)
// Check Podman engine configuration
require.True(t, engineConfig.Podman.Enabled)
require.Equal(t, []string{"/custom/podman.sock", "/custom/podman.sock2"}, engineConfig.Podman.Sockets)
},
},
{
name: "customCRIEngineConfigInContainerEngine",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "false",
"collectors.containerEngine.enabled": "true",
"collectors.containerEngine.engines.cri.enabled": "true",
"collectors.containerEngine.engines.cri.sockets[0]": "/custom/cri.sock",
"collectors.containerEngine.engines.cri.sockets[1]": "/custom/cri.sock2",
},
expected: func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
initConfig, ok := plugin["init_config"]
require.True(t, ok)
initConfigMap := initConfig.(map[string]interface{})
engines, ok := initConfigMap["engines"].(map[string]interface{})
require.True(t, ok)
var engineConfig ContainerEngineConfig
data, err := yaml.Marshal(engines)
require.NoError(t, err)
err = yaml.Unmarshal(data, &engineConfig)
require.NoError(t, err)
// Check CRI engine configuration
require.True(t, engineConfig.CRI.Enabled)
require.Equal(t, []string{"/custom/cri.sock", "/custom/cri.sock2"}, engineConfig.CRI.Sockets)
},
},
{
name: "customLXCEngineConfigInContainerEngine",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "false",
"collectors.containerEngine.enabled": "true",
"collectors.containerEngine.engines.lxc.enabled": "true",
},
expected: func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
initConfig, ok := plugin["init_config"]
require.True(t, ok)
initConfigMap := initConfig.(map[string]interface{})
engines, ok := initConfigMap["engines"].(map[string]interface{})
require.True(t, ok)
var engineConfig ContainerEngineConfig
data, err := yaml.Marshal(engines)
require.NoError(t, err)
err = yaml.Unmarshal(data, &engineConfig)
require.NoError(t, err)
// Check LXC engine configuration
require.True(t, engineConfig.LXC.Enabled)
},
},
{
name: "customLibvirtLXCEngineConfigInContainerEngine",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "false",
"collectors.containerEngine.enabled": "true",
"collectors.containerEngine.engines.libvirt_lxc.enabled": "true",
},
expected: func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
initConfig, ok := plugin["init_config"]
require.True(t, ok)
initConfigMap := initConfig.(map[string]interface{})
engines, ok := initConfigMap["engines"].(map[string]interface{})
require.True(t, ok)
var engineConfig ContainerEngineConfig
data, err := yaml.Marshal(engines)
require.NoError(t, err)
err = yaml.Unmarshal(data, &engineConfig)
require.NoError(t, err)
// Check LibvirtLXC engine configuration
require.True(t, engineConfig.LibvirtLXC.Enabled)
},
},
{
name: "customBPMEngineConfigInContainerEngine",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "false",
"collectors.containerEngine.enabled": "true",
"collectors.containerEngine.engines.bpm.enabled": "true",
},
expected: func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
initConfig, ok := plugin["init_config"]
require.True(t, ok)
initConfigMap := initConfig.(map[string]interface{})
engines, ok := initConfigMap["engines"].(map[string]interface{})
require.True(t, ok)
var engineConfig ContainerEngineConfig
data, err := yaml.Marshal(engines)
require.NoError(t, err)
err = yaml.Unmarshal(data, &engineConfig)
require.NoError(t, err)
// Check BPM engine configuration
require.True(t, engineConfig.BPM.Enabled)
},
},
{
name: "allCollectorsDisabled",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "false",
"collectors.containerEngine.enabled": "false",
},
expected: func(t *testing.T, config any) {
// When config is nil, it means the plugin wasn't found in the configuration
require.Nil(t, config, "container plugin should not be present in configuration when all collectors are disabled")
// If somehow the config exists (which it shouldn't), verify there are no engine configurations
if config != nil {
plugin := config.(map[string]interface{})
initConfig, ok := plugin["init_config"]
if ok {
initConfigMap := initConfig.(map[string]interface{})
engines, ok := initConfigMap["engines"]
if ok {
engineMap := engines.(map[string]interface{})
require.Empty(t, engineMap, "engines configuration should be empty when all collectors are disabled")
}
}
}
},
},
{
name: "allCollectorsDisabledTopLevel",
values: map[string]string{
"collectors.enabled": "false",
},
expected: func(t *testing.T, config any) {
// When config is nil, it means the plugin wasn't found in the configuration
require.Nil(t, config, "container plugin should not be present in configuration when all collectors are disabled")
// If somehow the config exists (which it shouldn't), verify there are no engine configurations
if config != nil {
plugin := config.(map[string]interface{})
initConfig, ok := plugin["init_config"]
if ok {
initConfigMap := initConfig.(map[string]interface{})
engines, ok := initConfigMap["engines"]
if ok {
engineMap := engines.(map[string]interface{})
require.Empty(t, engineMap, "engines configuration should be empty when all collectors are disabled")
}
}
}
},
},
}
for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.name, func(t *testing.T) {
t.Parallel()
options := &helm.Options{SetValues: testCase.values}
// Render the chart with the given options.
output := helm.RenderTemplate(t, options, helmChartPath, unit.ReleaseName, []string{"templates/configmap.yaml"})
var cm corev1.ConfigMap
// Unmarshal the output into a ConfigMap object.
helm.UnmarshalK8SYaml(t, output, &cm)
// Unmarshal the data field of the ConfigMap into a map.
var config map[string]interface{}
helm.UnmarshalK8SYaml(t, cm.Data["falco.yaml"], &config)
// Extract the container plugin configuration.
plugins, ok := config["plugins"]
require.True(t, ok, "checking if plugins section exists")
pluginsList := plugins.([]interface{})
found := false
// Get the container plugin configuration.
for _, plugin := range pluginsList {
if name, ok := plugin.(map[string]interface{})["name"]; ok && name == unit.ContainerPluginName {
testCase.expected(t, plugin)
found = true
}
}
if found {
// Check that the plugin has been added to the ones that are enabled.
loadPlugins := config["load_plugins"]
require.True(t, slices.Contains(loadPlugins.([]interface{}), unit.ContainerPluginName))
} else {
testCase.expected(t, nil)
loadPlugins := config["load_plugins"]
require.False(t, slices.Contains(loadPlugins.([]interface{}), unit.ContainerPluginName))
}
})
}
}
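// TestInvalidCollectorConfiguration ensures that enabling one of the
// deprecated [docker, containerd, crio] collectors together with the
// containerEngine collector makes template rendering fail.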
func TestInvalidCollectorConfiguration(t *testing.T) {
t.Parallel()
helmChartPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
testCases := []struct {
name string
values map[string]string
expectedErr string
}{
{
name: "dockerAndContainerEngine",
values: map[string]string{
"collectors.docker.enabled": "true",
"collectoars.containerd.enabled": "false",
"collectors.crio.enabled": "false",
"collectors.containerEngine.enabled": "true",
},
expectedErr: "You can not enable any of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time. Please use the containerEngine configuration since the old configurations are deprecated.",
},
{
name: "containerdAndContainerEngine",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "true",
"collectors.crio.enabled": "false",
"collectors.containerEngine.enabled": "true",
},
expectedErr: "You can not enable any of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time. Please use the containerEngine configuration since the old configurations are deprecated.",
},
{
name: "crioAndContainerEngine",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectoars.containerd.enabled": "false",
"collectors.crio.enabled": "true",
"collectors.containerEngine.enabled": "true",
},
expectedErr: "You can not enable any of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time. Please use the containerEngine configuration since the old configurations are deprecated.",
},
}
for _, tc := range testCases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
options := &helm.Options{
SetValues: tc.values,
}
// Attempt to render the template, expect an error
_, err := helm.RenderTemplateE(t, options, helmChartPath, unit.ReleaseName, []string{"templates/configmap.yaml"})
require.Error(t, err)
require.Contains(t, err.Error(), tc.expectedErr)
})
}
}
// TestFalcoctlRefs verifies that the falcoctl helper does not overwrite the
// user's configuration and that the container plugin reference is added to
// the falcoctl ConfigMap only when both the collectors and the driver are
// enabled.
func TestFalcoctlRefs(t *testing.T) {
t.Parallel()
refShouldBeSet := func(t *testing.T, config any) {
// Get artifact configuration map.
configMap := config.(map[string]interface{})
artifactConfig := (configMap["artifact"]).(map[string]interface{})
// Test allowed types.
allowedTypes := artifactConfig["allowedTypes"]
require.Len(t, allowedTypes, 2)
require.True(t, slices.Contains(allowedTypes.([]interface{}), "plugin"))
require.True(t, slices.Contains(allowedTypes.([]interface{}), "rulesfile"))
// Test plugin reference.
refs := artifactConfig["install"].(map[string]interface{})["refs"].([]interface{})
require.Len(t, refs, 2)
require.True(t, slices.Contains(refs, "falco-rules:4"))
require.True(t, slices.Contains(refs, "ghcr.io/falcosecurity/plugins/plugin/container:0.3.5"))
}
refShouldNotBeSet := func(t *testing.T, config any) {
// Get artifact configuration map.
configMap := config.(map[string]interface{})
artifactConfig := (configMap["artifact"]).(map[string]interface{})
// Test allowed types.
allowedTypes := artifactConfig["allowedTypes"]
require.Len(t, allowedTypes, 2)
require.True(t, slices.Contains(allowedTypes.([]interface{}), "plugin"))
require.True(t, slices.Contains(allowedTypes.([]interface{}), "rulesfile"))
// Test plugin reference.
refs := artifactConfig["install"].(map[string]interface{})["refs"].([]interface{})
require.Len(t, refs, 1)
require.True(t, slices.Contains(refs, "falco-rules:4"))
require.False(t, slices.Contains(refs, "ghcr.io/falcosecurity/plugins/plugin/container:0.3.5"))
}
testCases := []struct {
name string
values map[string]string
expected func(t *testing.T, config any)
}{
{
"defaultValues",
nil,
refShouldBeSet,
},
{
"setPluginConfiguration",
map[string]string{
"collectors.enabled": "false",
},
refShouldNotBeSet,
},
{
"driver disabled",
map[string]string{
"driver.enabled": "false",
},
refShouldNotBeSet,
},
}
helmChartPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.name, func(t *testing.T) {
t.Parallel()
options := &helm.Options{SetValues: testCase.values}
output := helm.RenderTemplate(t, options, helmChartPath, unit.ReleaseName, []string{"templates/falcoctl-configmap.yaml"})
var cm corev1.ConfigMap
helm.UnmarshalK8SYaml(t, output, &cm)
var config map[string]interface{}
helm.UnmarshalK8SYaml(t, cm.Data["falcoctl.yaml"], &config)
testCase.expected(t, config)
})
}
}
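// ContainerEngineSocket mirrors a single engine entry of the container
// plugin's init_config.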
type ContainerEngineSocket struct {
Enabled bool `yaml:"enabled"`
Sockets []string `yaml:"sockets,omitempty"`
}
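// ContainerEngineConfig mirrors the engines section of the container
// plugin's init_config.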
type ContainerEngineConfig struct {
Docker ContainerEngineSocket `yaml:"docker"`
Podman ContainerEngineSocket `yaml:"podman"`
Containerd ContainerEngineSocket `yaml:"containerd"`
CRI ContainerEngineSocket `yaml:"cri"`
LXC ContainerEngineSocket `yaml:"lxc"`
LibvirtLXC ContainerEngineSocket `yaml:"libvirt_lxc"`
BPM ContainerEngineSocket `yaml:"bpm"`
}

View File

@ -1,310 +0,0 @@
package containerPlugin
import (
"path/filepath"
"slices"
"testing"
"github.com/falcosecurity/charts/charts/falco/tests/unit"
"github.com/gruntwork-io/terratest/modules/helm"
"github.com/stretchr/testify/require"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
)
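// TestContainerPluginVolumeMounts verifies that the rendered daemonset mounts
// the configured container runtime sockets under /host inside the falco
// container.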
func TestContainerPluginVolumeMounts(t *testing.T) {
t.Parallel()
helmChartPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
testCases := []struct {
name string
values map[string]string
expected func(t *testing.T, volumeMounts []corev1.VolumeMount)
}{
{
name: "defaultValues",
values: nil,
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
require.Len(t, volumeMounts, 6)
require.Equal(t, "container-engine-socket-0", volumeMounts[0].Name)
require.Equal(t, "/host/var/run/docker.sock", volumeMounts[0].MountPath)
require.Equal(t, "container-engine-socket-1", volumeMounts[1].Name)
require.Equal(t, "/host/run/podman/podman.sock", volumeMounts[1].MountPath)
require.Equal(t, "container-engine-socket-2", volumeMounts[2].Name)
require.Equal(t, "/host/run/host-containerd/containerd.sock", volumeMounts[2].MountPath)
require.Equal(t, "container-engine-socket-3", volumeMounts[3].Name)
require.Equal(t, "/host/run/containerd/containerd.sock", volumeMounts[3].MountPath)
require.Equal(t, "container-engine-socket-4", volumeMounts[4].Name)
require.Equal(t, "/host/run/crio/crio.sock", volumeMounts[4].MountPath)
require.Equal(t, "container-engine-socket-5", volumeMounts[5].Name)
require.Equal(t, "/host/run/k3s/containerd/containerd.sock", volumeMounts[5].MountPath)
},
},
{
name: "defaultDockerVolumeMount",
values: map[string]string{
"collectors.containerEngine.engines.docker.enabled": "true",
"collectors.containerEngine.engines.containerd.enabled": "false",
"collectors.containerEngine.engines.cri.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
require.Len(t, volumeMounts, 1)
require.Equal(t, "container-engine-socket-0", volumeMounts[0].Name)
require.Equal(t, "/host/var/run/docker.sock", volumeMounts[0].MountPath)
},
},
{
name: "customDockerSocket",
values: map[string]string{
"collectors.containerEngine.engines.docker.enabled": "true",
"collectors.containerEngine.engines.docker.sockets[0]": "/custom/docker.sock",
"collectors.containerEngine.engines.containerd.enabled": "false",
"collectors.containerEngine.engines.cri.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
require.Len(t, volumeMounts, 1)
require.Equal(t, "container-engine-socket-0", volumeMounts[0].Name)
require.Equal(t, "/host/custom/docker.sock", volumeMounts[0].MountPath)
},
},
{
name: "defaultCriVolumeMount",
values: map[string]string{
"collectors.containerEngine.engines.docker.enabled": "false",
"collectors.containerEngine.engines.containerd.enabled": "false",
"collectors.containerEngine.engines.cri.enabled": "true",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
require.Len(t, volumeMounts, 4)
require.Equal(t, "container-engine-socket-0", volumeMounts[0].Name)
require.Equal(t, "/host/run/containerd/containerd.sock", volumeMounts[0].MountPath)
require.Equal(t, "container-engine-socket-1", volumeMounts[1].Name)
require.Equal(t, "/host/run/crio/crio.sock", volumeMounts[1].MountPath)
require.Equal(t, "container-engine-socket-2", volumeMounts[2].Name)
require.Equal(t, "/host/run/k3s/containerd/containerd.sock", volumeMounts[2].MountPath)
require.Equal(t, "container-engine-socket-3", volumeMounts[3].Name)
require.Equal(t, "/host/run/host-containerd/containerd.sock", volumeMounts[3].MountPath)
},
},
{
name: "customCriSocket",
values: map[string]string{
"collectors.containerEngine.engines.cri.enabled": "true",
"collectors.containerEngine.engines.cri.sockets[0]": "/custom/crio.sock",
"collectors.containerEngine.engines.containerd.enabled": "false",
"collectors.containerEngine.engines.docker.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
require.Len(t, volumeMounts, 1)
require.Equal(t, "container-engine-socket-0", volumeMounts[0].Name)
require.Equal(t, "/host/custom/crio.sock", volumeMounts[0].MountPath)
},
},
{
name: "defaultContainerdVolumeMount",
values: map[string]string{
"collectors.containerEngine.engines.docker.enabled": "false",
"collectors.containerEngine.engines.containerd.enabled": "true",
"collectors.containerEngine.engines.cri.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
require.Len(t, volumeMounts, 1)
require.Equal(t, "container-engine-socket-0", volumeMounts[0].Name)
require.Equal(t, "/host/run/host-containerd/containerd.sock", volumeMounts[0].MountPath)
},
},
{
name: "customContainerdSocket",
values: map[string]string{
"collectors.containerEngine.engines.containerd.enabled": "true",
"collectors.containerEngine.engines.containerd.sockets[0]": "/custom/containerd.sock",
"collectors.containerEngine.engines.cri.enabled": "false",
"collectors.containerEngine.engines.docker.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
require.Len(t, volumeMounts, 1)
require.Equal(t, "container-engine-socket-0", volumeMounts[0].Name)
require.Equal(t, "/host/custom/containerd.sock", volumeMounts[0].MountPath)
},
},
{
name: "ContainerEnginesDefaultValues",
values: map[string]string{},
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
require.Len(t, volumeMounts, 6)
require.Equal(t, "container-engine-socket-0", volumeMounts[0].Name)
require.Equal(t, "/host/var/run/docker.sock", volumeMounts[0].MountPath)
require.Equal(t, "container-engine-socket-1", volumeMounts[1].Name)
require.Equal(t, "/host/run/podman/podman.sock", volumeMounts[1].MountPath)
require.Equal(t, "container-engine-socket-2", volumeMounts[2].Name)
require.Equal(t, "/host/run/host-containerd/containerd.sock", volumeMounts[2].MountPath)
require.Equal(t, "container-engine-socket-3", volumeMounts[3].Name)
require.Equal(t, "/host/run/containerd/containerd.sock", volumeMounts[3].MountPath)
require.Equal(t, "container-engine-socket-4", volumeMounts[4].Name)
require.Equal(t, "/host/run/crio/crio.sock", volumeMounts[4].MountPath)
require.Equal(t, "container-engine-socket-5", volumeMounts[5].Name)
require.Equal(t, "/host/run/k3s/containerd/containerd.sock", volumeMounts[5].MountPath)
},
},
{
name: "ContainerEnginesDockerWithMultipleSockets",
values: map[string]string{
"collectors.containerEngine.engines.docker.enabled": "true",
"collectors.containerEngine.engines.docker.sockets[0]": "/var/run/docker.sock",
"collectors.containerEngine.engines.docker.sockets[1]": "/custom/docker.sock",
"collectors.containerEngine.engines.containerd.enabled": "false",
"collectors.containerEngine.engines.cri.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
require.Len(t, volumeMounts, 2)
dockerV0 := findVolumeMount("container-engine-socket-0", volumeMounts)
require.NotNil(t, dockerV0)
require.Equal(t, "/host/var/run/docker.sock", dockerV0.MountPath)
dockerV1 := findVolumeMount("container-engine-socket-1", volumeMounts)
require.NotNil(t, dockerV1)
require.Equal(t, "/host/custom/docker.sock", dockerV1.MountPath)
},
},
{
name: "ContainerEnginesCrioWithMultipleSockets",
values: map[string]string{
"collectors.containerEngine.engines.docker.enabled": "false",
"collectors.containerEngine.engines.containerd.enabled": "false",
"collectors.containerEngine.engines.cri.enabled": "true",
"collectors.containerEngine.engines.cri.sockets[0]": "/run/crio/crio.sock",
"collectors.containerEngine.engines.cri.sockets[1]": "/custom/crio.sock",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
require.Len(t, volumeMounts, 2)
crioV0 := findVolumeMount("container-engine-socket-0", volumeMounts)
require.NotNil(t, crioV0)
require.Equal(t, "/host/run/crio/crio.sock", crioV0.MountPath)
crioV1 := findVolumeMount("container-engine-socket-1", volumeMounts)
require.NotNil(t, crioV1)
require.Equal(t, "/host/custom/crio.sock", crioV1.MountPath)
},
},
{
name: "noVolumeMountsWhenCollectorsDisabled",
values: map[string]string{
"collectors.enabled": "false",
},
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
require.Len(t, volumeMounts, 0)
},
},
{
name: "noVolumeMountsWhenDriverDisabled",
values: map[string]string{
"driver.enabled": "false",
},
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
require.Len(t, volumeMounts, 0)
},
},
}
for _, tc := range testCases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
options := &helm.Options{
SetValues: tc.values,
}
// Render the template
output := helm.RenderTemplate(t, options, helmChartPath, unit.ReleaseName, []string{"templates/daemonset.yaml"})
// Parse the YAML output
var daemonset appsv1.DaemonSet
helm.UnmarshalK8SYaml(t, output, &daemonset)
// Find volumeMounts in the falco container
var pluginVolumeMounts []corev1.VolumeMount
for _, container := range daemonset.Spec.Template.Spec.Containers {
if container.Name == "falco" {
for _, volumeMount := range container.VolumeMounts {
if slices.Contains(volumeNames, volumeMount.Name) {
pluginVolumeMounts = append(pluginVolumeMounts, volumeMount)
}
}
}
}
// Run the test case's assertions
tc.expected(t, pluginVolumeMounts)
})
}
}
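// TestInvalidVolumeMountConfiguration ensures that mixing the deprecated
// collectors with the containerEngine collector also fails when rendering the
// daemonset.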
func TestInvalidVolumeMountConfiguration(t *testing.T) {
t.Parallel()
helmChartPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
testCases := []struct {
name string
values map[string]string
expectedErr string
}{
{
name: "bothOldAndNewConfigEnabled",
values: map[string]string{
"collectors.docker.enabled": "true",
"collectors.containerEngine.enabled": "true",
},
expectedErr: "You can not enable any of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time",
},
}
for _, tc := range testCases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
options := &helm.Options{
SetValues: tc.values,
}
// Attempt to render the template, expect an error
_, err := helm.RenderTemplateE(t, options, helmChartPath, unit.ReleaseName, []string{"templates/daemonset.yaml"})
require.Error(t, err)
require.Contains(t, err.Error(), tc.expectedErr)
})
}
}
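// findVolumeMount returns the volume mount with the given name, or nil if it
// is not present.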
func findVolumeMount(name string, volumeMounts []corev1.VolumeMount) *corev1.VolumeMount {
for _, v := range volumeMounts {
if v.Name == name {
return &v
}
}
return nil
}

View File

@ -1,373 +0,0 @@
package containerPlugin
import (
"path/filepath"
"slices"
"testing"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"github.com/falcosecurity/charts/charts/falco/tests/unit"
"github.com/gruntwork-io/terratest/modules/helm"
"github.com/stretchr/testify/require"
)
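// TestContainerPluginVolumes verifies that the rendered daemonset declares a
// hostPath volume for every configured container runtime socket.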
func TestContainerPluginVolumes(t *testing.T) {
t.Parallel()
helmChartPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
testCases := []struct {
name string
values map[string]string
expected func(t *testing.T, volumes []corev1.Volume)
}{
{
name: "defaultValues",
values: nil,
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 6)
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
require.Equal(t, "/var/run/docker.sock", volumes[0].HostPath.Path)
require.Equal(t, "container-engine-socket-1", volumes[1].Name)
require.Equal(t, "/run/podman/podman.sock", volumes[1].HostPath.Path)
require.Equal(t, "container-engine-socket-2", volumes[2].Name)
require.Equal(t, "/run/host-containerd/containerd.sock", volumes[2].HostPath.Path)
require.Equal(t, "container-engine-socket-3", volumes[3].Name)
require.Equal(t, "/run/containerd/containerd.sock", volumes[3].HostPath.Path)
require.Equal(t, "container-engine-socket-4", volumes[4].Name)
require.Equal(t, "/run/crio/crio.sock", volumes[4].HostPath.Path)
require.Equal(t, "container-engine-socket-5", volumes[5].Name)
require.Equal(t, "/run/k3s/containerd/containerd.sock", volumes[5].HostPath.Path)
},
},
{
name: "defaultDockerVolume",
values: map[string]string{
"collectors.containerEngine.engines.docker.enabled": "true",
"collectors.containerEngine.engines.containerd.enabled": "false",
"collectors.containerEngine.engines.cri.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 1)
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
require.Equal(t, "/var/run/docker.sock", volumes[0].HostPath.Path)
},
},
{
name: "customDockerSocket",
values: map[string]string{
"collectors.containerEngine.engines.docker.enabled": "true",
"collectors.containerEngine.engines.docker.sockets[0]": "/custom/docker.sock",
"collectors.containerEngine.engines.containerd.enabled": "false",
"collectors.containerEngine.engines.cri.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 1)
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
require.Equal(t, "/custom/docker.sock", volumes[0].HostPath.Path)
},
},
{
name: "defaultCriVolume",
values: map[string]string{
"collectors.containerEngine.engines.docker.enabled": "false",
"collectors.containerEngine.engines.containerd.enabled": "false",
"collectors.containerEngine.engines.cri.enabled": "true",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 4)
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
require.Equal(t, "/run/containerd/containerd.sock", volumes[0].HostPath.Path)
require.Equal(t, "container-engine-socket-1", volumes[1].Name)
require.Equal(t, "/run/crio/crio.sock", volumes[1].HostPath.Path)
require.Equal(t, "container-engine-socket-2", volumes[2].Name)
require.Equal(t, "/run/k3s/containerd/containerd.sock", volumes[2].HostPath.Path)
require.Equal(t, "container-engine-socket-3", volumes[3].Name)
require.Equal(t, "/run/host-containerd/containerd.sock", volumes[3].HostPath.Path)
},
},
{
name: "customCrioSocket",
values: map[string]string{
"collectors.containerEngine.engines.docker.enabled": "false",
"collectors.containerEngine.engines.cri.enabled": "true",
"collectors.containerEngine.engines.cri.sockets[0]": "/custom/crio.sock",
"collectors.containerEngine.engines.containerd.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 1)
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
require.Equal(t, "/custom/crio.sock", volumes[0].HostPath.Path)
},
},
{
name: "defaultContainerdVolume",
values: map[string]string{
"collectors.containerEngine.engines.docker.enabled": "false",
"collectors.containerEngine.engines.containerd.enabled": "true",
"collectors.containerEngine.engines.cri.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 1)
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
require.Equal(t, "/run/host-containerd/containerd.sock", volumes[0].HostPath.Path)
},
},
{
name: "customContainerdSocket",
values: map[string]string{
"collectors.containerEngine.engines.docker.enabled": "false",
"collectors.containerEngine.engines.containerd.enabled": "true",
"collectors.containerEngine.engines.containerd.sockets[0]": "/custom/containerd.sock",
"collectors.containerEngine.engines.cri.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 1)
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
require.Equal(t, "/custom/containerd.sock", volumes[0].HostPath.Path)
},
},
{
name: "ContainerEnginesDefaultValues",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "false",
"collectors.containerEngine.enabled": "true",
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 6)
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
require.Equal(t, "/var/run/docker.sock", volumes[0].HostPath.Path)
require.Equal(t, "container-engine-socket-1", volumes[1].Name)
require.Equal(t, "/run/podman/podman.sock", volumes[1].HostPath.Path)
require.Equal(t, "container-engine-socket-2", volumes[2].Name)
require.Equal(t, "/run/host-containerd/containerd.sock", volumes[2].HostPath.Path)
require.Equal(t, "container-engine-socket-3", volumes[3].Name)
require.Equal(t, "/run/containerd/containerd.sock", volumes[3].HostPath.Path)
require.Equal(t, "container-engine-socket-4", volumes[4].Name)
require.Equal(t, "/run/crio/crio.sock", volumes[4].HostPath.Path)
require.Equal(t, "container-engine-socket-5", volumes[5].Name)
require.Equal(t, "/run/k3s/containerd/containerd.sock", volumes[5].HostPath.Path)
},
},
{
name: "ContainerEnginesDockerWithMultipleSockets",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "false",
"collectors.containerEngine.enabled": "true",
"collectors.containerEngine.engines.docker.enabled": "true",
"collectors.containerEngine.engines.docker.sockets[0]": "/var/run/docker.sock",
"collectors.containerEngine.engines.docker.sockets[1]": "/custom/docker.sock",
"collectors.containerEngine.engines.containerd.enabled": "false",
"collectors.containerEngine.engines.cri.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 2)
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
require.Equal(t, "/var/run/docker.sock", volumes[0].HostPath.Path)
require.Equal(t, "container-engine-socket-1", volumes[1].Name)
require.Equal(t, "/custom/docker.sock", volumes[1].HostPath.Path)
},
},
{
name: "ContainerEnginesCrioWithMultipleSockets",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "false",
"collectors.containerEngine.enabled": "true",
"collectors.containerEngine.engines.docker.enabled": "false",
"collectors.containerEngine.engines.containerd.enabled": "false",
"collectors.containerEngine.engines.cri.enabled": "true",
"collectors.containerEngine.engines.cri.sockets[0]": "/run/crio/crio.sock",
"collectors.containerEngine.engines.cri.sockets[1]": "/custom/crio.sock",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 2)
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
require.Equal(t, "/run/crio/crio.sock", volumes[0].HostPath.Path)
require.Equal(t, "container-engine-socket-1", volumes[1].Name)
require.Equal(t, "/custom/crio.sock", volumes[1].HostPath.Path)
},
},
{
name: "ContainerEnginesPodmanWithMultipleSockets",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "false",
"collectors.containerEngine.enabled": "true",
"collectors.containerEngine.engines.docker.enabled": "false",
"collectors.containerEngine.engines.containerd.enabled": "false",
"collectors.containerEngine.engines.cri.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "true",
"collectors.containerEngine.engines.podman.sockets[0]": "/run/podman/podman.sock",
"collectors.containerEngine.engines.podman.sockets[1]": "/custom/podman.sock",
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 2)
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
require.Equal(t, "/run/podman/podman.sock", volumes[0].HostPath.Path)
require.Equal(t, "container-engine-socket-1", volumes[1].Name)
require.Equal(t, "/custom/podman.sock", volumes[1].HostPath.Path)
},
},
{
name: "ContainerEnginesContainerdWithMultipleSockets",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "false",
"collectors.containerEngine.enabled": "true",
"collectors.containerEngine.engines.docker.enabled": "false",
"collectors.containerEngine.engines.containerd.enabled": "true",
"collectors.containerEngine.engines.containerd.sockets[0]": "/run/containerd/containerd.sock",
"collectors.containerEngine.engines.containerd.sockets[1]": "/custom/containerd.sock",
"collectors.containerEngine.engines.cri.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 2)
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
require.Equal(t, "/run/containerd/containerd.sock", volumes[0].HostPath.Path)
require.Equal(t, "container-engine-socket-1", volumes[1].Name)
require.Equal(t, "/custom/containerd.sock", volumes[1].HostPath.Path)
},
},
{
name: "ContainerEnginesMultipleWithCustomSockets",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "false",
"collectors.containerEngine.enabled": "true",
"collectors.containerEngine.engines.docker.enabled": "true",
"collectors.containerEngine.engines.docker.sockets[0]": "/custom/docker/socket.sock",
"collectors.containerEngine.engines.containerd.enabled": "true",
"collectors.containerEngine.engines.cri.enabled": "true",
"collectors.containerEngine.engines.cri.sockets[0]": "/var/custom/crio.sock",
"collectors.containerEngine.engines.podman.enabled": "true",
"collectors.containerEngine.engines.podman.sockets[0]": "/run/podman/podman.sock",
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 4)
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
require.Equal(t, "/custom/docker/socket.sock", volumes[0].HostPath.Path)
require.Equal(t, "container-engine-socket-1", volumes[1].Name)
require.Equal(t, "/run/podman/podman.sock", volumes[1].HostPath.Path)
require.Equal(t, "container-engine-socket-2", volumes[2].Name)
require.Equal(t, "/run/host-containerd/containerd.sock", volumes[2].HostPath.Path)
require.Equal(t, "container-engine-socket-3", volumes[3].Name)
require.Equal(t, "/var/custom/crio.sock", volumes[3].HostPath.Path)
},
},
{
name: "noVolumesWhenCollectorsDisabled",
values: map[string]string{
"collectors.enabled": "false",
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 0)
},
},
{
name: "noVolumesWhenDriverDisabled",
values: map[string]string{
"driver.enabled": "false",
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 0)
},
},
}
for _, tc := range testCases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
options := &helm.Options{
SetValues: tc.values,
}
// Render the template
output := helm.RenderTemplate(t, options, helmChartPath, unit.ReleaseName, []string{"templates/daemonset.yaml"})
// Parse the YAML output
var daemonset appsv1.DaemonSet
helm.UnmarshalK8SYaml(t, output, &daemonset)
// Find volumes that match our container plugin pattern
var pluginVolumes []corev1.Volume
for _, volume := range daemonset.Spec.Template.Spec.Volumes {
// Check if the volume is for container sockets
if volume.HostPath != nil && slices.Contains(volumeNames, volume.Name) {
pluginVolumes = append(pluginVolumes, volume)
}
}
// Run the test case's assertions
tc.expected(t, pluginVolumes)
})
}
}
func TestInvalidVolumeConfiguration(t *testing.T) {
t.Parallel()
helmChartPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
testCases := []struct {
name string
values map[string]string
expectedErr string
}{
{
name: "bothOldAndNewConfigEnabled",
values: map[string]string{
"collectors.docker.enabled": "true",
"collectors.containerEngine.enabled": "true",
},
expectedErr: "You can not enable any of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time",
},
}
for _, tc := range testCases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
options := &helm.Options{
SetValues: tc.values,
}
// Attempt to render the template, expect an error
_, err := helm.RenderTemplateE(t, options, helmChartPath, unit.ReleaseName, []string{"templates/daemonset.yaml"})
require.Error(t, err)
require.Contains(t, err.Error(), tc.expectedErr)
})
}
}
func findVolume(name string, volumes []corev1.Volume) *corev1.Volume {
for _, v := range volumes {
if v.Name == name {
return &v
}
}
return nil
}
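
A minimal usage sketch of the findVolume helper above (illustrative only; the volume name and host path are example values, not an assertion taken from this file):

// Look up a rendered volume by name and assert on its host path.
if v := findVolume("container-engine-socket-0", daemonset.Spec.Template.Spec.Volumes); v != nil {
	require.Equal(t, "/var/run/docker.sock", v.HostPath.Path)
}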

View File

@@ -1,17 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright 2024 The Falco Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package unit contains the unit tests for the Falco chart.
package unit

View File

@@ -1,334 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright 2024 The Falco Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package falcoTemplates
import (
"fmt"
"github.com/falcosecurity/charts/charts/falco/tests/unit"
"path/filepath"
"strings"
"testing"
"github.com/gruntwork-io/terratest/modules/helm"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
)
func TestDriverConfigInFalcoConfig(t *testing.T) {
t.Parallel()
helmChartPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
testCases := []struct {
name string
values map[string]string
expected func(t *testing.T, config any)
}{
{
"defaultValues",
nil,
func(t *testing.T, config any) {
require.Len(t, config, 4, "should have four items")
kind, bufSizePreset, dropFailedExit, err := getKmodConfig(config)
require.NoError(t, err)
require.Equal(t, "modern_ebpf", kind)
require.Equal(t, float64(4), bufSizePreset)
require.False(t, dropFailedExit)
},
},
{
"kind=kmod",
map[string]string{
"driver.kind": "kmod",
},
func(t *testing.T, config any) {
require.Len(t, config, 2, "should have only two items")
kind, bufSizePreset, dropFailedExit, err := getKmodConfig(config)
require.NoError(t, err)
require.Equal(t, "kmod", kind)
require.Equal(t, float64(4), bufSizePreset)
require.False(t, dropFailedExit)
},
},
{
"kind=module(alias)",
map[string]string{
"driver.kind": "module",
},
func(t *testing.T, config any) {
require.Len(t, config, 2, "should have only two items")
kind, bufSizePreset, dropFailedExit, err := getKmodConfig(config)
require.NoError(t, err)
require.Equal(t, "kmod", kind)
require.Equal(t, float64(4), bufSizePreset)
require.False(t, dropFailedExit)
},
},
{
"kmod=config",
map[string]string{
"driver.kmod.bufSizePreset": "6",
"driver.kmod.dropFailedExit": "true",
"driver.kind": "module",
},
func(t *testing.T, config any) {
require.Len(t, config, 2, "should have only two items")
kind, bufSizePreset, dropFailedExit, err := getKmodConfig(config)
require.NoError(t, err)
require.Equal(t, "kmod", kind)
require.Equal(t, float64(6), bufSizePreset)
require.True(t, dropFailedExit)
},
},
{
"ebpf=config",
map[string]string{
"driver.kind": "ebpf",
"driver.ebpf.bufSizePreset": "6",
"driver.ebpf.dropFailedExit": "true",
"driver.ebpf.path": "testing/Path/ebpf",
},
func(t *testing.T, config any) {
require.Len(t, config, 2, "should have only two items")
kind, path, bufSizePreset, dropFailedExit, err := getEbpfConfig(config)
require.NoError(t, err)
require.Equal(t, "ebpf", kind)
require.Equal(t, "testing/Path/ebpf", path)
require.Equal(t, float64(6), bufSizePreset)
require.True(t, dropFailedExit)
},
},
{
"kind=ebpf",
map[string]string{
"driver.kind": "ebpf",
},
func(t *testing.T, config any) {
require.Len(t, config, 2, "should have only two items")
kind, path, bufSizePreset, dropFailedExit, err := getEbpfConfig(config)
require.NoError(t, err)
require.Equal(t, "ebpf", kind)
require.Equal(t, "${HOME}/.falco/falco-bpf.o", path)
require.Equal(t, float64(4), bufSizePreset)
require.False(t, dropFailedExit)
},
},
{
"kind=modern_ebpf",
map[string]string{
"driver.kind": "modern_ebpf",
},
func(t *testing.T, config any) {
require.Len(t, config, 2, "should have only two items")
kind, bufSizePreset, cpusForEachBuffer, dropFailedExit, err := getModernEbpfConfig(config)
require.NoError(t, err)
require.Equal(t, "modern_ebpf", kind)
require.Equal(t, float64(4), bufSizePreset)
require.Equal(t, float64(2), cpusForEachBuffer)
require.False(t, dropFailedExit)
},
},
{
"kind=modern-bpf(alias)",
map[string]string{
"driver.kind": "modern-bpf",
},
func(t *testing.T, config any) {
require.Len(t, config, 2, "should have only two items")
kind, bufSizePreset, cpusForEachBuffer, dropFailedExit, err := getModernEbpfConfig(config)
require.NoError(t, err)
require.Equal(t, "modern_ebpf", kind)
require.Equal(t, float64(4), bufSizePreset)
require.Equal(t, float64(2), cpusForEachBuffer)
require.False(t, dropFailedExit)
},
},
{
"modernEbpf=config",
map[string]string{
"driver.kind": "modern-bpf",
"driver.modernEbpf.bufSizePreset": "6",
"driver.modernEbpf.dropFailedExit": "true",
"driver.modernEbpf.cpusForEachBuffer": "8",
},
func(t *testing.T, config any) {
require.Len(t, config, 2, "should have only two items")
kind, bufSizePreset, cpusForEachBuffer, dropFailedExit, err := getModernEbpfConfig(config)
require.NoError(t, err)
require.Equal(t, "modern_ebpf", kind)
require.Equal(t, float64(6), bufSizePreset)
require.Equal(t, float64(8), cpusForEachBuffer)
require.True(t, dropFailedExit)
},
},
{
"kind=gvisor",
map[string]string{
"driver.kind": "gvisor",
},
func(t *testing.T, config any) {
require.Len(t, config, 2, "should have only two items")
kind, config, root, err := getGvisorConfig(config)
require.NoError(t, err)
require.Equal(t, "gvisor", kind)
require.Equal(t, "/gvisor-config/pod-init.json", config)
require.Equal(t, "/host/run/containerd/runsc/k8s.io", root)
},
},
{
"gvisor=config",
map[string]string{
"driver.kind": "gvisor",
"driver.gvisor.runsc.root": "/my/root/test",
},
func(t *testing.T, config any) {
require.Len(t, config, 2, "should have only two items")
kind, config, root, err := getGvisorConfig(config)
require.NoError(t, err)
require.Equal(t, "gvisor", kind)
require.Equal(t, "/gvisor-config/pod-init.json", config)
require.Equal(t, "/host/my/root/test/k8s.io", root)
},
},
{
"kind=auto",
map[string]string{
"driver.kind": "auto",
},
func(t *testing.T, config any) {
require.Len(t, config, 4, "should have four items")
// Check that configuration for kmod has been set.
kind, bufSizePreset, dropFailedExit, err := getKmodConfig(config)
require.NoError(t, err)
require.Equal(t, "modern_ebpf", kind)
require.Equal(t, float64(4), bufSizePreset)
require.False(t, dropFailedExit)
// Check that configuration for ebpf has been set.
kind, path, bufSizePreset, dropFailedExit, err := getEbpfConfig(config)
require.NoError(t, err)
require.Equal(t, "modern_ebpf", kind)
require.Equal(t, "${HOME}/.falco/falco-bpf.o", path)
require.Equal(t, float64(4), bufSizePreset)
require.False(t, dropFailedExit)
// Check that configuration for modern_ebpf has been set.
kind, bufSizePreset, cpusForEachBuffer, dropFailedExit, err := getModernEbpfConfig(config)
require.NoError(t, err)
require.Equal(t, "modern_ebpf", kind)
require.Equal(t, float64(4), bufSizePreset)
require.Equal(t, float64(2), cpusForEachBuffer)
require.False(t, dropFailedExit)
},
},
}
for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.name, func(t *testing.T) {
t.Parallel()
options := &helm.Options{SetValues: testCase.values}
output := helm.RenderTemplate(t, options, helmChartPath, unit.ReleaseName, []string{"templates/configmap.yaml"})
var cm corev1.ConfigMap
helm.UnmarshalK8SYaml(t, output, &cm)
var config map[string]interface{}
helm.UnmarshalK8SYaml(t, cm.Data["falco.yaml"], &config)
engine := config["engine"]
testCase.expected(t, engine)
})
}
}
func TestDriverConfigWithUnsupportedDriver(t *testing.T) {
t.Parallel()
helmChartPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
values := map[string]string{
"driver.kind": "notExisting",
}
options := &helm.Options{SetValues: values}
_, err = helm.RenderTemplateE(t, options, helmChartPath, unit.ReleaseName, []string{"templates/configmap.yaml"})
require.Error(t, err)
require.True(t, strings.Contains(err.Error(),
"unsupported driver kind: \"notExisting\". Supported drivers [kmod ebpf modern_ebpf gvisor auto], alias [module modern-bpf]"))
}
func getKmodConfig(config interface{}) (kind string, bufSizePreset float64, dropFailedExit bool, err error) {
configMap, ok := config.(map[string]interface{})
if !ok {
err = fmt.Errorf("can't assert type of config")
return
}
kind = configMap["kind"].(string)
kmod := configMap["kmod"].(map[string]interface{})
bufSizePreset = kmod["buf_size_preset"].(float64)
dropFailedExit = kmod["drop_failed_exit"].(bool)
return
}
func getEbpfConfig(config interface{}) (kind, path string, bufSizePreset float64, dropFailedExit bool, err error) {
configMap, ok := config.(map[string]interface{})
if !ok {
err = fmt.Errorf("can't assert type of config")
return
}
kind = configMap["kind"].(string)
ebpf := configMap["ebpf"].(map[string]interface{})
bufSizePreset = ebpf["buf_size_preset"].(float64)
dropFailedExit = ebpf["drop_failed_exit"].(bool)
path = ebpf["probe"].(string)
return
}
func getModernEbpfConfig(config interface{}) (kind string, bufSizePreset, cpusForEachBuffer float64, dropFailedExit bool, err error) {
configMap, ok := config.(map[string]interface{})
if !ok {
err = fmt.Errorf("can't assert type of config")
return
}
kind = configMap["kind"].(string)
modernEbpf := configMap["modern_ebpf"].(map[string]interface{})
bufSizePreset = modernEbpf["buf_size_preset"].(float64)
dropFailedExit = modernEbpf["drop_failed_exit"].(bool)
cpusForEachBuffer = modernEbpf["cpus_for_each_buffer"].(float64)
return
}
func getGvisorConfig(cfg interface{}) (kind, config, root string, err error) {
configMap, ok := cfg.(map[string]interface{})
if !ok {
err = fmt.Errorf("can't assert type of config")
return
}
kind = configMap["kind"].(string)
gvisor := configMap["gvisor"].(map[string]interface{})
config = gvisor["config"].(string)
root = gvisor["root"].(string)
return
}
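
For orientation, the engine block these accessors decode looks roughly like the following, reconstructed from the default-value assertions above (a sketch; the exact rendered layout in falco.yaml may differ):

// Reconstructed from the assertions above; illustrative only.
const engineYAML = `
kind: modern_ebpf
kmod:
  buf_size_preset: 4
  drop_failed_exit: false
ebpf:
  probe: ${HOME}/.falco/falco-bpf.o
  buf_size_preset: 4
  drop_failed_exit: false
modern_ebpf:
  buf_size_preset: 4
  cpus_for_each_buffer: 2
  drop_failed_exit: false
`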

View File

@@ -1,266 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright 2024 The Falco Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package falcoTemplates
import (
"github.com/falcosecurity/charts/charts/falco/tests/unit"
"path/filepath"
"testing"
v1 "k8s.io/api/core/v1"
"github.com/gruntwork-io/terratest/modules/helm"
"github.com/stretchr/testify/require"
appsv1 "k8s.io/api/apps/v1"
)
var (
namespaceEnvVar = v1.EnvVar{
Name: "FALCOCTL_DRIVER_CONFIG_NAMESPACE",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "",
FieldPath: "metadata.namespace",
},
}}
configmapEnvVar = v1.EnvVar{
Name: "FALCOCTL_DRIVER_CONFIG_CONFIGMAP",
Value: unit.ReleaseName + "-falco",
}
updateConfigMapEnvVar = v1.EnvVar{
Name: "FALCOCTL_DRIVER_CONFIG_UPDATE_FALCO",
Value: "false",
}
)
// TestDriverLoaderEnabled tests the helper that enables the driver loader based on the configuration.
func TestDriverLoaderEnabled(t *testing.T) {
t.Parallel()
helmChartPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
testCases := []struct {
name string
values map[string]string
expected func(t *testing.T, initContainer any)
}{
{
"defaultValues",
nil,
func(t *testing.T, initContainer any) {
container, ok := initContainer.(v1.Container)
require.True(t, ok)
require.Contains(t, container.Args, "auto")
require.True(t, *container.SecurityContext.Privileged)
require.Contains(t, container.Env, namespaceEnvVar)
require.Contains(t, container.Env, configmapEnvVar)
require.NotContains(t, container.Env, updateConfigMapEnvVar)
// Check that the expected volumes are there.
volumeMounts(t, container.VolumeMounts)
},
},
{
"driver.kind=modern-bpf",
map[string]string{
"driver.kind": "modern-bpf",
},
func(t *testing.T, initContainer any) {
require.Nil(t, initContainer)
},
},
{
"driver.kind=modern_ebpf",
map[string]string{
"driver.kind": "modern_ebpf",
},
func(t *testing.T, initContainer any) {
require.Nil(t, initContainer)
},
},
{
"driver.kind=gvisor",
map[string]string{
"driver.kind": "gvisor",
},
func(t *testing.T, initContainer any) {
require.Nil(t, initContainer)
},
},
{
"driver.disabled",
map[string]string{
"driver.enabled": "false",
},
func(t *testing.T, initContainer any) {
require.Nil(t, initContainer)
},
},
{
"driver.loader.disabled",
map[string]string{
"driver.loader.enabled": "false",
},
func(t *testing.T, initContainer any) {
require.Nil(t, initContainer)
},
},
{
"driver.kind=kmod",
map[string]string{
"driver.kind": "kmod",
},
func(t *testing.T, initContainer any) {
container, ok := initContainer.(v1.Container)
require.True(t, ok)
require.Contains(t, container.Args, "kmod")
require.True(t, *container.SecurityContext.Privileged)
require.NotContains(t, container.Env, namespaceEnvVar)
require.NotContains(t, container.Env, configmapEnvVar)
require.Contains(t, container.Env, updateConfigMapEnvVar)
// Check that the expected volumes are there.
volumeMounts(t, container.VolumeMounts)
},
},
{
"driver.kind=module",
map[string]string{
"driver.kind": "module",
},
func(t *testing.T, initContainer any) {
container, ok := initContainer.(v1.Container)
require.True(t, ok)
require.Contains(t, container.Args, "kmod")
require.True(t, *container.SecurityContext.Privileged)
require.NotContains(t, container.Env, namespaceEnvVar)
require.NotContains(t, container.Env, configmapEnvVar)
require.Contains(t, container.Env, updateConfigMapEnvVar)
// Check that the expected volumes are there.
volumeMounts(t, container.VolumeMounts)
},
},
{
"driver.kind=ebpf",
map[string]string{
"driver.kind": "ebpf",
},
func(t *testing.T, initContainer any) {
container, ok := initContainer.(v1.Container)
require.True(t, ok)
require.Contains(t, container.Args, "ebpf")
require.Nil(t, container.SecurityContext)
require.NotContains(t, container.Env, namespaceEnvVar)
require.Contains(t, container.Env, updateConfigMapEnvVar)
require.NotContains(t, container.Env, configmapEnvVar)
// Check that the expected volumes are there.
volumeMounts(t, container.VolumeMounts)
},
},
{
"driver.kind=kmod&driver.loader.disabled",
map[string]string{
"driver.kind": "kmod",
"driver.loader.enabled": "false",
},
func(t *testing.T, initContainer any) {
require.Nil(t, initContainer)
},
},
}
for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.name, func(t *testing.T) {
t.Parallel()
options := &helm.Options{SetValues: testCase.values}
output := helm.RenderTemplate(t, options, helmChartPath, unit.ReleaseName, []string{"templates/daemonset.yaml"})
var ds appsv1.DaemonSet
helm.UnmarshalK8SYaml(t, output, &ds)
for i := range ds.Spec.Template.Spec.InitContainers {
if ds.Spec.Template.Spec.InitContainers[i].Name == "falco-driver-loader" {
testCase.expected(t, ds.Spec.Template.Spec.InitContainers[i])
return
}
}
testCase.expected(t, nil)
})
}
}
// volumeMounts checks that the expected volume mounts have been configured.
func volumeMounts(t *testing.T, volumeMounts []v1.VolumeMount) {
rootFalcoFS := v1.VolumeMount{
Name: "root-falco-fs",
ReadOnly: false,
MountPath: "/root/.falco",
}
require.Contains(t, volumeMounts, rootFalcoFS)
procFS := v1.VolumeMount{
Name: "proc-fs",
ReadOnly: true,
MountPath: "/host/proc",
}
require.Contains(t, volumeMounts, procFS)
bootFS := v1.VolumeMount{
Name: "boot-fs",
ReadOnly: true,
MountPath: "/host/boot",
}
require.Contains(t, volumeMounts, bootFS)
libModulesFS := v1.VolumeMount{
Name: "lib-modules",
ReadOnly: false,
MountPath: "/host/lib/modules",
}
require.Contains(t, volumeMounts, libModulesFS)
usrFS := v1.VolumeMount{
Name: "usr-fs",
ReadOnly: true,
MountPath: "/host/usr",
}
require.Contains(t, volumeMounts, usrFS)
etcFS := v1.VolumeMount{
Name: "etc-fs",
ReadOnly: true,
MountPath: "/host/etc",
}
require.Contains(t, volumeMounts, etcFS)
specializedFalcoConfigs := v1.VolumeMount{
Name: "specialized-falco-configs",
ReadOnly: false,
MountPath: "/etc/falco/config.d",
}
require.Contains(t, volumeMounts, specializedFalcoConfigs)
}

View File

@@ -1,145 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright 2024 The Falco Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package falcoTemplates
import (
"fmt"
"github.com/falcosecurity/charts/charts/falco/tests/unit"
"io"
"os"
"path/filepath"
"strings"
"testing"
"github.com/gruntwork-io/terratest/modules/helm"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
corev1 "k8s.io/api/core/v1"
)
type grafanaDashboardsTemplateTest struct {
suite.Suite
chartPath string
releaseName string
namespace string
templates []string
}
func TestGrafanaDashboardsTemplate(t *testing.T) {
t.Parallel()
chartFullPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
suite.Run(t, &grafanaDashboardsTemplateTest{
Suite: suite.Suite{},
chartPath: chartFullPath,
releaseName: "falco-test-dashboard",
namespace: "falco-test-dashboard",
templates: []string{"templates/falco-dashboard-grafana.yaml"},
})
}
func (g *grafanaDashboardsTemplateTest) TestCreationDefaultValues() {
// Attempt to render the dashboard configmap and check that it is not rendered by default.
_, err := helm.RenderTemplateE(g.T(), &helm.Options{}, g.chartPath, g.releaseName, g.templates, fmt.Sprintf("--namespace=%s", g.namespace))
g.Error(err, "should error")
g.Equal("error while running command: exit status 1; Error: could not find template templates/falco-dashboard-grafana.yaml in chart", err.Error())
}
func (g *grafanaDashboardsTemplateTest) TestConfig() {
testCases := []struct {
name string
values map[string]string
expected func(cm *corev1.ConfigMap)
}{
{"dashboard enabled",
map[string]string{
"grafana.dashboards.enabled": "true",
},
func(cm *corev1.ConfigMap) {
// Check that the name is the expected one.
g.Equal("falco-grafana-dashboard", cm.Name)
// Check the namespace.
g.Equal(g.namespace, cm.Namespace)
g.Nil(cm.Annotations)
},
},
{"namespace",
map[string]string{
"grafana.dashboards.enabled": "true",
"grafana.dashboards.configMaps.falco.namespace": "custom-namespace",
},
func(cm *corev1.ConfigMap) {
// Check that the name is the expected one.
g.Equal("falco-grafana-dashboard", cm.Name)
// Check the namespace.
g.Equal("custom-namespace", cm.Namespace)
g.Nil(cm.Annotations)
},
},
{"folder",
map[string]string{
"grafana.dashboards.enabled": "true",
"grafana.dashboards.configMaps.falco.folder": "custom-folder",
},
func(cm *corev1.ConfigMap) {
// Check that the name is the expected one.
g.Equal("falco-grafana-dashboard", cm.Name)
g.NotNil(cm.Annotations)
g.Len(cm.Annotations, 2)
// Check sidecar annotation.
val, ok := cm.Annotations["k8s-sidecar-target-directory"]
g.True(ok)
g.Equal("/tmp/dashboards/custom-folder", val)
// Check grafana annotation.
val, ok = cm.Annotations["grafana_dashboard_folder"]
g.True(ok)
g.Equal("custom-folder", val)
},
},
}
for _, testCase := range testCases {
testCase := testCase
g.Run(testCase.name, func() {
subT := g.T()
subT.Parallel()
options := &helm.Options{SetValues: testCase.values}
// Render the configmap and unmarshal it.
output, err := helm.RenderTemplateE(subT, options, g.chartPath, g.releaseName, g.templates, "--namespace="+g.namespace)
g.NoError(err, "should succeed")
var cfgMap corev1.ConfigMap
helm.UnmarshalK8SYaml(subT, output, &cfgMap)
// Common checks
// Check that it contains the right label.
g.Contains(cfgMap.Labels, "grafana_dashboard")
// Check that the dashboard is contained in the config map.
file, err := os.Open("../../../dashboards/falco-dashboard.json")
g.NoError(err)
defer file.Close()
content, err := io.ReadAll(file)
g.NoError(err)
cfgData, ok := cfgMap.Data["falco-dashboard.json"]
g.True(ok)
g.Equal(strings.TrimRight(string(content), "\n"), cfgData)
testCase.expected(&cfgMap)
})
}
}

View File

@@ -1,210 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright 2024 The Falco Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package falcoTemplates
import (
"github.com/falcosecurity/charts/charts/falco/tests/unit"
"path/filepath"
"testing"
"github.com/gruntwork-io/terratest/modules/helm"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
corev1 "k8s.io/api/core/v1"
)
type metricsConfig struct {
Enabled bool `yaml:"enabled"`
ConvertMemoryToMB bool `yaml:"convert_memory_to_mb"`
IncludeEmptyValues bool `yaml:"include_empty_values"`
KernelEventCountersEnabled bool `yaml:"kernel_event_counters_enabled"`
KernelEventCountersPerCPUEnabled bool `yaml:"kernel_event_counters_per_cpu_enabled"`
ResourceUtilizationEnabled bool `yaml:"resource_utilization_enabled"`
RulesCountersEnabled bool `yaml:"rules_counters_enabled"`
LibbpfStatsEnabled bool `yaml:"libbpf_stats_enabled"`
OutputRule bool `yaml:"output_rule"`
StateCountersEnabled bool `yaml:"state_counters_enabled"`
Interval string `yaml:"interval"`
}
type webServerConfig struct {
Enabled bool `yaml:"enabled"`
K8sHealthzEndpoint string `yaml:"k8s_healthz_endpoint"`
ListenPort string `yaml:"listen_port"`
PrometheusMetricsEnabled bool `yaml:"prometheus_metrics_enabled"`
SSLCertificate string `yaml:"ssl_certificate"`
SSLEnabled bool `yaml:"ssl_enabled"`
Threadiness int `yaml:"threadiness"`
}
func TestMetricsConfigInFalcoConfig(t *testing.T) {
t.Parallel()
helmChartPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
testCases := []struct {
name string
values map[string]string
expected func(t *testing.T, metricsConfig, webServerConfig any)
}{
{
"defaultValues",
nil,
func(t *testing.T, metricsConfig, webServerConfig any) {
require.Len(t, metricsConfig, 11, "should have eleven items")
metrics, err := getMetricsConfig(metricsConfig)
require.NoError(t, err)
require.NotNil(t, metrics)
require.True(t, metrics.ConvertMemoryToMB)
require.False(t, metrics.Enabled)
require.False(t, metrics.IncludeEmptyValues)
require.True(t, metrics.KernelEventCountersEnabled)
require.True(t, metrics.ResourceUtilizationEnabled)
require.True(t, metrics.RulesCountersEnabled)
require.Equal(t, "1h", metrics.Interval)
require.True(t, metrics.LibbpfStatsEnabled)
require.True(t, metrics.OutputRule)
require.True(t, metrics.StateCountersEnabled)
require.False(t, metrics.KernelEventCountersPerCPUEnabled)
webServer, err := getWebServerConfig(webServerConfig)
require.NoError(t, err)
require.NotNil(t, webServer)
require.True(t, webServer.Enabled)
require.False(t, webServer.PrometheusMetricsEnabled)
},
},
{
"metricsEnabled",
map[string]string{
"metrics.enabled": "true",
},
func(t *testing.T, metricsConfig, webServerConfig any) {
require.Len(t, metricsConfig, 11, "should have eleven items")
metrics, err := getMetricsConfig(metricsConfig)
require.NoError(t, err)
require.NotNil(t, metrics)
require.True(t, metrics.ConvertMemoryToMB)
require.True(t, metrics.Enabled)
require.False(t, metrics.IncludeEmptyValues)
require.True(t, metrics.KernelEventCountersEnabled)
require.True(t, metrics.ResourceUtilizationEnabled)
require.True(t, metrics.RulesCountersEnabled)
require.Equal(t, "1h", metrics.Interval)
require.True(t, metrics.LibbpfStatsEnabled)
require.False(t, metrics.OutputRule)
require.True(t, metrics.StateCountersEnabled)
require.False(t, metrics.KernelEventCountersPerCPUEnabled)
webServer, err := getWebServerConfig(webServerConfig)
require.NoError(t, err)
require.NotNil(t, webServer)
require.True(t, webServer.Enabled)
require.True(t, webServer.PrometheusMetricsEnabled)
},
},
{
"Flip/Change Values",
map[string]string{
"metrics.enabled": "true",
"metrics.convertMemoryToMB": "false",
"metrics.includeEmptyValues": "true",
"metrics.kernelEventCountersEnabled": "false",
"metrics.resourceUtilizationEnabled": "false",
"metrics.rulesCountersEnabled": "false",
"metrics.libbpfStatsEnabled": "false",
"metrics.outputRule": "false",
"metrics.stateCountersEnabled": "false",
"metrics.interval": "1s",
"metrics.kernelEventCountersPerCPUEnabled": "true",
},
func(t *testing.T, metricsConfig, webServerConfig any) {
require.Len(t, metricsConfig, 11, "should have eleven items")
metrics, err := getMetricsConfig(metricsConfig)
require.NoError(t, err)
require.NotNil(t, metrics)
require.False(t, metrics.ConvertMemoryToMB)
require.True(t, metrics.Enabled)
require.True(t, metrics.IncludeEmptyValues)
require.False(t, metrics.KernelEventCountersEnabled)
require.False(t, metrics.ResourceUtilizationEnabled)
require.False(t, metrics.RulesCountersEnabled)
require.Equal(t, "1s", metrics.Interval)
require.False(t, metrics.LibbpfStatsEnabled)
require.False(t, metrics.OutputRule)
require.False(t, metrics.StateCountersEnabled)
require.True(t, metrics.KernelEventCountersPerCPUEnabled)
webServer, err := getWebServerConfig(webServerConfig)
require.NoError(t, err)
require.NotNil(t, webServer)
require.True(t, webServer.Enabled)
require.True(t, webServer.PrometheusMetricsEnabled)
},
},
}
for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.name, func(t *testing.T) {
t.Parallel()
options := &helm.Options{SetValues: testCase.values}
output := helm.RenderTemplate(t, options, helmChartPath, unit.ReleaseName, []string{"templates/configmap.yaml"})
var cm corev1.ConfigMap
helm.UnmarshalK8SYaml(t, output, &cm)
var config map[string]interface{}
helm.UnmarshalK8SYaml(t, cm.Data["falco.yaml"], &config)
metrics := config["metrics"]
webServer := config["webserver"]
testCase.expected(t, metrics, webServer)
})
}
}
func getMetricsConfig(config any) (*metricsConfig, error) {
var metrics metricsConfig
metricsByte, err := yaml.Marshal(config)
if err != nil {
return nil, err
}
if err := yaml.Unmarshal(metricsByte, &metrics); err != nil {
return nil, err
}
return &metrics, nil
}
func getWebServerConfig(config any) (*webServerConfig, error) {
var webServer webServerConfig
webServerByte, err := yaml.Marshal(config)
if err != nil {
return nil, err
}
if err := yaml.Unmarshal(webServerByte, &webServer); err != nil {
return nil, err
}
return &webServer, nil
}
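
getMetricsConfig and getWebServerConfig share the same marshal/unmarshal round trip; a generic variant could collapse them into one helper (a sketch assuming Go 1.18+ type parameters; not part of the original tests):

// decodeSection re-marshals the untyped YAML node and decodes it into T.
// Sketch only.
func decodeSection[T any](config any) (*T, error) {
	raw, err := yaml.Marshal(config)
	if err != nil {
		return nil, err
	}
	var out T
	if err := yaml.Unmarshal(raw, &out); err != nil {
		return nil, err
	}
	return &out, nil
}

Callers would then read, for example, decodeSection[metricsConfig](config) in place of getMetricsConfig(config).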

View File

@@ -1,60 +0,0 @@
package falcoTemplates
import (
"github.com/falcosecurity/charts/charts/falco/tests/unit"
"github.com/gruntwork-io/terratest/modules/helm"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
"path/filepath"
"strings"
"testing"
)
func TestServiceAccount(t *testing.T) {
t.Parallel()
helmChartPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
testCases := []struct {
name string
values map[string]string
expected func(t *testing.T, sa *corev1.ServiceAccount)
}{
{
"defaultValues",
nil,
func(t *testing.T, sa *corev1.ServiceAccount) {
require.Equal(t, sa.Name, "rendered-resources-falco")
},
},
{
"kind=auto",
map[string]string{
"serviceAccount.create": "false",
},
func(t *testing.T, sa *corev1.ServiceAccount) {
require.Equal(t, sa.Name, "")
},
},
}
for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.name, func(t *testing.T) {
t.Parallel()
options := &helm.Options{SetValues: testCase.values}
output, err := helm.RenderTemplateE(t, options, helmChartPath, unit.ReleaseName, []string{"templates/serviceaccount.yaml"})
if err != nil {
require.True(t, strings.Contains(err.Error(), "Error: could not find template templates/serviceaccount.yaml in chart"))
}
var sa corev1.ServiceAccount
helm.UnmarshalK8SYaml(t, output, &sa)
testCase.expected(t, &sa)
})
}
}

View File

@@ -1,160 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright 2024 The Falco Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package falcoTemplates
import (
"encoding/json"
"github.com/falcosecurity/charts/charts/falco/tests/unit"
"path/filepath"
"reflect"
"testing"
"github.com/gruntwork-io/terratest/modules/helm"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
)
type serviceMonitorTemplateTest struct {
suite.Suite
chartPath string
releaseName string
namespace string
templates []string
}
func TestServiceMonitorTemplate(t *testing.T) {
t.Parallel()
chartFullPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
suite.Run(t, &serviceMonitorTemplateTest{
Suite: suite.Suite{},
chartPath: chartFullPath,
releaseName: "falco-test",
namespace: "falco-namespace-test",
templates: []string{"templates/serviceMonitor.yaml"},
})
}
func (s *serviceMonitorTemplateTest) TestCreationDefaultValues() {
// Attempt to render the servicemonitor and check that it is not rendered by default.
_, err := helm.RenderTemplateE(s.T(), &helm.Options{}, s.chartPath, s.releaseName, s.templates)
s.Error(err, "should error")
s.Equal("error while running command: exit status 1; Error: could not find template templates/serviceMonitor.yaml in chart", err.Error())
}
func (s *serviceMonitorTemplateTest) TestEndpoint() {
defaultEndpointsJSON := `[
{
"port": "metrics",
"interval": "15s",
"scrapeTimeout": "10s",
"honorLabels": true,
"path": "/metrics",
"scheme": "http"
}
]`
var defaultEndpoints []monitoringv1.Endpoint
err := json.Unmarshal([]byte(defaultEndpointsJSON), &defaultEndpoints)
s.NoError(err)
options := &helm.Options{SetValues: map[string]string{"serviceMonitor.create": "true"}}
output := helm.RenderTemplate(s.T(), options, s.chartPath, s.releaseName, s.templates)
var svcMonitor monitoringv1.ServiceMonitor
helm.UnmarshalK8SYaml(s.T(), output, &svcMonitor)
s.Len(svcMonitor.Spec.Endpoints, 1, "should have only one endpoint")
s.True(reflect.DeepEqual(svcMonitor.Spec.Endpoints[0], defaultEndpoints[0]))
}
func (s *serviceMonitorTemplateTest) TestNamespaceSelector() {
selectorsLabelJson := `{
"app.kubernetes.io/instance": "my-falco",
"foo": "bar"
}`
options := &helm.Options{SetValues: map[string]string{"serviceMonitor.create": "true"},
SetJsonValues: map[string]string{"serviceMonitor.selector": selectorsLabelJson}}
output := helm.RenderTemplate(s.T(), options, s.chartPath, s.releaseName, s.templates)
var svcMonitor monitoringv1.ServiceMonitor
helm.UnmarshalK8SYaml(s.T(), output, &svcMonitor)
s.Len(svcMonitor.Spec.NamespaceSelector.MatchNames, 1)
s.Equal("default", svcMonitor.Spec.NamespaceSelector.MatchNames[0])
}
func (s *serviceMonitorTemplateTest) TestServiceMonitorSelector() {
testCases := []struct {
name string
values string
expected map[string]string
}{
{
"defaultValues",
"",
map[string]string{
"app.kubernetes.io/instance": "falco-test",
"app.kubernetes.io/name": "falco",
"type": "falco-metrics",
},
},
{
"customValues",
`{
"foo": "bar"
}`,
map[string]string{
"app.kubernetes.io/instance": "falco-test",
"app.kubernetes.io/name": "falco",
"foo": "bar",
"type": "falco-metrics",
},
},
{
"overwriteDefaultValues",
`{
"app.kubernetes.io/instance": "falco-overwrite",
"foo": "bar"
}`,
map[string]string{
"app.kubernetes.io/instance": "falco-overwrite",
"app.kubernetes.io/name": "falco",
"foo": "bar",
"type": "falco-metrics",
},
},
}
for _, testCase := range testCases {
testCase := testCase
s.Run(testCase.name, func() {
subT := s.T()
subT.Parallel()
options := &helm.Options{SetValues: map[string]string{"serviceMonitor.create": "true"},
SetJsonValues: map[string]string{"serviceMonitor.selector": testCase.values}}
output := helm.RenderTemplate(s.T(), options, s.chartPath, s.releaseName, s.templates)
var svcMonitor monitoringv1.ServiceMonitor
helm.UnmarshalK8SYaml(s.T(), output, &svcMonitor)
s.Equal(testCase.expected, svcMonitor.Spec.Selector.MatchLabels, "should be the same")
})
}
}

View File

@@ -1,177 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright 2024 The Falco Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package falcoTemplates
import (
"fmt"
"github.com/falcosecurity/charts/charts/falco/tests/unit"
"path/filepath"
"testing"
"github.com/gruntwork-io/terratest/modules/helm"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
corev1 "k8s.io/api/core/v1"
)
type serviceTemplateTest struct {
suite.Suite
chartPath string
releaseName string
namespace string
templates []string
}
func TestServiceTemplate(t *testing.T) {
t.Parallel()
chartFullPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
suite.Run(t, &serviceTemplateTest{
Suite: suite.Suite{},
chartPath: chartFullPath,
releaseName: "falco-test",
namespace: "falco-namespace-test",
templates: []string{"templates/service.yaml"},
})
}
func (s *serviceTemplateTest) TestCreationDefaultValues() {
// Attempt to render the service and check that it is not rendered by default.
_, err := helm.RenderTemplateE(s.T(), &helm.Options{}, s.chartPath, s.releaseName, s.templates)
s.Error(err, "should error")
s.Equal("error while running command: exit status 1; Error: could not find template templates/service.yaml in chart", err.Error())
}
func (s *serviceTemplateTest) TestDefaultLabelsValues() {
options := &helm.Options{SetValues: map[string]string{"metrics.enabled": "true"}}
output, err := helm.RenderTemplateE(s.T(), options, s.chartPath, s.releaseName, s.templates)
s.NoError(err, "should render template")
cInfo, err := unit.ChartInfo(s.T(), s.chartPath)
s.NoError(err)
// Get app version.
appVersion, found := cInfo["appVersion"]
s.True(found, "should find app version in chart info")
appVersion = appVersion.(string)
// Get chart version.
chartVersion, found := cInfo["version"]
s.True(found, "should find chart version in chart info")
// Get chart name.
chartName, found := cInfo["name"]
s.True(found, "should find chart name in chart info")
chartName = chartName.(string)
expectedLabels := map[string]string{
"helm.sh/chart": fmt.Sprintf("%s-%s", chartName, chartVersion),
"app.kubernetes.io/name": chartName.(string),
"app.kubernetes.io/instance": s.releaseName,
"app.kubernetes.io/version": appVersion.(string),
"app.kubernetes.io/managed-by": "Helm",
"type": "falco-metrics",
}
var svc corev1.Service
helm.UnmarshalK8SYaml(s.T(), output, &svc)
labels := svc.GetLabels()
for key, value := range labels {
expectedVal := expectedLabels[key]
s.Equal(expectedVal, value)
}
for key, value := range expectedLabels {
expectedVal := labels[key]
s.Equal(expectedVal, value)
}
}
func (s *serviceTemplateTest) TestCustomLabelsValues() {
options := &helm.Options{SetValues: map[string]string{"metrics.enabled": "true",
"metrics.service.labels.customLabel": "customLabelValues"}}
output, err := helm.RenderTemplateE(s.T(), options, s.chartPath, s.releaseName, s.templates)
s.NoError(err, "should render template")
cInfo, err := unit.ChartInfo(s.T(), s.chartPath)
s.NoError(err)
// Get app version.
appVersion, found := cInfo["appVersion"]
s.True(found, "should find app version in chart info")
appVersion = appVersion.(string)
// Get chart version.
chartVersion, found := cInfo["version"]
s.True(found, "should find chart version in chart info")
// Get chart name.
chartName, found := cInfo["name"]
s.True(found, "should find chart name in chart info")
chartName = chartName.(string)
expectedLabels := map[string]string{
"helm.sh/chart": fmt.Sprintf("%s-%s", chartName, chartVersion),
"app.kubernetes.io/name": chartName.(string),
"app.kubernetes.io/instance": s.releaseName,
"app.kubernetes.io/version": appVersion.(string),
"app.kubernetes.io/managed-by": "Helm",
"type": "falco-metrics",
"customLabel": "customLabelValues",
}
var svc corev1.Service
helm.UnmarshalK8SYaml(s.T(), output, &svc)
labels := svc.GetLabels()
for key, value := range labels {
expectedVal := expectedLabels[key]
s.Equal(expectedVal, value)
}
for key, value := range expectedLabels {
expectedVal := labels[key]
s.Equal(expectedVal, value)
}
}
func (s *serviceTemplateTest) TestDefaultAnnotationsValues() {
options := &helm.Options{SetValues: map[string]string{"metrics.enabled": "true"}}
output, err := helm.RenderTemplateE(s.T(), options, s.chartPath, s.releaseName, s.templates)
s.NoError(err)
var svc corev1.Service
helm.UnmarshalK8SYaml(s.T(), output, &svc)
s.Nil(svc.Annotations, "should be nil")
}
func (s *serviceTemplateTest) TestCustomAnnotationsValues() {
values := map[string]string{
"metrics.enabled": "true",
"metrics.service.annotations.annotation1": "customAnnotation1",
"metrics.service.annotations.annotation2": "customAnnotation2",
}
annotations := map[string]string{
"annotation1": "customAnnotation1",
"annotation2": "customAnnotation2",
}
options := &helm.Options{SetValues: values}
output, err := helm.RenderTemplateE(s.T(), options, s.chartPath, s.releaseName, s.templates)
s.NoError(err)
var svc corev1.Service
helm.UnmarshalK8SYaml(s.T(), output, &svc)
s.Len(svc.Annotations, 2)
for key, value := range svc.Annotations {
expectedVal := annotations[key]
s.Equal(expectedVal, value)
}
}

View File

@@ -1,649 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright 2024 The Falco Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package k8smetaPlugin
import (
"encoding/json"
"fmt"
"path/filepath"
"regexp"
"strings"
"testing"
"github.com/falcosecurity/charts/charts/falco/tests/unit"
"slices"
"github.com/gruntwork-io/terratest/modules/helm"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
)
// Using the default values we want to test that none of the k8s-metacollector resources are rendered.
func TestRenderedResourcesWithDefaultValues(t *testing.T) {
t.Parallel()
helmChartPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
options := &helm.Options{}
// Template the chart using the default values.yaml file.
output, err := helm.RenderTemplateE(t, options, helmChartPath, unit.ReleaseName, nil)
require.NoError(t, err)
// Extract all rendered files from the output.
re := regexp.MustCompile(unit.PatternK8sMetacollectorFiles)
matches := re.FindAllStringSubmatch(output, -1)
require.Len(t, matches, 0)
}
func TestRenderedResourcesWhenEnabled(t *testing.T) {
t.Parallel()
helmChartPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
// Template files that we expect to be rendered.
templateFiles := []string{
"clusterrole.yaml",
"clusterrolebinding.yaml",
"deployment.yaml",
"service.yaml",
"serviceaccount.yaml",
}
options := &helm.Options{SetValues: map[string]string{
"collectors.kubernetes.enabled": "true",
}}
// Template the chart using the default values.yaml file.
output, err := helm.RenderTemplateE(t, options, helmChartPath, unit.ReleaseName, nil)
require.NoError(t, err)
// Extract all rendered files from the output.
re := regexp.MustCompile(unit.PatternK8sMetacollectorFiles)
matches := re.FindAllStringSubmatch(output, -1)
var renderedTemplates []string
for _, match := range matches {
// Filter out test templates.
if !strings.Contains(match[1], "test-") {
renderedTemplates = append(renderedTemplates, match[1])
}
}
// Assert that the rendered resources are equal to the expected ones.
require.Equal(t, len(renderedTemplates), len(templateFiles), "should be equal")
for _, rendered := range renderedTemplates {
require.True(t, slices.Contains(templateFiles, rendered), "template files should contain all the rendered files")
}
}
func TestPluginConfigurationInFalcoConfig(t *testing.T) {
t.Parallel()
helmChartPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
testCases := []struct {
name string
values map[string]string
expected func(t *testing.T, config any)
}{
{
"defaultValues",
nil,
func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
// Get init config.
initConfig, ok := plugin["init_config"]
require.True(t, ok)
require.Len(t, initConfig, 5, "checking number of config entries in the init section")
initConfigMap := initConfig.(map[string]interface{})
// Check that the collector port is correctly set.
port := initConfigMap["collectorPort"]
require.Equal(t, float64(45000), port.(float64))
// Check that the collector nodeName is correctly set.
nodeName := initConfigMap["nodeName"]
require.Equal(t, "${FALCO_K8S_NODE_NAME}", nodeName.(string))
// Check that the collector hostname is correctly set.
hostName := initConfigMap["collectorHostname"]
require.Equal(t, fmt.Sprintf("%s-k8s-metacollector.default.svc", unit.ReleaseName), hostName.(string))
// Check that the loglevel has been set.
verbosity := initConfigMap["verbosity"]
require.Equal(t, "info", verbosity.(string))
// Check that host proc fs has been set.
hostProc := initConfigMap["hostProc"]
require.Equal(t, "/host", hostProc.(string))
// Check that the library path is set.
libPath := plugin["library_path"]
require.Equal(t, "libk8smeta.so", libPath)
},
},
{
"overrideK8s-metacollectorNamespace",
map[string]string{
"k8s-metacollector.namespaceOverride": "test",
},
func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
// Get init config.
initConfig, ok := plugin["init_config"]
require.True(t, ok)
require.Len(t, initConfig, 5, "checking number of config entries in the init section")
initConfigMap := initConfig.(map[string]interface{})
// Check that the collector port is correctly set.
port := initConfigMap["collectorPort"]
require.Equal(t, float64(45000), port.(float64))
// Check that the collector nodeName is correctly set.
nodeName := initConfigMap["nodeName"]
require.Equal(t, "${FALCO_K8S_NODE_NAME}", nodeName.(string))
// Check that the collector hostname is correctly set.
hostName := initConfigMap["collectorHostname"]
require.Equal(t, fmt.Sprintf("%s-k8s-metacollector.test.svc", unit.ReleaseName), hostName.(string))
// Check that the loglevel has been set.
verbosity := initConfigMap["verbosity"]
require.Equal(t, "info", verbosity.(string))
// Check that host proc fs has been set.
hostProc := initConfigMap["hostProc"]
require.Equal(t, "/host", hostProc.(string))
// Check that the library path is set.
libPath := plugin["library_path"]
require.Equal(t, "libk8smeta.so", libPath)
},
},
{
"overrideK8s-metacollectorName",
map[string]string{
"k8s-metacollector.fullnameOverride": "collector",
},
func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
// Get init config.
initConfig, ok := plugin["init_config"]
require.True(t, ok)
require.Len(t, initConfig, 5, "checking number of config entries in the init section")
initConfigMap := initConfig.(map[string]interface{})
// Check that the collector port is correctly set.
port := initConfigMap["collectorPort"]
require.Equal(t, float64(45000), port.(float64))
// Check that the collector nodeName is correctly set.
nodeName := initConfigMap["nodeName"]
require.Equal(t, "${FALCO_K8S_NODE_NAME}", nodeName.(string))
// Check that the collector hostname is correctly set.
hostName := initConfigMap["collectorHostname"]
require.Equal(t, "collector.default.svc", hostName.(string))
// Check that the loglevel has been set.
verbosity := initConfigMap["verbosity"]
require.Equal(t, "info", verbosity.(string))
// Check that host proc fs has been set.
hostProc := initConfigMap["hostProc"]
require.Equal(t, "/host", hostProc.(string))
// Check that the library path is set.
libPath := plugin["library_path"]
require.Equal(t, "libk8smeta.so", libPath)
},
},
{
"overrideK8s-metacollectorNamespaceAndName",
map[string]string{
"k8s-metacollector.namespaceOverride": "test",
"k8s-metacollector.fullnameOverride": "collector",
},
func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
// Get init config.
initConfig, ok := plugin["init_config"]
require.True(t, ok)
require.Len(t, initConfig, 5, "checking number of config entries in the init section")
initConfigMap := initConfig.(map[string]interface{})
// Check that the collector port is correctly set.
port := initConfigMap["collectorPort"]
require.Equal(t, float64(45000), port.(float64))
// Check that the collector nodeName is correctly set.
nodeName := initConfigMap["nodeName"]
require.Equal(t, "${FALCO_K8S_NODE_NAME}", nodeName.(string))
// Check that the collector hostname is correctly set.
hostName := initConfigMap["collectorHostname"]
require.Equal(t, "collector.test.svc", hostName.(string))
// Check that the loglevel has been set.
verbosity := initConfigMap["verbosity"]
require.Equal(t, "info", verbosity.(string))
// Check that host proc fs has been set.
hostProc := initConfigMap["hostProc"]
require.Equal(t, "/host", hostProc.(string))
// Check that the library path is set.
libPath := plugin["library_path"]
require.Equal(t, "libk8smeta.so", libPath)
},
},
{
"set CollectorHostname",
map[string]string{
"collectors.kubernetes.collectorHostname": "test",
},
func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
// Get init config.
initConfig, ok := plugin["init_config"]
require.True(t, ok)
require.Len(t, initConfig, 5, "checking number of config entries in the init section")
initConfigMap := initConfig.(map[string]interface{})
// Check that the collector port is correctly set.
port := initConfigMap["collectorPort"]
require.Equal(t, float64(45000), port.(float64))
// Check that the collector nodeName is correctly set.
nodeName := initConfigMap["nodeName"]
require.Equal(t, "${FALCO_K8S_NODE_NAME}", nodeName.(string))
// Check that the collector hostname is correctly set.
hostName := initConfigMap["collectorHostname"]
require.Equal(t, "test", hostName.(string))
// Check that the loglevel has been set.
verbosity := initConfigMap["verbosity"]
require.Equal(t, "info", verbosity.(string))
// Check that host proc fs has been set.
hostProc := initConfigMap["hostProc"]
require.Equal(t, "/host", hostProc.(string))
// Check that the library path is set.
libPath := plugin["library_path"]
require.Equal(t, "libk8smeta.so", libPath)
},
},
{
"set CollectorHostname and namespace name",
map[string]string{
"collectors.kubernetes.collectorHostname": "test-with-override",
"k8s-metacollector.namespaceOverride": "test",
"k8s-metacollector.fullnameOverride": "collector",
},
func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
// Get init config.
initConfig, ok := plugin["init_config"]
require.True(t, ok)
require.Len(t, initConfig, 5, "checking number of config entries in the init section")
initConfigMap := initConfig.(map[string]interface{})
// Check that the collector port is correctly set.
port := initConfigMap["collectorPort"]
require.Equal(t, float64(45000), port.(float64))
// Check that the collector nodeName is correctly set.
nodeName := initConfigMap["nodeName"]
require.Equal(t, "${FALCO_K8S_NODE_NAME}", nodeName.(string))
// Check that the collector hostname is correctly set.
hostName := initConfigMap["collectorHostname"]
require.Equal(t, "test-with-override", hostName.(string))
// Check that the loglevel has been set.
verbosity := initConfigMap["verbosity"]
require.Equal(t, "info", verbosity.(string))
// Check that host proc fs has been set.
hostProc := initConfigMap["hostProc"]
require.Equal(t, "/host", hostProc.(string))
// Check that the library path is set.
libPath := plugin["library_path"]
require.Equal(t, "libk8smeta.so", libPath)
},
},
{
"set collectorPort",
map[string]string{
"collectors.kubernetes.collectorPort": "8888",
},
func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
// Get init config.
initConfig, ok := plugin["init_config"]
require.True(t, ok)
initConfigMap := initConfig.(map[string]interface{})
// Check that the collector port is correctly set.
port := initConfigMap["collectorPort"]
require.Equal(t, float64(8888), port.(float64))
// Check that the collector nodeName is correctly set.
nodeName := initConfigMap["nodeName"]
require.Equal(t, "${FALCO_K8S_NODE_NAME}", nodeName.(string))
// Check that the collector hostname is correctly set.
hostName := initConfigMap["collectorHostname"]
require.Equal(t, fmt.Sprintf("%s-k8s-metacollector.default.svc", unit.ReleaseName), hostName.(string))
// Check that the loglevel has been set.
verbosity := initConfigMap["verbosity"]
require.Equal(t, "info", verbosity.(string))
// Check that host proc fs has been set.
hostProc := initConfigMap["hostProc"]
require.Equal(t, "/host", hostProc.(string))
// Check that the library path is set.
libPath := plugin["library_path"]
require.Equal(t, "libk8smeta.so", libPath)
},
},
{
"set collector logger level and hostProc",
map[string]string{
"collectors.kubernetes.verbosity": "trace",
"collectors.kubernetes.hostProc": "/host/test",
},
func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
// Get init config.
initConfig, ok := plugin["init_config"]
require.True(t, ok)
require.Len(t, initConfig, 5, "checking number of config entries in the init section")
initConfigMap := initConfig.(map[string]interface{})
// Check that the collector port is correctly set.
port := initConfigMap["collectorPort"]
require.Equal(t, float64(45000), port.(float64))
// Check that the collector nodeName is correctly set.
nodeName := initConfigMap["nodeName"]
require.Equal(t, "${FALCO_K8S_NODE_NAME}", nodeName.(string))
// Check that the collector hostname is correctly set.
hostName := initConfigMap["collectorHostname"]
require.Equal(t, fmt.Sprintf("%s-k8s-metacollector.default.svc", unit.ReleaseName), hostName.(string))
// Check that the loglevel has been set.
verbosity := initConfigMap["verbosity"]
require.Equal(t, "trace", verbosity.(string))
// Check that host proc fs has been set.
hostProc := initConfigMap["hostProc"]
require.Equal(t, "/host/test", hostProc.(string))
// Check that the library path is set.
libPath := plugin["library_path"]
require.Equal(t, "libk8smeta.so", libPath)
},
},
{
"driver disabled",
map[string]string{
"driver.enabled": "false",
},
func(t *testing.T, config any) {
require.Nil(t, config)
},
},
}
for _, testCase := range testCases {
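// Capture the range variable so each parallel subtest gets its own copy (required before Go 1.22).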
testCase := testCase
t.Run(testCase.name, func(t *testing.T) {
t.Parallel()
// Enable the collector.
if testCase.values != nil {
testCase.values["collectors.kubernetes.enabled"] = "true"
} else {
testCase.values = map[string]string{"collectors.kubernetes.enabled": "true"}
}
options := &helm.Options{SetValues: testCase.values}
output := helm.RenderTemplate(t, options, helmChartPath, unit.ReleaseName, []string{"templates/configmap.yaml"})
var cm corev1.ConfigMap
helm.UnmarshalK8SYaml(t, output, &cm)
var config map[string]interface{}
helm.UnmarshalK8SYaml(t, cm.Data["falco.yaml"], &config)
plugins := config["plugins"]
pluginsArray := plugins.([]interface{})
found := false
// Find the k8smeta plugin configuration.
for _, plugin := range pluginsArray {
if name, ok := plugin.(map[string]interface{})["name"]; ok && name == unit.K8sMetaPluginName {
testCase.expected(t, plugin)
found = true
}
}
if found {
// Check that the plugin has been added to the ones that need to be loaded.
loadplugins := config["load_plugins"]
require.True(t, slices.Contains(loadplugins.([]interface{}), unit.K8sMetaPluginName))
} else {
testCase.expected(t, nil)
loadplugins := config["load_plugins"]
require.True(t, !slices.Contains(loadplugins.([]interface{}), unit.K8sMetaPluginName))
}
})
}
}
// Test that the helper does not overwrite user's configuration.
func TestPluginConfigurationUniqueEntries(t *testing.T) {
t.Parallel()
pluginsJSON := `[
{
"init_config": null,
"library_path": "libk8saudit.so",
"name": "k8saudit",
"open_params": "http://:9765/k8s-audit"
},
{
"library_path": "libcloudtrail.so",
"name": "cloudtrail"
},
{
"init_config": "",
"library_path": "libjson.so",
"name": "json"
},
{
"init_config": {
"collectorHostname": "rendered-resources-k8s-metacollector.default.svc",
"collectorPort": 45000,
"nodeName": "${FALCO_K8S_NODE_NAME}"
},
"library_path": "libk8smeta.so",
"name": "k8smeta"
},
{
"init_config": {
"engines": {
"bpm": {
"enabled": false
},
"containerd": {
"enabled": true,
"sockets": [
"/run/containerd/containerd.sock"
]
},
"cri": {
"enabled": true,
"sockets": [
"/run/crio/crio.sock"
]
},
"docker": {
"enabled": true,
"sockets": [
"/var/run/docker.sock"
]
},
"libvirt_lxc": {
"enabled": false
},
"lxc": {
"enabled": false
},
"podman": {
"enabled": false,
"sockets": [
"/run/podman/podman.sock"
]
}
},
"hooks": [
"create"
],
"label_max_len": 100,
"with_size": false
},
"library_path": "libcontainer.so",
"name": "container"
}
]`
loadPluginsJSON := `[
"k8smeta",
"k8saudit",
"container"
]`
helmChartPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
options := &helm.Options{SetJsonValues: map[string]string{
"falco.plugins": pluginsJSON,
"falco.load_plugins": loadPluginsJSON,
}, SetValues: map[string]string{"collectors.kubernetes.enabled": "true"}}
output := helm.RenderTemplate(t, options, helmChartPath, unit.ReleaseName, []string{"templates/configmap.yaml"})
var cm corev1.ConfigMap
helm.UnmarshalK8SYaml(t, output, &cm)
var config map[string]interface{}
helm.UnmarshalK8SYaml(t, cm.Data["falco.yaml"], &config)
plugins := config["plugins"]
out, err := json.MarshalIndent(plugins, "", " ")
require.NoError(t, err)
require.Equal(t, pluginsJSON, string(out))
pluginsArray := plugins.([]interface{})
// Find the k8smeta plugin configuration.
numConfigK8smeta := 0
for _, plugin := range pluginsArray {
if name, ok := plugin.(map[string]interface{})["name"]; ok && name == unit.K8sMetaPluginName {
numConfigK8smeta++
}
}
require.Equal(t, 1, numConfigK8smeta)
// Check that the plugin has been added to the ones that need to be loaded.
loadplugins := config["load_plugins"]
require.Len(t, loadplugins.([]interface{}), 3)
require.True(t, slices.Contains(loadplugins.([]interface{}), unit.K8sMetaPluginName))
}
// Test that the falcoctl configmap contains the expected artifact references.
func TestFalcoctlRefs(t *testing.T) {
t.Parallel()
pluginsJSON := `[
{
"init_config": null,
"library_path": "libk8saudit.so",
"name": "k8saudit",
"open_params": "http://:9765/k8s-audit"
},
{
"library_path": "libcloudtrail.so",
"name": "cloudtrail"
},
{
"init_config": "",
"library_path": "libjson.so",
"name": "json"
},
{
"init_config": {
"collectorHostname": "rendered-resources-k8s-metacollector.default.svc",
"collectorPort": 45000,
"nodeName": "${FALCO_K8S_NODE_NAME}"
},
"library_path": "libk8smeta.so",
"name": "k8smeta"
}
]`
testFunc := func(t *testing.T, config any) {
// Get artifact configuration map.
configMap := config.(map[string]interface{})
artifactConfig := (configMap["artifact"]).(map[string]interface{})
// Test allowed types.
allowedTypes := artifactConfig["allowedTypes"]
require.Len(t, allowedTypes, 2)
require.True(t, slices.Contains(allowedTypes.([]interface{}), "plugin"))
require.True(t, slices.Contains(allowedTypes.([]interface{}), "rulesfile"))
// Test plugin reference.
refs := artifactConfig["install"].(map[string]interface{})["refs"].([]interface{})
require.Len(t, refs, 3)
require.True(t, slices.Contains(refs, "falco-rules:4"))
require.True(t, slices.Contains(refs, "ghcr.io/falcosecurity/plugins/plugin/k8smeta:0.3.1"))
require.True(t, slices.Contains(refs, "ghcr.io/falcosecurity/plugins/plugin/container:0.3.5"))
}
testCases := []struct {
name string
valuesJSON map[string]string
expected func(t *testing.T, config any)
}{
{
"defaultValues",
nil,
testFunc,
},
{
"setPluginConfiguration",
map[string]string{
"falco.plugins": pluginsJSON,
},
testFunc,
},
{
"driver disabled",
map[string]string{
"driver.enabled": "false",
},
func(t *testing.T, config any) {
// Get artifact configuration map.
configMap := config.(map[string]interface{})
artifactConfig := (configMap["artifact"]).(map[string]interface{})
// Test plugin reference.
refs := artifactConfig["install"].(map[string]interface{})["refs"].([]interface{})
require.True(t, !slices.Contains(refs, "ghcr.io/falcosecurity/plugins/plugin/k8smeta:0.1.0"))
},
},
}
helmChartPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.name, func(t *testing.T) {
t.Parallel()
options := &helm.Options{SetJsonValues: testCase.valuesJSON, SetValues: map[string]string{"collectors.kubernetes.enabled": "true"}}
output := helm.RenderTemplate(t, options, helmChartPath, unit.ReleaseName, []string{"templates/falcoctl-configmap.yaml"})
var cm corev1.ConfigMap
helm.UnmarshalK8SYaml(t, output, &cm)
var config map[string]interface{}
helm.UnmarshalK8SYaml(t, cm.Data["falcoctl.yaml"], &config)
testCase.expected(t, config)
})
}
}
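
For reference, unit tests like the ones above are plain Go tests and can be run with the standard tooling; a minimal sketch, assuming they live in a `tests/unit` package of the charts repository (the path is an assumption):

```bash
# Render-and-assert tests are ordinary Go tests; -run selects them by name.
go test ./tests/unit/... -run 'TestPluginConfiguration|TestFalcoctlRefs' -v
```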

View File

@ -1,63 +0,0 @@
# Default values to deploy Falco on GKE with gVisor.
# Affinity constraint for pods' scheduling.
# Needed to deploy Falco on the gVisor enabled nodes.
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: sandbox.gke.io/runtime
operator: In
values:
- gvisor
# Tolerations to allow Falco to run on Kubernetes 1.6 masters.
# Adds the necessary tolerations to allow Falco pods to be scheduled on the gVisor-enabled nodes.
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
- effect: NoSchedule
key: sandbox.gke.io/runtime
operator: Equal
value: gvisor
# Enable gVisor and set the appropriate paths.
driver:
enabled: true
kind: gvisor
gvisor:
runsc:
path: /home/containerd/usr/local/sbin
root: /run/containerd/runsc
config: /run/containerd/runsc/config.toml
# Enable the containerd collector to enrich the syscall events with metadata.
collectors:
enabled: true
containerd:
enabled: true
socket: /run/containerd/containerd.sock
falcoctl:
artifact:
install:
# -- Enable the init container. We do not recommend installing plugins for security reasons since they are executable objects.
# We install only "rulesfiles".
enabled: true
follow:
# -- Enable the sidecar container. We do not support it yet for plugins. It is used only for rules feeds such as k8saudit-rules.
enabled: true
config:
artifact:
install:
# -- List of artifacts to be installed by the falcoctl init container.
# We do not recommend installing (or following) plugins for security reasons since they are executable objects.
refs: [falco-rules:4]
follow:
# -- List of artifacts to be followed by the falcoctl sidecar container.
# We do not recommend installing (or following) plugins for security reasons since they are executable objects.
refs: [falco-rules:4]
# Set this to true to force Falco to output the logs as soon as they are emitted.
tty: false
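
These values can be applied at install time; a minimal sketch, assuming the file above is saved as `values-gvisor-gke.yaml` (the filename is an assumption):

```bash
helm repo add falcosecurity https://falcosecurity.github.io/charts
helm repo update
# Install Falco on the gVisor-enabled node pool using the values above (filename assumed).
helm install falco falcosecurity/falco --namespace falco --create-namespace -f values-gvisor-gke.yaml
```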

View File

@ -1,58 +0,0 @@
# -- Disable the drivers since we want to deploy only the k8saudit plugin.
driver:
enabled: false
# -- Disable the collectors, no syscall events to enrich with metadata.
collectors:
enabled: false
# -- Deploy Falco as a deployment. One instance of Falco is enough; the number of replicas is configurable anyway.
controller:
kind: deployment
deployment:
# -- Number of replicas when installing Falco using a deployment. Change it if you really know what you are doing.
# For more info check the section on Plugins in the README.md file.
replicas: 1
falcoctl:
artifact:
install:
# -- Enable the init container.
enabled: true
follow:
# -- Enable the sidecar container.
enabled: true
config:
artifact:
install:
# -- List of artifacts to be installed by the falcoctl init container.
refs: [k8saudit-rules:0.11, k8saudit:0.11]
follow:
# -- List of artifacts to be followed by the falcoctl sidecar container.
refs: [k8saudit-rules:0.11]
services:
- name: k8saudit-webhook
type: NodePort
ports:
- port: 9765 # See plugin open_params
nodePort: 30007
protocol: TCP
falco:
rules_files:
- /etc/falco/k8s_audit_rules.yaml
- /etc/falco/rules.d
plugins:
- name: k8saudit
library_path: libk8saudit.so
init_config:
""
# maxEventBytes: 1048576
# sslCertificate: /etc/falco/falco.pem
open_params: "http://:9765/k8s-audit"
- name: json
library_path: libjson.so
init_config: ""
# Plugins that Falco will load. Note: the same plugins are installed by the falcoctl-artifact-install init container.
load_plugins: [k8saudit, json]
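
The `k8saudit-webhook` service above is exposed on NodePort 30007; the api-server has to be pointed at it through an audit webhook configuration. A minimal sketch, assuming a node IP of `192.168.1.10` reachable from the control plane (the address is an assumption):

```bash
# Write an audit webhook kubeconfig for kube-apiserver (--audit-webhook-config-file).
# The node IP below is an assumption; use any node reachable from the control plane.
cat > audit-webhook.kubeconfig <<'EOF'
apiVersion: v1
kind: Config
clusters:
- name: falco
  cluster:
    server: http://192.168.1.10:30007/k8s-audit  # matches the plugin's open_params path
contexts:
- name: default
  context:
    cluster: falco
    user: ""
current-context: default
users: []
EOF
```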

View File

@ -1,62 +0,0 @@
# Enable the driver, and choose between the kernel module or the ebpf probe.
# Default value: kernel module.
driver:
enabled: true
kind: module
# Enable the collectors used to enrich the events with metadata.
# Check the values.yaml file for fine-grained options.
collectors:
enabled: true
# We set the controller to daemonset since we have the syscalls source enabled.
# It will ensure that every node on our cluster will be monitored by Falco.
# Please note that the api-server will use the "k8saudit-webhook" service to send
# audit logs to the Falco instances. That means that when we have multiple instances of Falco
# we cannot predict which instance the audit logs will be sent to. When testing, please check all
# the Falco instances to make sure that at least one of them has received the audit logs.
controller:
kind: daemonset
falcoctl:
artifact:
install:
# -- Enable the init container.
enabled: true
follow:
# -- Enable the sidecar container.
enabled: true
config:
artifact:
install:
# -- List of artifacts to be installed by the falcoctl init container.
refs: [falco-rules:4, k8saudit-rules:0.11, k8saudit:0.11]
follow:
# -- List of artifacts to be followed by the falcoctl sidecar container.
refs: [falco-rules:4, k8saudit-rules:0.11, k8saudit:0.11]
services:
- name: k8saudit-webhook
type: NodePort
ports:
- port: 9765 # See plugin open_params
nodePort: 30007
protocol: TCP
falco:
rules_files:
- /etc/falco/falco_rules.yaml
- /etc/falco/k8s_audit_rules.yaml
- /etc/falco/rules.d
plugins:
- name: k8saudit
library_path: libk8saudit.so
init_config:
""
# maxEventBytes: 1048576
# sslCertificate: /etc/falco/falco.pem
open_params: "http://:9765/k8s-audit"
- name: json
library_path: libjson.so
init_config: ""
load_plugins: [k8saudit, json]
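
As the comment above says, with multiple Falco instances the audit logs land on an unpredictable replica. A quick way to check them all; a minimal sketch, assuming the chart is installed in the `falco` namespace (the namespace and label selector are assumptions):

```bash
# Tail the logs of every Falco pod, prefixing each line with its pod name.
kubectl logs --namespace falco --selector app.kubernetes.io/name=falco --prefix --tail 100 | grep -i audit
```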

File diff suppressed because it is too large

View File

@ -1,753 +0,0 @@
# Change Log
This file documents all notable changes to the Falcosidekick Helm Chart. The release
numbering uses [semantic versioning](http://semver.org).
Before release 0.1.20, the helm chart could be found in the `falcosidekick` [repository](https://github.com/falcosecurity/falcosidekick/tree/master/deploy/helm/falcosidekick).
## 0.10.2
- Add type information to `volumeClaimTemplates`.
## 0.10.1
- Add an "or" condition for `configmap-ui`
## 0.10.0
- Add new features to the Loki dashboard
## 0.9.11
- Add `customtags` setting
## 0.9.10
- Fix missing values in the README
## 0.9.9
- Added Azure Workload Identity for Falcosidekick
## 0.9.8
- Upgrade to Falcosidekick 2.31.1 (fix last release)
## 0.9.7
- Upgrade to Falcosidekick 2.31.1
## 0.9.6
- Upgrade to Falcosidekick 2.31.0
## 0.9.5
- Move the `prometheus.io/scrape` annotation to the default values, to allow overrides.
## 0.9.4
- Fix Prometheus metrics names in Prometheus Rule
## 0.9.3
- Add a Grafana dashboard for the Prometheus metrics
## 0.9.2
- Add new dashboard with Loki
## 0.9.1
- Upgrade to Falcosidekick 2.30.0
## 0.8.9
- Fix customConfig mount path for webui redis
## 0.8.8
- Fix customConfig template for webui redis
## 0.8.7
- Fix securityContext for webui initContainer
## 0.8.6
- Use `redis-cli` in the initContainer of Falcosidekick-UI to wait until the Redis is up and running
- Add the possibility to override the default Redis server settings
- Allow setting a password to use with an external Redis
- Fix wrong value used for `OTLP_TRACES_PROTOCOL` env var
- Used names for the priorities in the prometheus rules
## 0.8.5
- Fix an issue with the by default missing custom CA cert
## 0.8.4
- Fix falcosidekick chart ignoring custom service type for webui redis
## 0.8.3
- Add a condition to create the secrets for the redis only if the webui is deployed
## 0.8.2
- Fix redis-availability check of the UI init-container in case externalRedis is enabled
## 0.8.1
- Allow setting resources, securityContext and image override for the wait-redis initContainer
## 0.8.0
- Upgrade to Falcosidekick 2.29.0
- Allow setting custom labels and annotations on all resources
- Allow using an existing secret and values for the env vars at the same time
- Fix missing ingressClassName settings in the values.yaml
- Add an initContainer to check if the Redis for falcosidekick-ui is up
## 0.7.22
- Upgrade redis-stack image to 7.2.0-v11
## 0.7.21
- Fix the Falco Sidekick WEBUI_URL secret value.
## 0.7.20
- Align Web UI service port from values.yaml file with Falco Sidekick WEBUI_URL secret value.
## 0.7.19
- Enhanced the ServiceMonitor to support additional properties.
- Fix the promql query for prometheusRules: FalcoErrorOutputEventsRateHigh.
## 0.7.18
- Fix PrometheusRule duplicate alert name
## 0.7.17
- Fix the labels for the serviceMonitor
## 0.7.16
- Fix the error with the `NOTES` (`index of untyped nil Use`) when the ingress is enabled for falcosidekick-ui
## 0.7.15
- Fix ServiceMonitor selector labels
## 0.7.14
- Fix duplicate component labels
## 0.7.13
- Fix ServiceMonitor port name and selector labels
## 0.7.12
- Align README values with the values.yaml file
## 0.7.11
- Fix a link in the falcosidekick README to the policy report output documentation
## 0.7.10
- Set Helm recommended labels (`app.kubernetes.io/name`, `app.kubernetes.io/instance`, `app.kubernetes.io/version`, `helm.sh/chart`, `app.kubernetes.io/part-of`, `app.kubernetes.io/managed-by`) using helpers.tpl
## 0.7.9
- noop change to the chart itself. Updated makefile.
## 0.7.8
- Fix the condition for missing cert files
## 0.7.7
- Support extraArgs in the helm chart
## 0.7.6
- Fix the behavior of `AWS IRSA` with a new value `aws.config.useirsa`
- Add a section in the README to describe how to use a subpath for `Falcosidekick-ui` ingress
- Add a `ServiceMonitor` for prometheus-operator
- Add a `PrometheusRule` for prometheus-operator
## 0.7.5
- noop change just to test the ci
## 0.7.4
- Fix volume mount when `config.tlsserver.servercrt`, `config.tlsserver.serverkey` and `config.tlsserver.cacrt` variables are defined.
## 0.7.3
- Allow setting (m)TLS server cryptographic material via the `config.tlsserver.servercrt`, `config.tlsserver.serverkey` and `config.tlsserver.cacrt` variables or through the `config.tlsserver.existingSecret` variable.
## 0.7.2
- Fix the wrong key of the secret for the user
## 0.7.1
- Allow setting a password `webui.redis.password` for the Redis of `Falcosidekick-UI`
- The user for `Falcosidekick-UI` is now set with an env var from a secret
## 0.7.0
- Support configuration of revisionHistoryLimit of the deployments
## 0.6.3
- Update Falcosidekick to 2.28.0
- Add Mutual TLS Client config
- Add TLS Server config
- Add `bracketreplacer` config
- Add `customseveritymap` to `alertmanager` output
- Add Drop Event config to `alertmanager` output
- Add `customheaders` to `elasticsearch` output
- Add `customheaders` to `loki` output
- Add `customheaders` to `grafana` output
- Add `rolearn` and `externalid` for `aws` outputs
- Add `method` to `webhook` output
- Add `customattributes` to `gcp.pubsub` output
- Add `region` to `pagerduty` output
- Add `topiccreation` and `tls` to `kafka` output
- Add `Grafana OnCall` output
- Add `Redis` output
- Add `Telegram` output
- Add `N8N` output
- Add `OpenObserver` output
## 0.6.2
- Fix interpolation of `SYSLOG_PORT`
## 0.6.1
- Add `webui.allowcors` value for `Falcosidekick-UI`
## 0.6.0
- Change the docker image for the redis pod for falcosidekick-ui
## 0.5.16
- Add `affinity`, `nodeSelector` and `tolerations` values for the Falcosidekick test-connection pod
## 0.5.15
- Set extra labels and annotations for `AlertManager` only if they're not empty
## 0.5.14
- Fix Prometheus extralabels configuration in Falcosidekick
## 0.5.13
- Fix missing quotes in Falcosidekick-UI ttl argument
## 0.5.12
- Fix missing space in Falcosidekick-UI ttl argument
## 0.5.11
- Fix missing space in Falcosidekick-UI arguments
## 0.5.10
- upgrade Falcosidekick image to 2.27.0
- upgrade Falcosidekick-UI image to 2.1.0
- Add `Yandex Data Streams` output
- Add `Node-Red` output
- Add `MQTT` output
- Add `Zincsearch` output
- Add `Gotify` output
- Add `Spyderbat` output
- Add `Tekton` output
- Add `TimescaleDB` output
- Add `AWS Security Lake` output
- Add `config.templatedfields` to set templated fields
- Add `config.slack.channel` to override `Slack` channel
- Add `config.alertmanager.extralabels` and `config.alertmanager.extraannotations` for `AlertManager` output
- Add `config.influxdb.token`, `config.influxdb.organization` and `config.influxdb.precision` for `InfluxDB` output
- Add `config.aws.checkidentity` to disallow STS checks
- Add `config.smtp.authmechanism`, `config.smtp.token`, `config.smtp.identity`, `config.smtp.trace` to manage `SMTP` auth
- Update default doc type for `Elasticsearch`
- Add `config.loki.user`, `config.loki.apikey` to manage auth to Grafana Cloud for `Loki` output
- Add `config.kafka.sasl`, `config.kafka.async`, `config.kafka.compression`, `config.kafka.balancer`, `config.kafka.clientid` to manage auth and communication for `Kafka` output
- Add `config.syslog.format` to manage the format of `Syslog` payload
- Add `webui.ttl` to set TTL of keys in Falcosidekick-UI
- Add `webui.loglevel` to set log level in Falcosidekick-UI
- Add `webui.user` to set the user:password in Falcosidekick-UI
## 0.5.9
- Fix: remove `namespace` from `clusterrole` and `clusterrolebinding` metadata
## 0.5.8
- Support `storageEnabled` for `redis` to allow ephemeral installs
## 0.5.7
- Removing unused Kafka config values
## 0.5.6
- Fixing Syslog's port import in `secrets.yaml`
## 0.5.5
- Add `webui.externalRedis` with `enabled`, `url` and `port` to values to set an external Redis database with RediSearch > v2 for the WebUI
- Add `webui.redis.enabled` option to disable the deployment of the database.
- `webui.redis.enabled` and `webui.externalRedis.enabled` are mutually exclusive
## 0.5.4
- Upgrade image to fix Panic of `Prometheus` output when `customfields` is set
- Add `extralabels` for `Loki` and `Prometheus` outputs to set fields to use as labels
- Add `expiresafter` for `AlertManager` output
## 0.5.3
- Support full configuration of `securityContext` blocks in falcosidekick and falcosidekick-ui deployments, and redis statefulset.
## 0.5.2
- Update Falcosidekick-UI image (fix wrong redirect to localhost when an ingress is used)
## 0.5.1
- Support `ingressClassName` field in falcosidekick ingresses.
## 0.5.0
### Major Changes
- Add `Policy Report` output
- Add `Syslog` output
- Add `AWS Kinesis` output
- Add `Zoho Cliq` output
- Support IRSA for AWS authentication
- Upgrade Falcosidekick-UI to v2.0.1
### Minor changes
- Allow setting custom labels for pods
## 0.4.5
- Allow additional service-ui annotations
## 0.4.4
- Fix output after chart installation when ingress is enabled
## 0.4.3
- Support `annotation` block in service
## 0.4.2
- Fix: Added the rule to use the podsecuritypolicy
- Fix: Added `ServiceAccountName` to the UI deployment
## 0.4.1
- Removes duplicate `Fission` keys from secret
## 0.4.0
### Major Changes
- Support Ingress API version `networking.k8s.io/v1`, see `ingress.hosts` and `webui.ingress.hosts` in [values.yaml](values.yaml) for a breaking change in the `path` parameter
## 0.3.17
- Fix: Remove the value for the bucket of `Yandex S3`; it enabled the output by default
## 0.3.16
### Major Changes
- Fix: set correct new image 2.24.0
## 0.3.15
### Major Changes
- Add `Fission` output
## 0.3.14
### Major Changes
- Add `Grafana` output
- Add `Yandex Cloud S3` output
- Add `Kafka REST` output
### Minor changes
- Docker image is now available on AWS ECR Public Gallery (`--set image.registry=public.ecr.aws`)
## 0.3.13
### Minor changes
- Enable extra volumes and volumemounts for `falcosidekick` via values
## 0.3.12
- Add AWS configuration field `config.aws.rolearn`
## 0.3.11
### Minor changes
- Make image registries for `falcosidekick` and `falcosidekick-ui` configurable
## 0.3.10
### Minor changes
- Fix table formatting in `README.md`
## 0.3.9
### Fixes
- Add missing `imagePullSecrets` in `falcosidekick/templates/deployment-ui.yaml`
## 0.3.8
### Major Changes
- Add `GCP Cloud Run` output
- Add `GCP Cloud Functions` output
- Add `Wavefront` output
- Allow MutualTLS for some outputs
- Add basic auth for Elasticsearch output
## 0.3.7
### Minor changes
- Fix table formatting in `README.md`
- Fix `config.azure.eventHub` parameter name in `README.md`
## 0.3.6
### Fixes
- Point to the correct name of aadpodidentity
## 0.3.5
### Minor Changes
- Fix link to Falco in the `README.md`
## 0.3.4
### Major Changes
- Bump up version (`v1.0.1`) of image for `falcosidekick-ui`
## 0.3.3
### Minor Changes
- Set default values for `OpenFaaS` output type parameters
- Fixes of documentation
## 0.3.2
### Fixes
- Add config checksum annotation to deployment pods to restart pods on config change
- Fix statsd config options in the secret to make them match the docs
## 0.3.1
### Fixes
- Fix for `s3.bucket`; it should be empty
## 0.3.0
### Major Changes
- Add `AWS S3` output
- Add `GCP Storage` output
- Add `RabbitMQ` output
- Add `OpenFaas` output
## 0.2.9
### Major Changes
- Updated falcosidekick-ui default image version to `v0.2.0`
## 0.2.8
### Fixes
- Fixed to specify `kafka.hostPort` instead of `kafka.url`
## 0.2.7
### Fixes
- Fixed missing hyphen in podidentity
## 0.2.6
### Fixes
- Fix repo and tag for `ui` image
## 0.2.5
### Major Changes
- Add `CLOUDEVENTS` output
- Add `WEBUI` output
### Minor Changes
- Add details about syntax for adding `custom_fields`
## 0.2.4
### Minor Changes
- Add `DATADOG_HOST` to secret
## 0.2.3
### Minor Changes
- Allow additional pod annotations
- Remove namespace condition in aad-pod-identity
## 0.2.2
### Major Changes
- Add `Kubeless` output
## 0.2.1
### Major Changes
- Add `PagerDuty` output
## 0.2.0
### Major Changes
- Add option to use an existing secret
- Add option to add extra environment variables
- Add `Stan` output
### Minor Changes
- Use the existing secret resource and add all possible variables there, making the deployment resource simpler to read and less error-prone
## 0.1.37
### Minor Changes
- Fix aws keys not being added to the deployment
## 0.1.36
### Minor Changes
- Fix helm test
## 0.1.35
### Major Changes
- Update image to use release 2.19.1
## 0.1.34
- New outputs can be set: `Kafka`, `AWS CloudWatchLogs`
## 0.1.33
### Minor Changes
- Fixed GCP Pub/Sub values references in `deployment.yaml`
## 0.1.32
### Major Changes
- Support release namespace configuration
## 0.1.31
### Major Changes
- New output can be set: `Googlechat`
## 0.1.30
### Major changes
- New output can be set: `GCP PubSub`
- Custom Headers can be set for `Webhook` output
- Fix typo `aipKey` for OpsGenie output
## 0.1.29
- Fix falcosidekick configuration table to use full path of configuration properties in the `README.md`
## 0.1.28
### Major changes
- New output can be set: `AWS SNS`
- Metrics in `prometheus` format can be scraped from the `/metrics` URI
## 0.1.27
### Minor Changes
- Replace extensions apiGroup/apiVersion because of deprecation
## 0.1.26
### Minor Changes
- Allow the creation of a PodSecurityPolicy, disabled by default
## 0.1.25
### Minor Changes
- Allow the configuration of the Pod securityContext, set default runAsUser and fsGroup values
## 0.1.24
### Minor Changes
- Remove duplicated `webhook` block in `values.yaml`
## 0.1.23
- fake release for triggering CI for auto-publishing
## 0.1.22
### Major Changes
- Add `imagePullSecrets`
## 0.1.21
### Minor Changes
- Fix `Azure Identity` case-sensitive value
## 0.1.20
### Major Changes
- New outputs can be set: `Azure Event Hubs`, `Discord`
### Minor Changes
- Fix wrong port name in output
## 0.1.17
### Major Changes
- New outputs can be set: `Mattermost`, `Rocketchat`
## 0.1.11
### Major Changes
- Add Pod Security Policy
## 0.1.11
### Minor Changes
- Fix wrong value reference for Elasticsearch output in deployment.yaml
## 0.1.10
### Major Changes
- New output can be set: `DogStatsD`
## 0.1.9
### Major Changes
- New output can be set: `StatsD`
## 0.1.7
### Major Changes
- New output can be set: `Opsgenie`
## 0.1.6
### Major Changes
- New output can be set: `NATS`
## 0.1.5
### Major Changes
- `Falcosidekick` and its chart are now part of `falcosecurity` organization
## 0.1.4
### Minor Changes
- Use more recent image with `Golang` 1.14
## 0.1.3
### Major Changes
- New output can be set: `Loki`
## 0.1.2
### Major Changes
- New output can be set: `SMTP`
## 0.1.1
### Major Changes
- New outputs can be set: `AWS Lambda`, `AWS SQS`, `Teams`
## 0.1.0
### Major Changes
- Initial release of Falcosidekick Helm Chart

View File

@ -1,187 +0,0 @@
# Falcosidekick
![falcosidekick](https://github.com/falcosecurity/falcosidekick/raw/master/imgs/falcosidekick_color.png)
![release](https://flat.badgen.net/github/release/falcosecurity/falcosidekick/latest?color=green) ![last commit](https://flat.badgen.net/github/last-commit/falcosecurity/falcosidekick) ![licence](https://flat.badgen.net/badge/license/MIT/blue) ![docker pulls](https://flat.badgen.net/docker/pulls/falcosecurity/falcosidekick?icon=docker)
## Description
A simple daemon for connecting [`Falco`](https://github.com/falcosecurity/falco) to your ecosystem. It takes `Falco`'s events and
forwards them to different outputs in a fan-out way.
It works as a single endpoint for as many `Falco` instances as you want:
![falco_with_falcosidekick](https://github.com/falcosecurity/falcosidekick/raw/master/imgs/falco_with_falcosidekick.png)
## Outputs
`Falcosidekick` manages a large variety of outputs with different purposes.
> **Note**
Follow the links to get the configuration of each output.
### Chat
- [**Slack**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/slack.md)
- [**Rocketchat**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/rocketchat.md)
- [**Mattermost**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/mattermost.md)
- [**Teams**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/teams.md)
- [**Discord**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/discord.md)
- [**Google Chat**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/googlechat.md)
- [**Zoho Cliq**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/cliq.md)
- [**Telegram**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/telegram.md)
### Metrics / Observability
- [**Datadog**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/datadog.md)
- [**Influxdb**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/influxdb.md)
- [**StatsD**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/statsd.md) (for monitoring of `falcosidekick`)
- [**DogStatsD**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/dogstatsd.md) (for monitoring of `falcosidekick`)
- [**Prometheus**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/prometheus.md) (for both events and monitoring of `falcosidekick`)
- [**Wavefront**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/wavefront.md)
- [**Spyderbat**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/spyderbat.md)
- [**TimescaleDB**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/timescaledb.md)
- [**Dynatrace**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/dynatrace.md)
### Alerting
- [**AlertManager**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/alertmanager.md)
- [**Opsgenie**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/opsgenie.md)
- [**PagerDuty**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/pagerduty.md)
- [**Grafana OnCall**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/grafana_oncall.md)
### Logs
- [**Elasticsearch**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/elasticsearch.md)
- [**Loki**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/loki.md)
- [**AWS CloudWatchLogs**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/aws_cloudwatch_logs.md)
- [**Grafana**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/grafana.md)
- [**Syslog**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/syslog.md)
- [**Zincsearch**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs//zincsearch.md)
- [**OpenObserve**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/openobserve.md)
### Object Storage
- [**AWS S3**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/aws_s3.md)
- [**GCP Storage**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/gcp_storage.md)
- [**Yandex S3 Storage**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/yandex_s3.md)
### FaaS / Serverless
- [**AWS Lambda**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/aws_lambda.md)
- [**GCP Cloud Run**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/gcp_cloud_run.md)
- [**GCP Cloud Functions**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/gcp_cloud_functions.md)
- [**Fission**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/fission.md)
- [**KNative (CloudEvents)**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/cloudevents.md)
- [**Kubeless**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/kubeless.md)
- [**OpenFaaS**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/openfaas.md)
- [**Tekton**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/tekton.md)
### Message queue / Streaming
- [**NATS**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/nats.md)
- [**STAN (NATS Streaming)**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/stan.md)
- [**AWS SQS**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/aws_sqs.md)
- [**AWS SNS**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/aws_sns.md)
- [**AWS Kinesis**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/aws_kinesis.md)
- [**GCP PubSub**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/gcp_pub_sub.md)
- [**Apache Kafka**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/kafka.md)
- [**Kafka Rest Proxy**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/kafkarest.md)
- [**RabbitMQ**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/rabbitmq.md)
- [**Azure Event Hubs**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/azure_event_hub.md)
- [**Yandex Data Streams**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/yandex_datastreams.md)
- [**MQTT**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/mqtt.md)
- [**Gotify**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/gotify.md)
### Email
- [**SMTP**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/smtp.md)
### Database
- [**Redis**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/redis.md)
### Web
- [**Webhook**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/webhook.md)
- [**Node-RED**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/nodered.md)
- [**WebUI (Falcosidekick UI)**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/falcosidekick-ui.md)
### SIEM
- [**AWS Security Lake**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/aws_security_lake.md)
### Workflow
- [**n8n**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/n8n.md)
### Other
- [**Policy Report**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/policy_report.md)
## Adding `falcosecurity` repository
Before installing the chart, add the `falcosecurity` charts repository:
```bash
helm repo add falcosecurity https://falcosecurity.github.io/charts
helm repo update
```
## Installing the Chart
### Install Falco + Falcosidekick + Falcosidekick-ui
To install the chart with the release name `falcosidekick` run:
```bash
helm install falcosidekick falcosecurity/falcosidekick --set webui.enabled=true
```
### With Helm chart of Falco
`Falco`, `Falcosidekick` and `Falcosidekick-ui` can be installed together in one command. All values to configure `Falcosidekick` will have to be
prefixed with `falcosidekick.`.
```bash
helm install falco falcosecurity/falco --set falcosidekick.enabled=true --set falcosidekick.webui.enabled=true
```
After a few seconds, Falcosidekick should be running.
> **Tip**: List all releases using `helm list`; a release is a name used to track a specific deployment.
## Minimum Kubernetes version
The minimum Kubernetes version required is 1.17.x
## Uninstalling the Chart
To uninstall the `falcosidekick` deployment:
```bash
helm uninstall falcosidekick
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
The following table lists the main configurable parameters of the Falcosidekick chart and their default values. See `values.yaml` for the full list.
{{ template "chart.valuesSection" . }}
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`.
> **Tip**: You can use the default [values.yaml](values.yaml)
## Metrics
A `prometheus` endpoint can be scraped at `/metrics`.
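For a quick check, the endpoint can be queried directly; a minimal sketch, assuming the service is named `falcosidekick` and listens on port 2801 (both are assumptions):

```bash
kubectl port-forward svc/falcosidekick 2801:2801 &
curl -s http://localhost:2801/metrics | head
```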
## Access Falcosidekick UI through an Ingress and a subpath
You may want to access the [`WebUI (Falcosidekick UI)`](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/falcosidekick-ui.md) dashboard not from `/` but from `/subpath` through an Ingress. Here's an example of annotations to add to the Ingress for the `nginx-ingress` controller:
```yaml
nginx.ingress.kubernetes.io/rewrite-target: /$2
nginx.ingress.kubernetes.io/use-regex: "true"
```
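
Putting it together, a minimal Ingress sketch for the `nginx-ingress` controller, assuming a `falcosidekick-ui` service on port 2802 and the subpath `/subpath` (service name, port, and path are assumptions):

```bash
# Apply a minimal Ingress that serves the UI under /subpath (names and port assumed).
kubectl apply -f - <<'EOF'
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: falcosidekick-ui
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /$2
    nginx.ingress.kubernetes.io/use-regex: "true"
spec:
  ingressClassName: nginx
  rules:
  - http:
      paths:
      - path: /subpath(/|$)(.*)
        pathType: ImplementationSpecific
        backend:
          service:
            name: falcosidekick-ui
            port:
              number: 2802
EOF
```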

View File

@ -1,751 +0,0 @@
# Falcosidekick
![falcosidekick](https://github.com/falcosecurity/falcosidekick/raw/master/imgs/falcosidekick_color.png)
![release](https://flat.badgen.net/github/release/falcosecurity/falcosidekick/latest?color=green) ![last commit](https://flat.badgen.net/github/last-commit/falcosecurity/falcosidekick) ![licence](https://flat.badgen.net/badge/license/MIT/blue) ![docker pulls](https://flat.badgen.net/docker/pulls/falcosecurity/falcosidekick?icon=docker)
## Description
A simple daemon for connecting [`Falco`](https://github.com/falcosecurity/falco) to your ecosystem. It takes `Falco`'s events and
forwards them to different outputs in a fan-out way.
It works as a single endpoint for as many `Falco` instances as you want:
![falco_with_falcosidekick](https://github.com/falcosecurity/falcosidekick/raw/master/imgs/falco_with_falcosidekick.png)
## Outputs
`Falcosidekick` manages a large variety of outputs with different purposes.
> **Note**
Follow the links to get the configuration of each output.
### Chat
- [**Slack**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/slack.md)
- [**Rocketchat**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/rocketchat.md)
- [**Mattermost**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/mattermost.md)
- [**Teams**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/teams.md)
- [**Discord**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/discord.md)
- [**Google Chat**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/googlechat.md)
- [**Zoho Cliq**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/cliq.md)
- [**Telegram**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/telegram.md)
### Metrics / Observability
- [**Datadog**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/datadog.md)
- [**Influxdb**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/influxdb.md)
- [**StatsD**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/statsd.md) (for monitoring of `falcosidekick`)
- [**DogStatsD**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/dogstatsd.md) (for monitoring of `falcosidekick`)
- [**Prometheus**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/prometheus.md) (for both events and monitoring of `falcosidekick`)
- [**Wavefront**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/wavefront.md)
- [**Spyderbat**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/spyderbat.md)
- [**TimescaleDB**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/timescaledb.md)
- [**Dynatrace**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/dynatrace.md)
### Alerting
- [**AlertManager**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/alertmanager.md)
- [**Opsgenie**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/opsgenie.md)
- [**PagerDuty**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/pagerduty.md)
- [**Grafana OnCall**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/grafana_oncall.md)
### Logs
- [**Elasticsearch**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/elasticsearch.md)
- [**Loki**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/loki.md)
- [**AWS CloudWatchLogs**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/aws_cloudwatch_logs.md)
- [**Grafana**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/grafana.md)
- [**Syslog**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/syslog.md)
- [**Zincsearch**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs//zincsearch.md)
- [**OpenObserve**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/openobserve.md)
### Object Storage
- [**AWS S3**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/aws_s3.md)
- [**GCP Storage**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/gcp_storage.md)
- [**Yandex S3 Storage**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/yandex_s3.md)
### FaaS / Serverless
- [**AWS Lambda**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/aws_lambda.md)
- [**GCP Cloud Run**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/gcp_cloud_run.md)
- [**GCP Cloud Functions**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/gcp_cloud_functions.md)
- [**Fission**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/fission.md)
- [**KNative (CloudEvents)**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/cloudevents.md)
- [**Kubeless**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/kubeless.md)
- [**OpenFaaS**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/openfaas.md)
- [**Tekton**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/tekton.md)
### Message queue / Streaming
- [**NATS**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/nats.md)
- [**STAN (NATS Streaming)**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/stan.md)
- [**AWS SQS**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/aws_sqs.md)
- [**AWS SNS**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/aws_sns.md)
- [**AWS Kinesis**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/aws_kinesis.md)
- [**GCP PubSub**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/gcp_pub_sub.md)
- [**Apache Kafka**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/kafka.md)
- [**Kafka Rest Proxy**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/kafkarest.md)
- [**RabbitMQ**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/rabbitmq.md)
- [**Azure Event Hubs**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/azure_event_hub.md)
- [**Yandex Data Streams**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/yandex_datastreams.md)
- [**MQTT**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/mqtt.md)
- [**Gotify**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/gotify.md)
### Email
- [**SMTP**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/smtp.md)
### Database
- [**Redis**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/redis.md)
### Web
- [**Webhook**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/webhook.md)
- [**Node-RED**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/nodered.md)
- [**WebUI (Falcosidekick UI)**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/falcosidekick-ui.md)
### SIEM
- [**AWS Security Lake**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/aws_security_lake.md)
### Workflow
- [**n8n**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/n8n.md)
### Other
- [**Policy Report**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/policy_report.md)
## Adding `falcosecurity` repository
Before installing the chart, add the `falcosecurity` charts repository:
```bash
helm repo add falcosecurity https://falcosecurity.github.io/charts
helm repo update
```
## Installing the Chart
### Install Falco + Falcosidekick + Falcosidekick-ui
To install the chart with the release name `falcosidekick` run:
```bash
helm install falcosidekick falcosecurity/falcosidekick --set webui.enabled=true
```
### With Helm chart of Falco
`Falco`, `Falcosidekick` and `Falcosidekick-ui` can be installed together in one command. All values to configure `Falcosidekick` will have to be
prefixed with `falcosidekick.`.
```bash
helm install falco falcosecurity/falco --set falcosidekick.enabled=true --set falcosidekick.webui.enabled=true
```
After a few seconds, Falcosidekick should be running.
> **Tip**: List all releases using `helm list`; a release is a name used to track a specific deployment.
## Minimum Kubernetes version
The minimum Kubernetes version required is 1.17.x
## Uninstalling the Chart
To uninstall the `falcosidekick` deployment:
```bash
helm uninstall falcosidekick
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
The following table lists the main configurable parameters of the Falcosidekick chart and their default values. See `values.yaml` for the full list.
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| affinity | object | `{}` | Affinity for the Sidekick pods |
| config.alertmanager.checkcert | bool | `true` | check if ssl certificate of the output is valid |
| config.alertmanager.customseveritymap | string | `""` | comma-separated list of tuples, each composed of a ':'-separated Falco priority and Alertmanager severity, used to override the severity label associated with the priority level of the Falco event. Example: debug:value_1,critical:value2. Default mapping: emergency:critical,alert:critical,critical:critical,error:warning,warning:warning,notice:information,informational:information,debug:information. |
| config.alertmanager.dropeventdefaultpriority | string | `"critical"` | default priority of dropped events, values are `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug` |
| config.alertmanager.dropeventthresholds | string | `"10000:critical, 1000:critical, 100:critical, 10:warning, 1:warning"` | comma separated list of priority re-evaluation thresholds of dropped events composed of a ':' separated integer threshold and string priority. Example: `10000:critical, 100:warning, 1:informational` |
| config.alertmanager.endpoint | string | `"/api/v1/alerts"` | alertmanager endpoint on which falcosidekick posts alerts, choice is `"/api/v1/alerts"` or `"/api/v2/alerts"`, default is `"/api/v1/alerts"` |
| config.alertmanager.expireafter | string | `""` | if set to a non-zero value, alert expires after that time in seconds (default: 0) |
| config.alertmanager.extraannotations | string | `""` | comma separated list of annotations composed of a ':' separated name and value that is added to the Alerts. Example: my_annotation_1:my_value_1, my_annotation_1:my_value_2 |
| config.alertmanager.extralabels | string | `""` | comma separated list of labels composed of a ':' separated name and value that is added to the Alerts. Example: my_label_1:my_value_1, my_label_1:my_value_2 |
| config.alertmanager.hostport | string | `""` | Comma separated list of http://{domain or ip}:{port} that will all receive the payload, if not empty, Alertmanager output is enabled |
| config.alertmanager.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.alertmanager.mutualtls | bool | `false` | if true, checkcert flag will be ignored (server cert will always be checked) |
| config.aws.accesskeyid | string | `""` | AWS Access Key Id (optional if you use EC2 Instance Profile) |
| config.aws.checkidentity | bool | `true` | check the identity credentials, set to false for local development |
| config.aws.cloudwatchlogs.loggroup | string | `""` | AWS CloudWatch Logs Group name, if not empty, CloudWatch Logs output is *enabled* |
| config.aws.cloudwatchlogs.logstream | string | `""` | AWS CloudWatch Logs Stream name, if empty, Falcosidekick will try to create a log stream |
| config.aws.cloudwatchlogs.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.aws.externalid | string | `""` | External id for the role to assume (optional if you use EC2 Instance Profile) |
| config.aws.kinesis.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.aws.kinesis.streamname | string | `""` | AWS Kinesis Stream Name, if not empty, Kinesis output is *enabled* |
| config.aws.lambda.functionname | string | `""` | AWS Lambda Function Name, if not empty, AWS Lambda output is *enabled* |
| config.aws.lambda.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.aws.region | string | `""` | AWS Region (optional if you use EC2 Instance Profile) |
| config.aws.rolearn | string | `""` | AWS IAM role ARN for falcosidekick service account to associate with (optional if you use EC2 Instance Profile) |
| config.aws.s3.bucket | string | `""` | AWS S3, bucket name |
| config.aws.s3.endpoint | string | `""` | Endpoint URL that overrides the default generated endpoint, use this for S3 compatible APIs |
| config.aws.s3.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.aws.s3.objectcannedacl | string | `"bucket-owner-full-control"` | Canned ACL (x-amz-acl) to use when creating the object |
| config.aws.s3.prefix | string | `""` | AWS S3, name of prefix, keys will have format: s3://<bucket>/<prefix>/YYYY-MM-DD/YYYY-MM-DDTHH:mm:ss.s+01:00.json |
| config.aws.secretaccesskey | string | `""` | AWS Secret Access Key (optional if you use EC2 Instance Profile) |
| config.aws.securitylake.accountid | string | `""` | Account ID |
| config.aws.securitylake.batchsize | int | `1000` | Max number of events by parquet file |
| config.aws.securitylake.bucket | string | `""` | Bucket for AWS SecurityLake data, if not empty, AWS SecurityLake output is enabled |
| config.aws.securitylake.interval | int | `5` | Time in minutes between two puts to S3 (must be between 5 and 60min) |
| config.aws.securitylake.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.aws.securitylake.prefix | string | `""` | Prefix for keys |
| config.aws.securitylake.region | string | `""` | Bucket Region |
| config.aws.sns.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.aws.sns.rawjson | bool | `false` | Send RawJSON from `falco` or parse it to AWS SNS |
| config.aws.sns.topicarn | string | `""` | AWS SNS TopicARN, if not empty, AWS SNS output is *enabled* |
| config.aws.sqs.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.aws.sqs.url | string | `""` | AWS SQS Queue URL, if not empty, AWS SQS output is *enabled* |
| config.aws.useirsa | bool | `true` | Use IRSA, if true, the rolearn value will be used to set the ServiceAccount annotations and not the env var |
| config.azure.eventHub.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.azure.eventHub.name | string | `""` | Name of the Hub, if not empty, EventHub is *enabled* |
| config.azure.eventHub.namespace | string | `""` | Name of the space the Hub is in |
| config.azure.podIdentityClientID | string | `""` | Azure Identity Client ID |
| config.azure.podIdentityName | string | `""` | Azure Identity name |
| config.azure.resourceGroupName | string | `""` | Azure Resource Group name |
| config.azure.subscriptionID | string | `""` | Azure Subscription ID |
| config.azure.workloadIdentityClientID | string | `""` | Azure Workload Identity Client ID |
| config.bracketreplacer | string | `""` | if not empty, the brackets in keys of Output Fields are replaced |
| config.cliq.icon | string | `""` | Cliq icon (avatar) |
| config.cliq.messageformat | string | `""` | a Go template to format Cliq Text above Attachment, displayed in addition to the output from `cliq.outputformat`. If empty, no Text is displayed before sections. |
| config.cliq.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.cliq.outputformat | string | `"all"` | `all` (default), `text` (only text is displayed in Cliq), `fields` (only fields are displayed in Cliq) |
| config.cliq.useemoji | bool | `true` | Prefix message text with an emoji |
| config.cliq.webhookurl | string | `""` | Zoho Cliq Channel URL (ex: <https://cliq.zoho.eu/api/v2/channelsbyname/XXXX/message?zapikey=YYYY>), if not empty, Cliq Chat output is *enabled* |
| config.cloudevents.address | string | `""` | CloudEvents consumer http address, if not empty, CloudEvents output is *enabled* |
| config.cloudevents.extension | string | `""` | Extensions to add in the outbound Event, useful for routing |
| config.cloudevents.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.customfields | string | `""` | a list of escaped comma separated custom fields to add to falco events, syntax is "key:value\,key:value" |
| config.customtags | string | `""` | a list of escaped comma separated custom tags to add to falco events, syntax is "tag\,tag" |
| config.datadog.apikey | string | `""` | Datadog API Key, if not `empty`, Datadog output is *enabled* |
| config.datadog.host | string | `""` | Datadog host. Override if you are on the Datadog EU site. Defaults to the American site with "<https://api.datadoghq.com>" |
| config.datadog.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.datadoglogs.apikey | string | `""` | Datadog API Key, if not empty, Datadog Logs output is enabled |
| config.datadoglogs.host | string | `""` | Datadog host. Override if you are on the Datadog EU site. Defaults to the American site with "https://http-intake.logs.datadoghq.com/" |
| config.datadoglogs.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` (default) |
| config.datadoglogs.service | string | `""` | The name of the application or service generating the log events. |
| config.debug | bool | `false` | DEBUG environment variable |
| config.discord.icon | string | `""` | Discord icon (avatar) |
| config.discord.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.discord.webhookurl | string | `""` | Discord WebhookURL (ex: <https://discord.com/api/webhooks/xxxxxxxxxx>...), if not empty, Discord output is *enabled* |
| config.dogstatsd.forwarder | string | `""` | The address for the DogStatsD forwarder, in the form <http://host:port>, if not empty DogStatsD is *enabled* |
| config.dogstatsd.namespace | string | `"falcosidekick."` | A prefix for all metrics |
| config.dogstatsd.tags | string | `""` | A comma-separated list of tags to add to all metrics |
| config.dynatrace.apitoken | string | `""` | Dynatrace API token with the "logs.ingest" scope, more info : https://dt-url.net/8543sda, if not empty, Dynatrace output is enabled |
| config.dynatrace.apiurl | string | `""` | Dynatrace API url, use https://ENVIRONMENTID.live.dynatrace.com/api for Dynatrace SaaS and https://YOURDOMAIN/e/ENVIRONMENTID/api for Dynatrace Managed, more info : https://dt-url.net/ej43qge |
| config.dynatrace.checkcert | bool | `true` | check if ssl certificate of the output is valid |
| config.dynatrace.minimumpriority | string | `""` | minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" |
| config.elasticsearch.apikey | string | `""` | Use this APIKey to authenticate to Elasticsearch if the APIKey is not empty (default: "") |
| config.elasticsearch.batching | object | `{"batchsize":"5242880","enabled":true,"flushinterval":"1s"}` | batching configuration; dramatically improves throughput by utilizing the Elasticsearch `_bulk` API |
| config.elasticsearch.batching.batchsize | string | `"5242880"` | batch size in bytes (default: 5 MB) (use string to avoid the conversion into float64 by helm) |
| config.elasticsearch.batching.enabled | bool | `true` | if true enables batching |
| config.elasticsearch.batching.flushinterval | string | `"1s"` | batch flush interval (default: 1s) |
| config.elasticsearch.checkcert | bool | `true` | check if ssl certificate of the output is valid |
| config.elasticsearch.createindextemplate | bool | `false` | Create an index template (default: false) |
| config.elasticsearch.customheaders | string | `""` | a list of comma separated custom headers to add, syntax is "key:value,key:value" |
| config.elasticsearch.enablecompression | bool | `false` | if true enables gzip compression for http requests (default: false) |
| config.elasticsearch.flattenfields | bool | `false` | Replace . by _ to avoid mapping conflicts, force to true if createindextemplate==true (default: false) |
| config.elasticsearch.hostport | string | `""` | Elasticsearch <http://host:port>, if not `empty`, Elasticsearch is *enabled* |
| config.elasticsearch.index | string | `"falco"` | Elasticsearch index |
| config.elasticsearch.maxconcurrentrequests | int | `1` | max number of concurrent http requests (default: 1) |
| config.elasticsearch.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.elasticsearch.mutualtls | bool | `false` | if true, checkcert flag will be ignored (server cert will always be checked) |
| config.elasticsearch.numberofreplicas | int | `3` | Number of replicas set by the index template (default: 3) |
| config.elasticsearch.numberofshards | int | `3` | Number of shards set by the index template (default: 3) |
| config.elasticsearch.password | string | `""` | Use this password to authenticate to Elasticsearch if the password is not empty |
| config.elasticsearch.pipeline | string | `""` | Optional ingest pipeline name |
| config.elasticsearch.suffix | string | `"daily"` | Date suffix for index rotation : daily, monthly, annually, none |
| config.elasticsearch.type | string | `"_doc"` | Elasticsearch document type |
| config.elasticsearch.username | string | `""` | Use this username to authenticate to Elasticsearch if the username is not empty |
| config.existingSecret | string | `""` | Existing secret with configuration |
| config.extraArgs | list | `[]` | Extra command-line arguments |
| config.extraEnv | list | `[]` | Extra environment variables |
| config.fission.checkcert | bool | `true` | check if ssl certificate of the output is valid |
| config.fission.function | string | `""` | Name of Fission function, if not empty, Fission is enabled |
| config.fission.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.fission.mutualtls | bool | `false` | if true, checkcert flag will be ignored (server cert will always be checked) |
| config.fission.routernamespace | string | `"fission"` | Namespace of Fission Router, "fission" (default) |
| config.fission.routerport | int | `80` | Port of service of Fission Router |
| config.fission.routerservice | string | `"router"` | Service of Fission Router, "router" (default) |
| config.gcp.cloudfunctions.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.gcp.cloudfunctions.name | string | `""` | The name of the Cloud Function which is in form `projects/<project_id>/locations/<region>/functions/<function_name>` |
| config.gcp.cloudrun.endpoint | string | `""` | the URL of the Cloud Run function |
| config.gcp.cloudrun.jwt | string | `""` | JWT for the private access to Cloud Run function |
| config.gcp.cloudrun.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.gcp.credentials | string | `""` | Base64 encoded JSON key file for the GCP service account |
| config.gcp.pubsub.customattributes | string | `""` | a list of comma separated custom headers to add, syntax is "key:value,key:value" |
| config.gcp.pubsub.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.gcp.pubsub.projectid | string | `""` | The GCP Project ID containing the Pub/Sub Topic |
| config.gcp.pubsub.topic | string | `""` | Name of the Pub/Sub topic |
| config.gcp.storage.bucket | string | `""` | The name of the bucket |
| config.gcp.storage.minimumpriority | string | `"debug"` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.gcp.storage.prefix | string | `""` | Name of prefix, keys will have format: gs://<bucket>/<prefix>/YYYY-MM-DD/YYYY-MM-DDTHH:mm:ss.s+01:00.json |
| config.googlechat.messageformat | string | `""` | a Go template to format Google Chat Text above Attachment, displayed in addition to the output from `config.googlechat.outputformat`. If empty, no Text is displayed before Attachment |
| config.googlechat.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.googlechat.outputformat | string | `"all"` | `all` (default), `text` (only text is displayed in Google chat) |
| config.googlechat.webhookurl | string | `""` | Google Chat Webhook URL (ex: <https://chat.googleapis.com/v1/spaces/XXXXXX/YYYYYY>), if not `empty`, Google Chat output is *enabled* |
| config.gotify.checkcert | bool | `true` | check if ssl certificate of the output is valid |
| config.gotify.format | string | `"markdown"` | Format of the messages (plaintext, markdown, json) |
| config.gotify.hostport | string | `""` | http://{domain or ip}:{port}, if not empty, Gotify output is enabled |
| config.gotify.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.gotify.token | string | `""` | API Token |
| config.grafana.allfieldsastags | bool | `false` | if true, all custom fields are added as tags (default: false) |
| config.grafana.apikey | string | `""` | API Key to authenticate to Grafana, if not empty, Grafana output is *enabled* |
| config.grafana.checkcert | bool | `true` | check if ssl certificate of the output is valid |
| config.grafana.customheaders | string | `""` | a list of comma separated custom headers to add, syntax is "key:value,key:value" |
| config.grafana.dashboardid | string | `""` | annotations are scoped to a specific dashboard. Optional. |
| config.grafana.hostport | string | `""` | http://{domain or ip}:{port}, if not empty, Grafana output is *enabled* |
| config.grafana.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.grafana.mutualtls | bool | `false` | if true, checkcert flag will be ignored (server cert will always be checked) |
| config.grafana.panelid | string | `""` | annotations are scoped to a specific panel. Optional. |
| config.grafanaoncall.checkcert | bool | `true` | check if ssl certificate of the output is valid |
| config.grafanaoncall.customheaders | string | `""` | a list of comma separated custom headers to add, syntax is "key:value,key:value" |
| config.grafanaoncall.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.grafanaoncall.mutualtls | bool | `false` | if true, checkcert flag will be ignored (server cert will always be checked) |
| config.grafanaoncall.webhookurl | string | `""` | if not empty, Grafana OnCall output is enabled |
| config.influxdb.checkcert | bool | `true` | check if ssl certificate of the output is valid |
| config.influxdb.database | string | `"falco"` | Influxdb database |
| config.influxdb.hostport | string | `""` | Influxdb <http://host:port>, if not `empty`, Influxdb is *enabled* |
| config.influxdb.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.influxdb.mutualtls | bool | `false` | if true, checkcert flag will be ignored (server cert will always be checked) |
| config.influxdb.organization | string | `""` | Influxdb organization |
| config.influxdb.password | string | `""` | Password to use if auth is *enabled* in Influxdb |
| config.influxdb.precision | string | `"ns"` | write precision |
| config.influxdb.token | string | `""` | API token to use if auth in enabled in Influxdb (disables user and password) |
| config.influxdb.user | string | `""` | User to use if auth is *enabled* in Influxdb |
| config.kafka.async | bool | `false` | produce messages without blocking |
| config.kafka.balancer | string | `"round_robin"` | partition balancing strategy when producing |
| config.kafka.clientid | string | `""` | specify a client.id when communicating with the broker for tracing |
| config.kafka.compression | string | `"NONE"` | message compression algorithm, `NONE` for no compression (GZIP|SNAPPY|LZ4|ZSTD|NONE) |
| config.kafka.hostport | string | `""` | comma separated list of Apache Kafka bootstrap nodes for establishing the initial connection to the cluster (ex: localhost:9092,localhost:9093). Defaults to port 9092 if no port is specified after the domain, if not empty, Kafka output is *enabled* |
| config.kafka.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.kafka.password | string | `""` | use this password to authenticate to Kafka via SASL |
| config.kafka.requiredacks | string | `"NONE"` | number of acknowledgements required from partition replicas before receiving a response |
| config.kafka.sasl | string | `""` | SASL authentication mechanism, if empty, no authentication (PLAIN|SCRAM_SHA256|SCRAM_SHA512) |
| config.kafka.tls | bool | `false` | Use TLS for the connections |
| config.kafka.topic | string | `""` | Name of the topic, if not empty, Kafka output is enabled |
| config.kafka.topiccreation | bool | `false` | auto create the topic if it doesn't exist |
| config.kafka.username | string | `""` | use this username to authenticate to Kafka via SASL |
| config.kafkarest.address | string | `""` | The full URL to the topic (example "http://kafkarest:8082/topics/test") |
| config.kafkarest.checkcert | bool | `true` | check if ssl certificate of the output is valid |
| config.kafkarest.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.kafkarest.mutualtls | bool | `false` | if true, checkcert flag will be ignored (server cert will always be checked) |
| config.kafkarest.version | int | `2` | Kafka Rest Proxy API version 2|1 (default: 2) |
| config.kubeless.checkcert | bool | `true` | check if ssl certificate of the output is valid |
| config.kubeless.function | string | `""` | Name of Kubeless function, if not empty, Kubeless is *enabled* |
| config.kubeless.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.kubeless.mutualtls | bool | `false` | if true, checkcert flag will be ignored (server cert will always be checked) |
| config.kubeless.namespace | string | `""` | Namespace of Kubeless function (mandatory) |
| config.kubeless.port | int | `8080` | Port of service of Kubeless function. Default is `8080` |
| config.loki.apikey | string | `""` | API Key for Grafana Logs |
| config.loki.checkcert | bool | `true` | check if ssl certificate of the output is valid |
| config.loki.customheaders | string | `""` | a list of comma separated custom headers to add, syntax is "key:value,key:value" |
| config.loki.endpoint | string | `"/loki/api/v1/push"` | Loki endpoint URL path, more info: <https://grafana.com/docs/loki/latest/api/#post-apiprompush> |
| config.loki.extralabels | string | `""` | comma separated list of fields to use as labels in addition to rule, source, priority, tags and custom_fields |
| config.loki.format | string | `"text"` | Format for the log entry value: json, text (default) |
| config.loki.grafanaDashboard | object | `{"configMap":{"folder":"","name":"falcosidekick-loki-dashboard-grafana","namespace":""},"enabled":true}` | dashboard for Grafana |
| config.loki.grafanaDashboard.configMap | object | `{"folder":"","name":"falcosidekick-loki-dashboard-grafana","namespace":""}` | configmaps to be deployed that contain a grafana dashboard. |
| config.loki.grafanaDashboard.configMap.folder | string | `""` | folder where the dashboard is stored by grafana. |
| config.loki.grafanaDashboard.configMap.name | string | `"falcosidekick-loki-dashboard-grafana"` | name specifies the name for the configmap. |
| config.loki.grafanaDashboard.configMap.namespace | string | `""` | namespace specifies the namespace for the configmap. |
| config.loki.grafanaDashboard.enabled | bool | `true` | enabled specifies whether this dashboard should be deployed. |
| config.loki.hostport | string | `""` | Loki <http://host:port>, if not `empty`, Loki is *enabled* |
| config.loki.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.loki.mutualtls | bool | `false` | if true, checkcert flag will be ignored (server cert will always be checked) |
| config.loki.tenant | string | `""` | Loki tenant, if not `empty`, Loki tenant is *enabled* |
| config.loki.user | string | `""` | user for Grafana Logs |
| config.mattermost.checkcert | bool | `true` | check if ssl certificate of the output is valid |
| config.mattermost.footer | string | `""` | Mattermost Footer |
| config.mattermost.icon | string | `""` | Mattermost icon (avatar) |
| config.mattermost.messageformat | string | `""` | a Go template to format Mattermost Text above Attachment, displayed in addition to the output from `mattermost.outputformat`. If empty, no Text is displayed before Attachment |
| config.mattermost.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.mattermost.mutualtls | bool | `false` | if true, checkcert flag will be ignored (server cert will always be checked) |
| config.mattermost.outputformat | string | `"all"` | `all` (default), `text` (only text is displayed in Mattermost), `fields` (only fields are displayed in Mattermost) |
| config.mattermost.username | string | `""` | Mattermost username |
| config.mattermost.webhookurl | string | `""` | Mattermost Webhook URL (ex: <https://XXXX/hooks/YYYY>), if not `empty`, Mattermost output is *enabled* |
| config.mqtt.broker | string | `""` | Broker address, can start with tcp:// or ssl://, if not empty, MQTT output is enabled |
| config.mqtt.checkcert | bool | `true` | check if ssl certificate of the output is valid |
| config.mqtt.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.mqtt.password | string | `""` | Password if the authentication is enabled in the broker |
| config.mqtt.qos | int | `0` | QOS for messages |
| config.mqtt.retained | bool | `false` | If true, messages are retained |
| config.mqtt.topic | string | `"falco/events"` | Topic for messages |
| config.mqtt.user | string | `""` | User if the authentication is enabled in the broker |
| config.mutualtlsclient.cacertfile | string | `""` | CA certification file for server certification for mutual TLS authentication, takes priority over mutualtlsfilespath if not empty |
| config.mutualtlsclient.certfile | string | `""` | client certification file for mutual TLS client certification, takes priority over mutualtlsfilespath if not empty |
| config.mutualtlsclient.keyfile | string | `""` | client key file for mutual TLS client certification, takes priority over mutualtlsfilespath if not empty |
| config.mutualtlsfilespath | string | `"/etc/certs"` | folder which will be used to store client.crt, client.key and ca.crt files for mutual tls for outputs, will be deprecated in the future (default: "/etc/certs") |
| config.n8n.address | string | `""` | N8N address, if not empty, N8N output is enabled |
| config.n8n.checkcert | bool | `true` | check if ssl certificate of the output is valid |
| config.n8n.headerauthname | string | `""` | Header Auth Key to authenticate with N8N |
| config.n8n.headerauthvalue | string | `""` | Header Auth Value to authenticate with N8N |
| config.n8n.minimumpriority | string | `""` | minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" |
| config.n8n.password | string | `""` | Password to authenticate with N8N in basic auth |
| config.n8n.user | string | `""` | Username to authenticate with N8N in basic auth |
| config.nats.checkcert | bool | `true` | check if ssl certificate of the output is valid |
| config.nats.hostport | string | `""` | NATS "nats://host:port", if not `empty`, NATS is *enabled* |
| config.nats.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.nats.mutualtls | bool | `false` | if true, checkcert flag will be ignored (server cert will always be checked) |
| config.nats.subjecttemplate | string | `"falco.<priority>.<rule>"` | template for the subject, tokens <priority> and <rule> will be automatically replaced (default: falco.<priority>.<rule>) |
| config.nodered.address | string | `""` | Node-RED address, if not empty, Node-RED output is enabled |
| config.nodered.checkcert | bool | `true` | check if ssl certificate of the output is valid |
| config.nodered.customheaders | string | `""` | Custom headers to add in POST, useful for Authentication, syntax is "key:value\,key:value" |
| config.nodered.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.nodered.password | string | `""` | Password if Basic Auth is enabled for 'http in' node in Node-RED |
| config.nodered.user | string | `""` | User if Basic Auth is enabled for 'http in' node in Node-RED |
| config.openfaas.checkcert | bool | `true` | check if ssl certificate of the output is valid |
| config.openfaas.functionname | string | `""` | Name of OpenFaaS function, if not empty, OpenFaaS is *enabled* |
| config.openfaas.functionnamespace | string | `"openfaas-fn"` | Namespace of OpenFaaS function, "openfaas-fn" (default) |
| config.openfaas.gatewaynamespace | string | `"openfaas"` | Namespace of OpenFaaS Gateway, "openfaas" (default) |
| config.openfaas.gatewayport | int | `8080` | Port of service of OpenFaaS Gateway. Default is `8080` |
| config.openfaas.gatewayservice | string | `"gateway"` | Service of OpenFaaS Gateway, "gateway" (default) |
| config.openfaas.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.openfaas.mutualtls | bool | `false` | if true, checkcert flag will be ignored (server cert will always be checked) |
| config.openobserve.checkcert | bool | `true` | check if ssl certificate of the output is valid |
| config.openobserve.customheaders | string | `""` | a list of comma separated custom headers to add, syntax is "key:value,key:value" |
| config.openobserve.hostport | string | `""` | http://{domain or ip}:{port}, if not empty, OpenObserve output is enabled |
| config.openobserve.minimumpriority | string | `""` | minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" |
| config.openobserve.mutualtls | bool | `false` | if true, checkcert flag will be ignored (server cert will always be checked) |
| config.openobserve.organizationname | string | `"default"` | Organization name |
| config.openobserve.password | string | `""` | use this password to authenticate to OpenObserve if the password is not empty |
| config.openobserve.streamname | string | `"falco"` | Stream name |
| config.openobserve.username | string | `""` | use this username to authenticate to OpenObserve if the username is not empty |
| config.opsgenie.apikey | string | `""` | Opsgenie API Key, if not empty, Opsgenie output is *enabled* |
| config.opsgenie.checkcert | bool | `true` | check if ssl certificate of the output is valid |
| config.opsgenie.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.opsgenie.mutualtls | bool | `false` | if true, checkcert flag will be ignored (server cert will always be checked) |
| config.opsgenie.region | string | `""` | region of your domain (`us` or `eu`) |
| config.otlp.metrics.checkcert | bool | `true` | Set to false if you want to skip TLS certificate validation (only with https) (default: true) |
| config.otlp.metrics.endpoint | string | `""` | OTLP endpoint, typically in the form http{s}://{domain or ip}:4318/v1/metrics |
| config.otlp.metrics.extraattributes | string | `""` | Comma-separated list of fields to use as labels in addition to source, priority, rule, hostname, tags, k8s_ns_name, k8s_pod_name and custom_fields |
| config.otlp.metrics.extraenvvars | list | `[]` | Extra env vars (override the other settings) (default: "") |
| config.otlp.metrics.headers | string | `""` | List of headers to apply to all outgoing metrics in the form of "some-key=some-value,other-key=other-value" (default: "") |
| config.otlp.metrics.minimumpriority | string | `""` | Minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default: "") |
| config.otlp.metrics.protocol | string | `"grpc"` | OTLP transport protocol to be used for metrics data; it can be "grpc" or "http/protobuf" (default: "grpc") |
| config.otlp.metrics.timeout | int | `1000` | OTLP timeout for outgoing metrics in milliseconds (default: "" which uses SDK default: 10000) |
| config.otlp.traces.checkcert | bool | `true` | check if ssl certificate of the output is valid |
| config.otlp.traces.duration | int | `1000` | Artificial span duration in milliseconds (default: 1000) |
| config.otlp.traces.endpoint | string | `""` | OTLP endpoint in the form of http://{domain or ip}:4318/v1/traces, if not empty, OTLP Traces output is enabled |
| config.otlp.traces.extraenvvars | object | `{}` | Extra env vars (override the other settings) |
| config.otlp.traces.headers | string | `""` | OTLP headers: list of headers to apply to all outgoing traces in the form of "some-key=some-value,other-key=other-value" (default: "") |
| config.otlp.traces.minimumpriority | string | `""` | minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" |
| config.otlp.traces.protocol | string | `""` | OTLP protocol http/json, http/protobuf, grpc (default: "" which uses SDK default: http/json) |
| config.otlp.traces.synced | bool | `false` | Set to true if you want traces to be sent synchronously (default: false) |
| config.otlp.traces.timeout | int | `1000` | OTLP timeout: timeout value in milliseconds (default: "" which uses SDK default: 10000) |
| config.outputFieldFormat | string | `""` | |
| config.pagerduty.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.pagerduty.region | string | `"us"` | Pagerduty Region, can be 'us' or 'eu' |
| config.pagerduty.routingkey | string | `""` | Pagerduty Routing Key, if not empty, Pagerduty output is *enabled* |
| config.policyreport.enabled | bool | `false` | if true; policyreport output is *enabled* |
| config.policyreport.kubeconfig | string | `"~/.kube/config"` | Kubeconfig file to use (only if falcosidekick is running outside the cluster) |
| config.policyreport.maxevents | int | `1000` | the max number of events that can be in a policyreport |
| config.policyreport.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.policyreport.prunebypriority | bool | `false` | if true; the events with lowest severity are pruned first, in FIFO order |
| config.prometheus.extralabels | string | `""` | comma separated list of fields to use as labels in addition to rule, source, priority, tags and custom_fields |
| config.quickwit.apiendpoint | string | `"/api/v1"` | API endpoint (containing the API version, overridable in case Quickwit is behind a reverse proxy with URL rewriting) |
| config.quickwit.autocreateindex | bool | `false` | Autocreate a falco index mapping if it doesn't exist |
| config.quickwit.checkcert | bool | `true` | check if ssl certificate of the output is valid |
| config.quickwit.customHeaders | string | `""` | a list of comma separated custom headers to add, syntax is "key:value,key:value" |
| config.quickwit.hostport | string | `""` | http://{domain or ip}:{port}, if not empty, Quickwit output is enabled |
| config.quickwit.index | string | `"falco"` | Index |
| config.quickwit.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.quickwit.mutualtls | bool | `false` | if true, checkcert flag will be ignored (server cert will always be checked) |
| config.quickwit.version | string | `"0.7"` | Version of Quickwit |
| config.rabbitmq.minimumpriority | string | `"debug"` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.rabbitmq.queue | string | `""` | Rabbitmq Queue name |
| config.rabbitmq.url | string | `""` | Rabbitmq URL, if not empty, Rabbitmq output is *enabled* |
| config.redis.address | string | `""` | Redis address, if not empty, Redis output is enabled |
| config.redis.database | int | `0` | Redis database number |
| config.redis.key | string | `"falco"` | Redis storage key name for hashmap, list |
| config.redis.minimumpriority | string | `""` | minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" |
| config.redis.password | string | `""` | Password to authenticate with Redis |
| config.redis.storagetype | string | `"list"` | Redis storage type: hashmap or list |
| config.rocketchat.checkcert | bool | `true` | check if ssl certificate of the output is valid |
| config.rocketchat.icon | string | `""` | Rocketchat icon (avatar) |
| config.rocketchat.messageformat | string | `""` | a Go template to format Rocketchat Text above Attachment, displayed in addition to the output from `rocketchat.outputformat`. If empty, no Text is displayed before Attachment |
| config.rocketchat.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.rocketchat.mutualtls | bool | `false` | if true, checkcert flag will be ignored (server cert will always be checked) |
| config.rocketchat.outputformat | string | `"all"` | `all` (default), `text` (only text is displayed in Rocketchat), `fields` (only fields are displayed in Rocketchat) |
| config.rocketchat.username | string | `""` | Rocketchat username |
| config.rocketchat.webhookurl | string | `""` | Rocketchat Webhook URL (ex: <https://XXXX/hooks/YYYY>), if not `empty`, Rocketchat output is *enabled* |
| config.slack.channel | string | `""` | Slack channel (optional) |
| config.slack.footer | string | `""` | Slack Footer |
| config.slack.icon | string | `""` | Slack icon (avatar) |
| config.slack.messageformat | string | `""` | a Go template to format Slack Text above Attachment, displayed in addition to the output from `slack.outputformat`. If empty, no Text is displayed before Attachment |
| config.slack.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.slack.outputformat | string | `"all"` | `all` (default), `text` (only text is displayed in Slack), `fields` (only fields are displayed in Slack) |
| config.slack.username | string | `""` | Slack username |
| config.slack.webhookurl | string | `""` | Slack Webhook URL (ex: <https://hooks.slack.com/services/XXXX/YYYY/ZZZZ>), if not `empty`, Slack output is *enabled* |
| config.smtp.authmechanism | string | `"plain"` | SASL Mechanisms : plain, oauthbearer, external, anonymous or "" (disable SASL) |
| config.smtp.from | string | `""` | Sender address (mandatory if SMTP output is *enabled*) |
| config.smtp.hostport | string | `""` | "host:port" address of SMTP server, if not empty, SMTP output is *enabled* |
| config.smtp.identity | string | `""` | identity string for Plain and External Mechanisms |
| config.smtp.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.smtp.outputformat | string | `"html"` | html, text |
| config.smtp.password | string | `""` | password to access SMTP server |
| config.smtp.tls | bool | `true` | use TLS connection (true/false) |
| config.smtp.to | string | `""` | comma-separated list of recipient addresses, can't be empty (mandatory if SMTP output is *enabled*) |
| config.smtp.token | string | `""` | OAuthBearer token for OAuthBearer Mechanism |
| config.smtp.trace | string | `""` | trace string for Anonymous Mechanism |
| config.smtp.user | string | `""` | user to access SMTP server |
| config.spyderbat.apikey | string | `""` | Spyderbat API key with access to the organization |
| config.spyderbat.apiurl | string | `"https://api.spyderbat.com"` | Spyderbat API url |
| config.spyderbat.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.spyderbat.orguid | string | `""` | Organization to send output to, if not empty, Spyderbat output is enabled |
| config.spyderbat.source | string | `"falcosidekick"` | Spyderbat source ID, max 32 characters |
| config.spyderbat.sourcedescription | string | `""` | Spyderbat source description and display name if not empty, max 256 characters |
| config.stan.checkcert | bool | `true` | check if ssl certificate of the output is valid |
| config.stan.clientid | string | `""` | Client ID, if not empty, STAN output is *enabled* |
| config.stan.clusterid | string | `""` | Cluster name, if not empty, STAN output is *enabled* |
| config.stan.hostport | string | `""` | Stan nats://{domain or ip}:{port}, if not empty, STAN output is *enabled* |
| config.stan.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.stan.mutualtls | bool | `false` | if true, checkcert flag will be ignored (server cert will always be checked) |
| config.stan.subjecttemplate | string | `"falco.<priority>.<rule>"` | template for the subject, tokens <priority> and <rule> will be automatically replaced (default: falco.<priority>.<rule>) |
| config.statsd.forwarder | string | `""` | The address for the StatsD forwarder, in the form <http://host:port>, if not empty StatsD is *enabled* |
| config.statsd.namespace | string | `"falcosidekick."` | A prefix for all metrics |
| config.sumologic.checkcert | bool | `true` | check if ssl certificate of the output is valid (default: true) |
| config.sumologic.minimumpriority | string | `""` | minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default) |
| config.sumologic.name | string | `""` | Override the default Sumologic Source Name |
| config.sumologic.receiverURL | string | `""` | Sumologic HTTP Source URL, if not empty, Sumologic output is enabled |
| config.sumologic.sourceCategory | string | `""` | Override the default Sumologic Source Category |
| config.sumologic.sourceHost | string | `""` | Override the default Sumologic Source Host |
| config.syslog.format | string | `"json"` | Syslog payload format. It can be either "json" or "cef" |
| config.syslog.host | string | `""` | Syslog Host, if not empty, Syslog output is *enabled* |
| config.syslog.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.syslog.port | string | `""` | Syslog endpoint port number |
| config.syslog.protocol | string | `"tcp"` | Syslog transport protocol. It can be either "tcp" or "udp" |
| config.talon.address | string | `""` | Talon address, if not empty, Talon output is enabled |
| config.talon.checkcert | bool | `true` | check if ssl certificate of the output is valid |
| config.talon.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.teams.activityimage | string | `""` | Teams section image |
| config.teams.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.teams.outputformat | string | `"all"` | `all` (default), `text` (only text is displayed in Teams), `facts` (only facts are displayed in Teams) |
| config.teams.webhookurl | string | `""` | Teams Webhook URL (ex: <https://outlook.office.com/webhook/XXXXXX/IncomingWebhook/YYYYYY>), if not `empty`, Teams output is *enabled* |
| config.tekton.checkcert | bool | `true` | check if ssl certificate of the output is valid |
| config.tekton.eventlistener | string | `""` | EventListener address, if not empty, Tekton output is enabled |
| config.tekton.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.telegram.chatid | string | `""` | telegram Identifier of the shared chat |
| config.telegram.checkcert | bool | `true` | check if ssl certificate of the output is valid |
| config.telegram.messagethreadid | string | `""` | Telegram individual chats within the group |
| config.telegram.minimumpriority | string | `""` | minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" |
| config.telegram.token | string | `""` | telegram bot authentication token |
| config.templatedfields | string | `""` | a list of escaped comma separated Go templated fields to add to falco events, syntax is "key:template\,key:template" |
| config.timescaledb.database | string | `""` | TimescaleDB database used |
| config.timescaledb.host | string | `""` | TimescaleDB host, if not empty, TimescaleDB output is enabled |
| config.timescaledb.hypertablename | string | `"falco_events"` | Hypertable to store data events (default: falco_events). See TimescaleDB setup for more info |
| config.timescaledb.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.timescaledb.password | string | `"postgres"` | Password to authenticate with TimescaleDB |
| config.timescaledb.port | int | `5432` | TimescaleDB port (default: 5432) |
| config.timescaledb.user | string | `"postgres"` | Username to authenticate with TimescaleDB |
| config.tlsclient.cacertfile | string | `""` | CA certificate file for server certification on TLS connections, appended to the system CA pool if not empty |
| config.tlsserver.cacertfile | string | `"/etc/certs/server/ca.crt"` | CA certification file path for client certification if mutualtls is true |
| config.tlsserver.cacrt | string | `""` | |
| config.tlsserver.certfile | string | `"/etc/certs/server/server.crt"` | server certification file path for TLS Server |
| config.tlsserver.deploy | bool | `false` | if true TLS server will be deployed instead of HTTP |
| config.tlsserver.existingSecret | string | `""` | existing secret with server.crt, server.key and ca.crt files for TLS Server |
| config.tlsserver.keyfile | string | `"/etc/certs/server/server.key"` | server key file path for TLS Server |
| config.tlsserver.mutualtls | bool | `false` | if true mutual TLS server will be deployed instead of TLS, deploy also has to be true |
| config.tlsserver.notlspaths | string | `"/ping"` | a comma separated list of endpoints, if not empty, and tlsserver.deploy is true, a separate http server will be deployed for the specified endpoints (/ping endpoint needs to be notls for Kubernetes to be able to perform the healthchecks) |
| config.tlsserver.notlsport | int | `2810` | port to serve http server serving selected endpoints |
| config.tlsserver.servercrt | string | `""` | server.crt file for TLS Server |
| config.tlsserver.serverkey | string | `""` | server.key file for TLS Server |
| config.wavefront.batchsize | int | `10000` | Wavefront batch size. If empty uses the default 10000. Only used when endpointtype is 'direct' |
| config.wavefront.endpointhost | string | `""` | Wavefront endpoint address (only the host). If not empty, together with endpointtype, Wavefront output is *enabled* |
| config.wavefront.endpointmetricport | int | `2878` | Port to send metrics. Only used when endpointtype is 'proxy' |
| config.wavefront.endpointtoken | string | `""` | Wavefront token. Must be used only when endpointtype is 'direct' |
| config.wavefront.endpointtype | string | `""` | Wavefront endpoint type, must be 'direct' or 'proxy'. If not empty, with endpointhost, Wavefront output is *enabled* |
| config.wavefront.flushintervalseconds | int | `1` | Wavefront flush interval in seconds. Defaults to 1 |
| config.wavefront.metricname | string | `"falco.alert"` | Metric to be created in Wavefront. Defaults to falco.alert |
| config.wavefront.minimumpriority | string | `"debug"` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.webex.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.webex.webhookurl | string | `""` | Webex WebhookURL, if not empty, Webex output is enabled |
| config.webhook.address | string | `""` | Webhook address, if not empty, Webhook output is *enabled* |
| config.webhook.checkcert | bool | `true` | check if ssl certificate of the output is valid |
| config.webhook.customHeaders | string | `""` | a list of comma separated custom headers to add, syntax is "key:value\,key:value" |
| config.webhook.method | string | `"POST"` | HTTP method: POST or PUT |
| config.webhook.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.webhook.mutualtls | bool | `false` | if true, checkcert flag will be ignored (server cert will always be checked) |
| config.yandex.accesskeyid | string | `""` | yandex access key |
| config.yandex.datastreams.endpoint | string | `""` | yandex data streams endpoint (default: https://yds.serverless.yandexcloud.net) |
| config.yandex.datastreams.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.yandex.datastreams.streamname | string | `""` | stream name in format /${region}/${folder_id}/${ydb_id}/${stream_name} |
| config.yandex.region | string | `""` | yandex storage region (default: ru-central-1) |
| config.yandex.s3.bucket | string | `""` | Yandex storage, bucket name |
| config.yandex.s3.endpoint | string | `""` | yandex storage endpoint (default: https://storage.yandexcloud.net) |
| config.yandex.s3.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.yandex.s3.prefix | string | `""` | name of prefix, keys will have format: s3://<bucket>/<prefix>/YYYY-MM-DD/YYYY-MM-DDTHH:mm:ss.s+01:00.json |
| config.yandex.secretaccesskey | string | `""` | yandex secret access key |
| config.zincsearch.checkcert | bool | `true` | check if ssl certificate of the output is valid |
| config.zincsearch.hostport | string | `""` | http://{domain or ip}:{port}, if not empty, ZincSearch output is enabled |
| config.zincsearch.index | string | `"falco"` | index |
| config.zincsearch.minimumpriority | string | `""` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` |
| config.zincsearch.password | string | `""` | use this password to authenticate to ZincSearch |
| config.zincsearch.username | string | `""` | use this username to authenticate to ZincSearch |
| customAnnotations | object | `{}` | custom annotations to add to all resources |
| customLabels | object | `{}` | custom labels to add to all resources |
| extraVolumeMounts | list | `[]` | Extra volume mounts for sidekick deployment |
| extraVolumes | list | `[]` | Extra volumes for sidekick deployment |
| fullnameOverride | string | `""` | Override the name |
| grafana | object | `{"dashboards":{"configMaps":{"falcosidekick":{"folder":"","name":"falcosidekick-grafana-dashboard","namespace":""}},"enabled":false}}` | grafana contains the configuration related to grafana. |
| grafana.dashboards | object | `{"configMaps":{"falcosidekick":{"folder":"","name":"falcosidekick-grafana-dashboard","namespace":""}},"enabled":false}` | dashboards contains configuration for grafana dashboards. |
| grafana.dashboards.configMaps | object | `{"falcosidekick":{"folder":"","name":"falcosidekick-grafana-dashboard","namespace":""}}` | configmaps to be deployed that contain a grafana dashboard. |
| grafana.dashboards.configMaps.falcosidekick | object | `{"folder":"","name":"falcosidekick-grafana-dashboard","namespace":""}` | falcosidekick contains the configuration for falcosidekick's dashboard. |
| grafana.dashboards.configMaps.falcosidekick.folder | string | `""` | folder where the dashboard is stored by grafana. |
| grafana.dashboards.configMaps.falcosidekick.name | string | `"falcosidekick-grafana-dashboard"` | name specifies the name for the configmap. |
| grafana.dashboards.configMaps.falcosidekick.namespace | string | `""` | namespace specifies the namespace for the configmap. |
| grafana.dashboards.enabled | bool | `false` | enabled specifies whether the dashboards should be deployed. |
| image | object | `{"pullPolicy":"IfNotPresent","registry":"docker.io","repository":"falcosecurity/falcosidekick","tag":"2.31.1"}` | The falcosidekick image configuration |
| image.pullPolicy | string | `"IfNotPresent"` | The image pull policy |
| image.registry | string | `"docker.io"` | The image registry to pull from |
| image.repository | string | `"falcosecurity/falcosidekick"` | The image repository to pull from |
| image.tag | string | `"2.31.1"` | The image tag to pull |
| imagePullSecrets | list | `[]` | Secrets for the registry |
| ingress.annotations | object | `{}` | Ingress annotations |
| ingress.enabled | bool | `false` | Whether to create the ingress |
| ingress.hosts | list | `[{"host":"falcosidekick.local","paths":[{"path":"/"}]}]` | Ingress hosts |
| ingress.ingressClassName | string | `""` | ingress class name |
| ingress.tls | list | `[]` | Ingress TLS configuration |
| nameOverride | string | `""` | Override name |
| nodeSelector | object | `{}` | Sidekick nodeSelector field |
| podAnnotations | object | `{}` | additional annotations on the pods |
| podLabels | object | `{}` | additional labels on the pods |
| podSecurityContext | object | `{"fsGroup":1234,"runAsUser":1234}` | Sidekick pod securityContext |
| podSecurityPolicy | object | `{"create":false}` | podSecurityPolicy |
| podSecurityPolicy.create | bool | `false` | Whether to create a podSecurityPolicy |
| priorityClassName | string | `""` | Name of the priority class to be used by the Sidekick pods, priority class needs to be created beforehand |
| prometheusRules.alerts.additionalAlerts | object | `{}` | |
| prometheusRules.alerts.alert.enabled | bool | `true` | enable the high rate rule for the alert events |
| prometheusRules.alerts.alert.rate_interval | string | `"5m"` | rate interval for the high rate rule for the alert events |
| prometheusRules.alerts.alert.threshold | int | `0` | threshold for the high rate rule for the alert events |
| prometheusRules.alerts.critical.enabled | bool | `true` | enable the high rate rule for the critical events |
| prometheusRules.alerts.critical.rate_interval | string | `"5m"` | rate interval for the high rate rule for the critical events |
| prometheusRules.alerts.critical.threshold | int | `0` | threshold for the high rate rule for the critical events |
| prometheusRules.alerts.emergency.enabled | bool | `true` | enable the high rate rule for the emergency events |
| prometheusRules.alerts.emergency.rate_interval | string | `"5m"` | rate interval for the high rate rule for the emergency events |
| prometheusRules.alerts.emergency.threshold | int | `0` | threshold for the high rate rule for the emergency events |
| prometheusRules.alerts.error.enabled | bool | `true` | enable the high rate rule for the error events |
| prometheusRules.alerts.error.rate_interval | string | `"5m"` | rate interval for the high rate rule for the error events |
| prometheusRules.alerts.error.threshold | int | `0` | threshold for the high rate rule for the error events |
| prometheusRules.alerts.output.enabled | bool | `true` | enable the high rate rule for the errors with the outputs |
| prometheusRules.alerts.output.rate_interval | string | `"5m"` | rate interval for the high rate rule for the errors with the outputs |
| prometheusRules.alerts.output.threshold | int | `0` | threshold for the high rate rule for the errors with the outputs |
| prometheusRules.alerts.warning.enabled | bool | `true` | enable the high rate rule for the warning events |
| prometheusRules.alerts.warning.rate_interval | string | `"5m"` | rate interval for the high rate rule for the warning events |
| prometheusRules.alerts.warning.threshold | int | `0` | threshold for the high rate rule for the warning events |
| prometheusRules.enabled | bool | `false` | enable the creation of PrometheusRules for alerting |
| replicaCount | int | `2` | number of running pods |
| resources | object | `{}` | The resources for falcosidekick pods |
| securityContext | object | `{}` | Sidekick container securityContext |
| service.annotations | object | `{"prometheus.io/scrape":"true"}` | Service annotations |
| service.port | int | `2801` | Service port |
| service.type | string | `"ClusterIP"` | Service type |
| serviceMonitor.additionalLabels | object | `{}` | specify Additional labels to be added on the Service Monitor. |
| serviceMonitor.additionalProperties | object | `{}` | allows setting additional properties on the endpoint such as relabelings, metricRelabelings etc. |
| serviceMonitor.enabled | bool | `false` | enable the deployment of a Service Monitor for the Prometheus Operator. |
| serviceMonitor.interval | string | `""` | specify a user defined interval. When not specified Prometheus default interval is used. |
| serviceMonitor.scrapeTimeout | string | `""` | specify a user defined scrape timeout. When not specified Prometheus default scrape timeout is used. |
| testConnection.affinity | object | `{}` | Affinity for the test connection pod |
| testConnection.nodeSelector | object | `{}` | test connection nodeSelector field |
| testConnection.tolerations | list | `[]` | Tolerations for pod assignment |
| tolerations | list | `[]` | Tolerations for pod assignment |
| webui.affinity | object | `{}` | Affinity for the Web UI pods |
| webui.allowcors | bool | `false` | Allow CORS |
| webui.disableauth | bool | `false` | Disable the basic auth |
| webui.enabled | bool | `false` | enable Falcosidekick-UI |
| webui.existingSecret | string | `""` | Existing secret with configuration |
| webui.externalRedis.enabled | bool | `false` | Enable or disable the usage of an external Redis. Is mutually exclusive with webui.redis.enabled. |
| webui.externalRedis.password | string | `""` | Set the password of the external Redis |
| webui.externalRedis.port | int | `6379` | The port of the external Redis database with RediSearch > v2 |
| webui.externalRedis.url | string | `""` | The URL of the external Redis database with RediSearch > v2 |
| webui.image.pullPolicy | string | `"IfNotPresent"` | The web UI image pull policy |
| webui.image.registry | string | `"docker.io"` | The web UI image registry to pull from |
| webui.image.repository | string | `"falcosecurity/falcosidekick-ui"` | The web UI image repository to pull from |
| webui.image.tag | string | `"2.2.0"` | The web UI image tag to pull |
| webui.ingress.annotations | object | `{}` | Web UI ingress annotations |
| webui.ingress.enabled | bool | `false` | Whether to create the Web UI ingress |
| webui.ingress.hosts | list | `[{"host":"falcosidekick-ui.local","paths":[{"path":"/"}]}]` | Web UI ingress hosts configuration |
| webui.ingress.ingressClassName | string | `""` | ingress class name |
| webui.ingress.tls | list | `[]` | Web UI ingress TLS configuration |
| webui.initContainer | object | `{"image":{"registry":"docker.io","repository":"redis/redis-stack","tag":"7.2.0-v11"},"resources":{},"securityContext":{}}` | Web UI wait-redis initContainer |
| webui.initContainer.image.registry | string | `"docker.io"` | wait-redis initContainer image registry to pull from |
| webui.initContainer.image.repository | string | `"redis/redis-stack"` | wait-redis initContainer image repository to pull from |
| webui.initContainer.image.tag | string | `"7.2.0-v11"` | wait-redis initContainer image tag to pull |
| webui.initContainer.resources | object | `{}` | wait-redis initContainer resources |
| webui.initContainer.securityContext | object | `{}` | wait-redis initContainer securityContext |
| webui.loglevel | string | `"info"` | Log level ("debug", "info", "warning", "error") |
| webui.nodeSelector | object | `{}` | Web UI nodeSelector field |
| webui.podAnnotations | object | `{}` | additional annotations on the Web UI pods |
| webui.podLabels | object | `{}` | additional labels on the Web UI pods |
| webui.podSecurityContext | object | `{"fsGroup":1234,"runAsUser":1234}` | Web UI pod securityContext |
| webui.priorityClassName | string | `""` | Name of the priority class to be used by the Web UI pods, priority class needs to be created beforehand |
| webui.redis.affinity | object | `{}` | Affinity for the Web UI Redis pods |
| webui.redis.customAnnotations | object | `{}` | custom annotations to add to all resources |
| webui.redis.customConfig | object | `{}` | List of Custom config overrides for Redis |
| webui.redis.customLabels | object | `{}` | custom labels to add to all resources |
| webui.redis.enabled | bool | `true` | Enable the deployment of the Redis pod. Is mutually exclusive with webui.externalRedis.enabled |
| webui.redis.existingSecret | string | `""` | Existing secret with configuration |
| webui.redis.image.pullPolicy | string | `"IfNotPresent"` | The web UI image pull policy |
| webui.redis.image.registry | string | `"docker.io"` | The web UI Redis image registry to pull from |
| webui.redis.image.repository | string | `"redis/redis-stack"` | The web UI Redis image repository to pull from |
| webui.redis.image.tag | string | `"7.2.0-v11"` | The web UI Redis image tag to pull from |
| webui.redis.nodeSelector | object | `{}` | Web UI Redis nodeSelector field |
| webui.redis.password | string | `""` | Set a password for Redis |
| webui.redis.podAnnotations | object | `{}` | additional annotations on the pods |
| webui.redis.podLabels | object | `{}` | additional labels on the pods |
| webui.redis.podSecurityContext | object | `{}` | Web UI Redis pod securityContext |
| webui.redis.priorityClassName | string | `""` | Name of the priority class to be used by the Web UI Redis pods, priority class needs to be created beforehand |
| webui.redis.resources | object | `{}` | The resources for the redis pod |
| webui.redis.securityContext | object | `{}` | Web UI Redis container securityContext |
| webui.redis.service.annotations | object | `{}` | The web UI Redis service annotations (use this to set an internal LB, for example) |
| webui.redis.service.port | int | `6379` | The web UI Redis service port for the falcosidekick-ui |
| webui.redis.service.targetPort | int | `6379` | The web UI Redis service targetPort |
| webui.redis.service.type | string | `"ClusterIP"` | The web UI Redis service type (e.g. LoadBalancer) |
| webui.redis.storageClass | string | `""` | Storage class of the PVC for the redis pod |
| webui.redis.storageEnabled | bool | `true` | Enable the PVC for the redis pod |
| webui.redis.storageSize | string | `"1Gi"` | Size of the PVC for the redis pod |
| webui.redis.tolerations | list | `[]` | Tolerations for pod assignment |
| webui.replicaCount | int | `2` | number of running pods |
| webui.resources | object | `{}` | The resources for the web UI pods |
| webui.securityContext | object | `{}` | Web UI container securityContext |
| webui.service.annotations | object | `{}` | The web UI service annotations (use this to set an internal LB, for example) |
| webui.service.nodePort | int | `30282` | The web UI service nodePort |
| webui.service.port | int | `2802` | The web UI service port for the falcosidekick-ui |
| webui.service.targetPort | int | `2802` | The web UI service targetPort |
| webui.service.type | string | `"ClusterIP"` | The web UI service type |
| webui.tolerations | list | `[]` | Tolerations for pod assignment |
| webui.ttl | int | `0` | TTL for keys, the syntax is X<unit>, with <unit>: s, m, d, w (0 for no ttl) |
| webui.user | string | `"admin:admin"` | User in format <login>:<password> |
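As an illustration, here is a minimal `my-values.yaml` snippet overriding a few of the Web UI settings above (all values shown are placeholders, not recommendations):

```yaml
webui:
  enabled: true
  replicaCount: 2
  user: "admin:changeme"   # format <login>:<password>
  ttl: "7d"                # X<unit> syntax per the table above (0 disables expiry)
  redis:
    enabled: true
    password: "a-redis-password"
    storageSize: "2Gi"
    customConfig:          # rendered line by line into redis-stack.conf
      - "maxmemory 256mb"
      - "maxmemory-policy allkeys-lru"
```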
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`.
> **Tip**: You can use the default [values.yaml](values.yaml) as a starting point for your own overrides.
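For example (release name, namespace, and override values are illustrative):

```bash
# Add the falcosecurity charts repo if not already present
helm repo add falcosecurity https://falcosecurity.github.io/charts
helm repo update

# Set individual parameters on the command line...
helm install falcosidekick falcosecurity/falcosidekick \
  --namespace falco --create-namespace \
  --set webui.enabled=true \
  --set webui.replicaCount=2

# ...or pass a customized values file instead
helm install falcosidekick falcosecurity/falcosidekick \
  --namespace falco \
  -f my-values.yaml
```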
## Metrics
A `prometheus` endpoint can be scraped at `/metrics`.
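For instance, a minimal static scrape configuration, assuming the chart is released as `falcosidekick` in the `falco` namespace with the default service port 2801 (adjust the service name and namespace to your deployment), could look like:

```yaml
scrape_configs:
  - job_name: "falcosidekick"
    metrics_path: /metrics
    static_configs:
      - targets: ["falcosidekick.falco.svc.cluster.local:2801"]
```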
## Access Falcosidekick UI through an Ingress and a subpath
You may want to access the [`WebUI (Falcosidekick UI)`](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/falcosidekick-ui.md) dashboard from `/subpath` rather than `/`, behind an Ingress. Here is an example of the annotations to add to the Ingress for the `nginx-ingress` controller:
```yaml
nginx.ingress.kubernetes.io/rewrite-target: /$2
nginx.ingress.kubernetes.io/use-regex: "true"
```
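For context, a sketch of the matching Ingress values (host and paths are placeholders; check the chart's [values.yaml](values.yaml) for the exact `webui.ingress` schema) might look like:

```yaml
webui:
  ingress:
    enabled: true
    annotations:
      nginx.ingress.kubernetes.io/rewrite-target: /$2
      nginx.ingress.kubernetes.io/use-regex: "true"
    hosts:
      - host: falco.example.com
        paths:
          - /subpath(/|$)(.*)
```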
@ -1,714 +0,0 @@
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"id": 5,
"links": [],
"panels": [
{
"datasource": {
"default": false,
"type": "loki",
"uid": "${datasource}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
}
},
"mappings": []
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 0
},
"id": 1,
"options": {
"legend": {
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"pieType": "pie",
"reduceOptions": {
"calcs": [
"sum"
],
"fields": "",
"values": false
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "11.2.0",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "${datasource}"
},
"editorMode": "builder",
"expr": "count by(priority) (rate({priority=~\".+\"} | logfmt | k8s_ns =~ `$namespace` | priority =~ `$priority` [$__auto]))",
"legendFormat": "{{priority}}",
"queryType": "range",
"refId": "A"
}
],
"title": "Priority counts",
"type": "piechart"
},
{
"datasource": {
"default": false,
"type": "loki",
"uid": "${datasource}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
}
},
"mappings": []
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 0
},
"id": 2,
"options": {
"displayLabels": [
"value",
"percent"
],
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true,
"values": []
},
"pieType": "pie",
"reduceOptions": {
"calcs": [
"sum"
],
"fields": "",
"values": false
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "11.2.0",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "${datasource}"
},
"editorMode": "builder",
"expr": "count by(rule) (rate({priority=~\".+\", rule!=\"Falco internal: metrics snapshot\"} | logfmt | k8s_ns =~ `$namespace` | priority =~ `$priority` [$__auto]))",
"legendFormat": "{{priority}}",
"queryType": "range",
"refId": "A"
}
],
"title": "Rules counts",
"type": "piechart"
},
{
"datasource": {
"default": false,
"type": "loki",
"uid": "${datasource}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"custom": {
"align": "left",
"cellOptions": {
"type": "auto",
"wrapText": false
},
"filterable": true,
"inspect": false
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "Value #A"
},
"properties": [
{
"id": "displayName",
"value": "Number of Messages"
}
]
},
{
"matcher": {
"id": "byName",
"options": "Time"
},
"properties": [
{
"id": "custom.hidden",
"value": true
}
]
},
{
"matcher": {
"id": "byName",
"options": "k8s_ns"
},
"properties": [
{
"id": "custom.width",
"value": 96
}
]
},
{
"matcher": {
"id": "byName",
"options": "priority"
},
"properties": [
{
"id": "custom.width",
"value": 91
}
]
},
{
"matcher": {
"id": "byName",
"options": "rule"
},
"properties": [
{
"id": "custom.width",
"value": 450
}
]
},
{
"matcher": {
"id": "byName",
"options": "k8s_pod_name"
},
"properties": [
{
"id": "custom.width",
"value": 184
}
]
}
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 8
},
"id": 5,
"options": {
"cellHeight": "sm",
"footer": {
"countRows": false,
"enablePagination": false,
"fields": "",
"reducer": [
"last"
],
"show": false
},
"showHeader": true,
"sortBy": [
{
"desc": false,
"displayName": "k8s_pod_name"
}
]
},
"pluginVersion": "11.2.0",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "${datasource}"
},
"editorMode": "builder",
"expr": "count by(k8s_pod_name, rule, priority, k8s_ns) (rate({priority=~\".+\"} | logfmt | k8s_ns =~ `$namespace` | priority =~ `$priority` [$__auto]))",
"legendFormat": "",
"queryType": "instant",
"refId": "A"
}
],
"transformations": [
{
"id": "sortBy",
"options": {
"fields": {},
"sort": [
{
"desc": true,
"field": "Value #A"
}
]
}
}
],
"type": "table"
},
{
"datasource": {
"default": false,
"type": "loki",
"uid": "${datasource}"
},
"gridPos": {
"h": 7,
"w": 24,
"x": 0,
"y": 16
},
"id": 6,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": false,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"pluginVersion": "11.2.0",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "${datasource}"
},
"direction": "backward",
"editorMode": "builder",
"expr": "{priority=~\".+\"} |= `$line_filter` | logfmt | k8s_ns =~ `$namespace` | priority =~ `$priority`",
"queryType": "range",
"refId": "A"
}
],
"title": "Realtime logs",
"type": "logs"
},
{
"datasource": {
"default": false,
"type": "loki",
"uid": "${datasource}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 100,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "stepBefore",
"lineStyle": {
"fill": "solid"
},
"lineWidth": 1,
"pointSize": 4,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "normal"
},
"thresholdsStyle": {
"mode": "off"
}
},
"fieldMinMax": false,
"mappings": [],
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green"
},
{
"color": "red",
"value": 80
}
]
},
"unit": "none"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 23
},
"id": 7,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "loki",
"uid": "loki"
},
"editorMode": "builder",
"expr": "count by(priority) (rate({priority=~\".+\"} | logfmt | k8s_ns =~ `$namespace` | priority =~ `$priority` [1m]))",
"legendFormat": "{{priority}}",
"queryType": "range",
"refId": "A"
}
],
"title": "Priorities Rates",
"type": "timeseries"
},
{
"datasource": {
"default": false,
"type": "loki",
"uid": "${datasource}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 100,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "stepBefore",
"lineStyle": {
"fill": "solid"
},
"lineWidth": 1,
"pointSize": 4,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "normal"
},
"thresholdsStyle": {
"mode": "off"
}
},
"fieldMinMax": false,
"mappings": [],
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green"
},
{
"color": "red",
"value": 80
}
]
},
"unit": "none"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 23
},
"id": 8,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "loki",
"uid": "loki"
},
"editorMode": "builder",
"expr": "count by(rule) (rate({priority=~\".+\"} | logfmt | k8s_ns =~ `$namespace` | priority =~ `$priority` [1m]))",
"legendFormat": "{{priority}}",
"queryType": "range",
"refId": "A"
}
],
"title": "Rules Rates",
"type": "timeseries"
}
],
"refresh": "auto",
"schemaVersion": 39,
"tags": [],
"templating": {
"list": [
{
"allValue": "",
"current": {
"selected": true,
"text": [
"arr",
"core",
"falco",
"kube-system",
"media",
"monitoring",
"rook",
"rook-cluster",
"storage",
"utilities",
"webs"
],
"value": [
"arr",
"core",
"falco",
"kube-system",
"media",
"monitoring",
"rook",
"rook-cluster",
"storage",
"utilities",
"webs"
]
},
"datasource": {
"type": "loki",
"uid": "${datasource}"
},
"definition": "",
"description": "",
"hide": 0,
"includeAll": false,
"label": "namespace",
"multi": true,
"name": "namespace",
"options": [],
"query": {
"label": "namespace",
"refId": "LokiVariableQueryEditor-VariableQuery",
"stream": "",
"type": 1
},
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 0,
"type": "query"
},
{
"current": {
"selected": false,
"text": "Loki",
"value": "loki"
},
"hide": 0,
"includeAll": false,
"label": "datasource",
"multi": false,
"name": "datasource",
"options": [],
"query": "loki",
"queryValue": "",
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"type": "datasource"
},
{
"current": {
"selected": true,
"text": [
"Critical"
],
"value": [
"Critical"
]
},
"datasource": {
"type": "loki",
"uid": "${datasource}"
},
"definition": "",
"hide": 0,
"includeAll": true,
"label": "priority",
"multi": true,
"name": "priority",
"options": [],
"query": {
"label": "priority",
"refId": "LokiVariableQueryEditor-VariableQuery",
"stream": "",
"type": 1
},
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 0,
"type": "query"
},
{
"current": {
"selected": false,
"text": "",
"value": ""
},
"description": "Text to filter lines",
"hide": 0,
"label": "line_filter",
"name": "line_filter",
"options": [
{
"selected": true,
"text": "",
"value": ""
}
],
"query": "",
"skipUrlSync": false,
"type": "textbox"
}
]
},
"time": {
"from": "now-24h",
"to": "now"
},
"timepicker": {},
"timezone": "browser",
"title": "Falco logs",
"uid": "de6ixj4nl1kowc",
"version": 2,
"weekStart": ""
}
@ -1,44 +0,0 @@
1. Get the URL for Falcosidekick by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "falcosidekick.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "falcosidekick.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "falcosidekick.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
kubectl port-forward svc/{{ include "falcosidekick.name" . }} {{ .Values.service.port }}:{{ .Values.service.port }} --namespace {{ .Release.Namespace }}
echo "Visit http://127.0.0.1:{{ .Values.service.port }} to use your application"
{{- end }}
{{- if .Values.webui.enabled }}
2. Get the URL for Falcosidekick-UI (WebUI) by running these commands:
{{- if .Values.webui.ingress.enabled }}
{{- range $host := .Values.webui.ingress.hosts }}
http{{ if $.Values.webui.ingress.tls }}s{{ end }}://{{ $host.host }}{{ index $host.paths 0 }}
{{- end }}
{{- else if contains "NodePort" .Values.webui.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "falcosidekick.fullname" . }}-ui)
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT/ui
{{- else if contains "LoadBalancer" .Values.webui.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "falcosidekick.fullname" . }}-ui'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "falcosidekick.fullname" . }}-ui -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo http://$SERVICE_IP:{{ .Values.webui.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
kubectl port-forward svc/{{ include "falcosidekick.name" . }}-ui {{ .Values.webui.service.port }}:{{ .Values.webui.service.port }} --namespace {{ .Release.Namespace }}
echo "Visit http://127.0.0.1:{{ .Values.webui.service.port }}/ui to use your application"
{{- end }}
{{ else }}
2. Try to enable Falcosidekick-UI (WebUI) by adding this argument to your command:
--set webui.enabled=true
{{- end }}
@ -1,80 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "falcosidekick.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "falcosidekick.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "falcosidekick.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for ingress.
*/}}
{{- define "falcosidekick.ingress.apiVersion" -}}
{{- if and (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19-0" .Capabilities.KubeVersion.Version) -}}
{{- print "networking.k8s.io/v1" -}}
{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" -}}
{{- print "networking.k8s.io/v1beta1" -}}
{{- else -}}
{{- print "extensions/v1beta1" -}}
{{- end -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "falcosidekick.labels" -}}
helm.sh/chart: {{ include "falcosidekick.chart" . }}
{{ include "falcosidekick.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/part-of: {{ include "falcosidekick.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "falcosidekick.selectorLabels" -}}
app.kubernetes.io/name: {{ include "falcosidekick.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Return if ingress is stable.
*/}}
{{- define "falcosidekick.ingress.isStable" -}}
{{- eq (include "falcosidekick.ingress.apiVersion" .) "networking.k8s.io/v1" -}}
{{- end -}}
{{/*
Return if ingress supports pathType.
*/}}
{{- define "falcosidekick.ingress.supportsPathType" -}}
{{- or (eq (include "falcosidekick.ingress.isStable" .) "true") (and (eq (include "falcosidekick.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) -}}
{{- end -}}
@ -1,26 +0,0 @@
{{- if and .Values.config.tlsserver.serverkey .Values.config.tlsserver.servercrt .Values.config.tlsserver.cacrt }}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "falcosidekick.fullname" . }}-certs
namespace: {{ .Release.Namespace }}
labels:
{{- include "falcosidekick.labels" . | nindent 4 }}
app.kubernetes.io/component: core
{{- with .Values.customLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
annotations:
{{- with .Values.customAnnotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
type: Opaque
data:
{{ $key := .Values.config.tlsserver.serverkey }}
server.key: {{ $key | b64enc | quote }}
{{ $crt := .Values.config.tlsserver.servercrt }}
server.crt: {{ $crt | b64enc | quote }}
falcosidekick.pem: {{ print $key $crt | b64enc | quote }}
ca.crt: {{ .Values.config.tlsserver.cacrt | b64enc | quote }}
ca.pem: {{ .Values.config.tlsserver.cacrt | b64enc | quote }}
{{- end }}
@ -1,28 +0,0 @@
{{- if .Values.grafana.dashboards.enabled -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Values.grafana.dashboards.configMaps.falcosidekick.name }}
{{ if .Values.grafana.dashboards.configMaps.falcosidekick.namespace }}
namespace: {{ .Values.grafana.dashboards.configMaps.falcosidekick.namespace }}
{{- else -}}
namespace: {{ .Release.Namespace }}
{{- end }}
labels:
{{- include "falcosidekick.labels" . | nindent 4 }}
grafana_dashboard: "1"
{{- with .Values.customLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
annotations:
{{- if .Values.grafana.dashboards.configMaps.falcosidekick.folder }}
k8s-sidecar-target-directory: /tmp/dashboards/{{ .Values.grafana.dashboards.configMaps.falcosidekick.folder }}
grafana_dashboard_folder: {{ .Values.grafana.dashboards.configMaps.falcosidekick.folder }}
{{- end }}
{{- with .Values.customAnnotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
data:
falco-dashboard.json: |-
{{- .Files.Get "dashboards/falcosidekick-grafana-dashboard.json" | nindent 4 }}
{{- end -}}
@ -1,46 +0,0 @@
{{- if and (.Values.webui.enabled) (or (.Values.webui.redis.enabled) (.Values.webui.externalRedis.enabled)) -}}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "falcosidekick.fullname" . }}-ui-redis
namespace: {{ .Release.Namespace }}
labels:
{{- include "falcosidekick.labels" . | nindent 4 }}
app.kubernetes.io/component: ui-redis
data:
{{- if .Values.webui.redis.customConfig }}
redis-stack.conf: |-
{{ range .Values.webui.redis.customConfig }}
{{- . }}
{{ end -}}
{{- end }}
ping-redis.sh: |-
#!/bin/bash
for i in {1..10};
do
response=$(
timeout -s 3 30 \
redis-cli \
{{- if .Values.webui.redis.enabled }}
-h {{ include "falcosidekick.fullname" . }}-ui-redis -p 6379 \
{{- if .Values.webui.redis.password }}
-a ${REDIS_PASSWORD} \
{{- end }}
{{- end }}
{{- if .Values.webui.externalRedis.enabled }}
-h {{ .Values.webui.externalRedis.url }} \
-p {{ .Values.webui.externalRedis.port }} \
{{- if .Values.webui.externalRedis.password }}
-a ${REDIS_PASSWORD} \
{{- end }}
{{- end }}
ping
)
if [ "$response" = "PONG" ]; then
exit 0
fi
sleep 3
done
exit 1
{{- end }}
@ -1,293 +0,0 @@
{{- if .Values.webui.enabled }}
{{- if and .Values.webui.redis.enabled .Values.webui.externalRedis.enabled }}
{{ fail "Both webui.redis and webui.externalRedis modules are enabled. Please disable one of them." }}
{{- else if and (not .Values.webui.redis.enabled) (not .Values.webui.externalRedis.enabled) }}
{{ fail "Neither the included Redis nor the external Redis is enabled. Please enable one of them." }}
{{- end }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "falcosidekick.fullname" . }}-ui
namespace: {{ .Release.Namespace }}
labels:
{{- include "falcosidekick.labels" . | nindent 4 }}
app.kubernetes.io/component: ui
{{- with .Values.webui.customLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
annotations:
{{- with .Values.webui.customAnnotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
replicas: {{ .Values.webui.replicaCount }}
{{- if .Values.webui.revisionHistoryLimit }}
revisionHistoryLimit: {{ .Values.webui.revisionHistoryLimit }}
{{- end }}
selector:
matchLabels:
{{- include "falcosidekick.selectorLabels" . | nindent 6 }}
app.kubernetes.io/component: ui
template:
metadata:
labels:
{{- include "falcosidekick.labels" . | nindent 8 }}
app.kubernetes.io/component: ui
{{- if .Values.webui.podLabels }}
{{ toYaml .Values.webui.podLabels | indent 8 }}
{{- end }}
{{- if .Values.webui.podAnnotations }}
annotations:
{{ toYaml .Values.webui.podAnnotations | indent 8 }}
{{- end }}
spec:
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{- range .Values.imagePullSecrets }}
- name: {{ . }}
{{- end }}
{{- end }}
serviceAccountName: {{ include "falcosidekick.fullname" . }}-ui
{{- if .Values.webui.priorityClassName }}
priorityClassName: "{{ .Values.webui.priorityClassName }}"
{{- end }}
{{- if .Values.webui.podSecurityContext }}
securityContext:
{{- toYaml .Values.webui.podSecurityContext | nindent 8}}
{{- end }}
initContainers:
- name: wait-redis
image: "{{ .Values.webui.initContainer.image.registry }}/{{ .Values.webui.initContainer.image.repository }}:{{ .Values.webui.initContainer.image.tag }}"
command:
- sh
- -c
- /scripts/ping-redis.sh
{{- if .Values.webui.initContainer.resources }}
resources:
{{- toYaml .Values.webui.initContainer.resources | nindent 12 }}
{{- end }}
{{- if .Values.webui.initContainer.securityContext }}
securityContext:
{{- toYaml .Values.webui.initContainer.securityContext | nindent 12}}
{{- end }}
volumeMounts:
- name: scripts
mountPath: /scripts/ping-redis.sh
subPath: ping-redis.sh
envFrom:
- secretRef:
name: {{ include "falcosidekick.fullname" . }}-ui
{{- if .Values.webui.existingSecret }}
- secretRef:
name: {{ .Values.webui.existingSecret }}
{{- end }}
containers:
- name: {{ .Chart.Name }}-ui
image: "{{ .Values.webui.image.registry }}/{{ .Values.webui.image.repository }}:{{ .Values.webui.image.tag }}"
imagePullPolicy: {{ .Values.webui.image.pullPolicy }}
envFrom:
- secretRef:
name: {{ include "falcosidekick.fullname" . }}-ui
{{- if .Values.webui.existingSecret }}
- secretRef:
name: {{ .Values.webui.existingSecret }}
{{- end }}
args:
- "-r"
{{- if .Values.webui.redis.enabled }}
- {{ include "falcosidekick.fullname" . }}-ui-redis{{ if .Values.webui.redis.fullfqdn }}.{{ .Release.Namespace }}.svc.cluster.local{{ end }}:{{ .Values.webui.redis.service.port | default "6379" }}
{{- else if .Values.webui.externalRedis.enabled }}
- "{{ required "External Redis is enabled. Please set the URL to the database." .Values.webui.externalRedis.url }}:{{ .Values.webui.externalRedis.port | default "6379" }}"
{{- end}}
{{- if .Values.webui.ttl }}
- "-t"
- {{ .Values.webui.ttl | quote }}
{{- end}}
{{- if .Values.webui.loglevel }}
- "-l"
- {{ .Values.webui.loglevel }}
{{- end}}
{{- if .Values.webui.allowcors }}
- "-x"
{{- end}}
{{- if .Values.webui.disableauth }}
- "-d"
{{- end}}
ports:
- name: http
containerPort: 2802
protocol: TCP
livenessProbe:
httpGet:
path: /api/v1/healthz
port: http
initialDelaySeconds: 10
periodSeconds: 5
readinessProbe:
httpGet:
path: /api/v1/healthz
port: http
initialDelaySeconds: 10
periodSeconds: 5
{{- if .Values.webui.securityContext }}
securityContext:
{{- toYaml .Values.webui.securityContext | nindent 12 }}
{{- end }}
resources:
{{- toYaml .Values.webui.resources | nindent 12 }}
{{- with .Values.webui.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.webui.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.webui.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
volumes:
- name: scripts
configMap:
name: {{ include "falcosidekick.fullname" . }}-ui-redis
defaultMode: 0555
items:
- key: ping-redis.sh
path: ping-redis.sh
{{- if .Values.webui.redis.enabled }}
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ include "falcosidekick.fullname" . }}-ui-redis
namespace: {{ .Release.Namespace }}
labels:
{{- include "falcosidekick.labels" . | nindent 4 }}
app.kubernetes.io/component: ui-redis
spec:
replicas: 1
serviceName: {{ include "falcosidekick.fullname" . }}-ui-redis
selector:
matchLabels:
{{- include "falcosidekick.selectorLabels" . | nindent 6 }}
app.kubernetes.io/component: ui-redis
template:
metadata:
labels:
{{- include "falcosidekick.labels" . | nindent 8 }}
app.kubernetes.io/component: ui-redis
{{- if .Values.webui.redis.podLabels }}
{{ toYaml .Values.webui.redis.podLabels | indent 8 }}
{{- end }}
{{- if .Values.webui.redis.podAnnotations }}
annotations:
{{ toYaml .Values.webui.redis.podAnnotations | indent 8 }}
{{- end }}
spec:
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{- range .Values.imagePullSecrets }}
- name: {{ . }}
{{- end }}
{{- end }}
serviceAccountName: {{ include "falcosidekick.fullname" . }}-ui
{{- if .Values.webui.redis.priorityClassName }}
priorityClassName: "{{ .Values.webui.redis.priorityClassName }}"
{{- end }}
{{- if .Values.webui.redis.podSecurityContext }}
securityContext:
{{- toYaml .Values.webui.redis.podSecurityContext | nindent 8}}
{{- end }}
containers:
- name: redis
image: "{{ .Values.webui.redis.image.registry }}/{{ .Values.webui.redis.image.repository }}:{{ .Values.webui.redis.image.tag }}"
imagePullPolicy: {{ .Values.webui.redis.image.pullPolicy }}
{{- if .Values.webui.redis.password }}
envFrom:
- secretRef:
{{- if .Values.webui.redis.existingSecret }}
name: {{ .Values.webui.redis.existingSecret }}
{{- else }}
name: {{ include "falcosidekick.fullname" . }}-ui-redis
{{- end }}
{{- end}}
args: []
ports:
- name: redis
containerPort: 6379
protocol: TCP
livenessProbe:
tcpSocket:
port: 6379
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 3
readinessProbe:
tcpSocket:
port: 6379
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 3
{{- if .Values.webui.redis.securityContext }}
securityContext:
{{- toYaml .Values.webui.redis.securityContext | nindent 12 }}
{{- end }}
{{- if or (.Values.webui.redis.storageEnabled) (.Values.webui.redis.customConfig) }}
volumeMounts:
{{- if .Values.webui.redis.storageEnabled }}
- name: {{ include "falcosidekick.fullname" . }}-ui-redis-data
mountPath: /data
{{- end }}
{{- if .Values.webui.redis.customConfig }}
- name: config
mountPath: /redis-stack.conf
subPath: redis-stack.conf
{{- end }}
{{- end }}
resources:
{{- toYaml .Values.webui.redis.resources | nindent 12 }}
{{- with .Values.webui.redis.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.webui.redis.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.webui.redis.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{ if .Values.webui.redis.customConfig }}
volumes:
- name: config
configMap:
name: {{ include "falcosidekick.fullname" . }}-ui-redis
defaultMode: 0444
items:
- key: redis-stack.conf
path: redis-stack.conf
{{ end }}
{{- if .Values.webui.redis.storageEnabled }}
volumeClaimTemplates:
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ include "falcosidekick.fullname" . }}-ui-redis-data
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: {{ .Values.webui.redis.storageSize }}
{{- if .Values.webui.redis.storageClass }}
storageClassName: {{ .Values.webui.redis.storageClass }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
@ -1,192 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "falcosidekick.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "falcosidekick.labels" . | nindent 4 }}
app.kubernetes.io/component: core
{{- with .Values.customLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
annotations:
{{- with .Values.customAnnotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
replicas: {{ .Values.replicaCount }}
{{- if .Values.revisionHistoryLimit }}
revisionHistoryLimit: {{ .Values.revisionHistoryLimit }}
{{- end }}
selector:
matchLabels:
{{- include "falcosidekick.selectorLabels" . | nindent 6 }}
app.kubernetes.io/component: core
template:
metadata:
labels:
{{- include "falcosidekick.labels" . | nindent 8 }}
app.kubernetes.io/component: core
{{- if and .Values.config.azure.podIdentityClientID .Values.config.azure.podIdentityName }}
aadpodidbinding: {{ include "falcosidekick.fullname" . }}
{{- end }}
{{- if .Values.config.azure.workloadIdentityClientID }}
azure.workload.identity/use: "true"
{{- end }}
{{- if .Values.podLabels }}
{{ toYaml .Values.podLabels | nindent 8 }}
{{- end }}
annotations:
checksum/config: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }}
{{- if .Values.podAnnotations }}
{{ toYaml .Values.podAnnotations | nindent 8 }}
{{- end }}
spec:
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{- range .Values.imagePullSecrets }}
- name: {{ . }}
{{- end }}
{{- end }}
serviceAccountName: {{ include "falcosidekick.fullname" . }}
{{- if .Values.priorityClassName }}
priorityClassName: "{{ .Values.priorityClassName }}"
{{- end }}
{{- if .Values.podSecurityContext }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8}}
{{- end }}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: 2801
protocol: TCP
{{- if .Values.config.tlsserver.deploy }}
- name: http-notls
containerPort: 2810
protocol: TCP
{{- end }}
livenessProbe:
httpGet:
path: /ping
{{- if .Values.config.tlsserver.deploy }}
port: http-notls
{{- else }}
port: http
{{- end }}
initialDelaySeconds: 10
periodSeconds: 5
readinessProbe:
httpGet:
path: /ping
{{- if .Values.config.tlsserver.deploy }}
port: http-notls
{{- else }}
port: http
{{- end }}
initialDelaySeconds: 10
periodSeconds: 5
{{- if .Values.securityContext }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
{{- end }}
{{- if .Values.config.extraArgs }}
args:
{{ toYaml .Values.config.extraArgs | nindent 12 }}
{{- end }}
envFrom:
- secretRef:
name: {{ include "falcosidekick.fullname" . }}
{{- if .Values.config.existingSecret }}
- secretRef:
name: {{ .Values.config.existingSecret }}
{{- end }}
env:
- name: DEBUG
value: {{ .Values.config.debug | quote }}
- name: CUSTOMFIELDS
value: {{ .Values.config.customfields | quote }}
- name: TEMPLATEDFIELDS
value: {{ .Values.config.templatedfields | quote }}
- name: CUSTOMTAGS
value: {{ .Values.config.customtags | quote }}
- name: OUTPUTFIELDFORMAT
value: {{ .Values.config.outputFieldFormat | quote }}
- name: BRACKETREPLACER
value: {{ .Values.config.bracketreplacer | quote }}
- name: MUTUALTLSFILESPATH
value: {{ .Values.config.mutualtlsfilespath | quote }}
- name: MUTUALTLSCLIENT_CERTFILE
value: {{ .Values.config.mutualtlsclient.certfile | quote }}
- name: MUTUALTLSCLIENT_KEYFILE
value: {{ .Values.config.mutualtlsclient.keyfile | quote }}
- name: MUTUALTLSCLIENT_CACERTFILE
value: {{ .Values.config.mutualtlsclient.cacertfile | quote }}
- name: TLSCLIENT_CACERTFILE
value: {{ .Values.config.tlsclient.cacertfile | quote }}
{{- if .Values.config.tlsserver.deploy }}
- name: TLSSERVER_DEPLOY
value: {{ .Values.config.tlsserver.deploy | quote }}
- name: TLSSERVER_CERTFILE
value: {{ .Values.config.tlsserver.certfile | quote }}
- name: TLSSERVER_KEYFILE
value: {{ .Values.config.tlsserver.keyfile | quote }}
- name: TLSSERVER_CACERTFILE
value: {{ .Values.config.tlsserver.cacertfile | quote }}
- name: TLSSERVER_MUTUALTLS
value: {{ .Values.config.tlsserver.mutualtls | quote }}
- name: TLSSERVER_NOTLSPORT
value: {{ .Values.config.tlsserver.notlsport | quote }}
- name: TLSSERVER_NOTLSPATHS
value: {{ .Values.config.tlsserver.notlspaths | quote }}
{{- end }}
{{- if .Values.config.otlp.traces.extraenvvars }}
{{ toYaml .Values.config.otlp.traces.extraenvvars | nindent 12 }}
{{- end }}
{{- if .Values.config.extraEnv }}
{{ toYaml .Values.config.extraEnv | nindent 12 }}
{{- end }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- if or .Values.extraVolumeMounts (and .Values.config.tlsserver.deploy (or .Values.config.tlsserver.existingSecret .Values.config.tlsserver.serverkey .Values.config.tlsserver.servercrt .Values.config.tlsserver.cacrt)) }}
volumeMounts:
{{- if and .Values.config.tlsserver.deploy (or .Values.config.tlsserver.existingSecret .Values.config.tlsserver.serverkey .Values.config.tlsserver.servercrt .Values.config.tlsserver.cacrt) }}
- mountPath: /etc/certs/server
name: certs-volume
readOnly: true
{{- end }}
{{- if .Values.extraVolumeMounts }}
{{ toYaml .Values.extraVolumeMounts | indent 12 }}
{{- end }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if or .Values.extraVolumes (and .Values.config.tlsserver.deploy (or .Values.config.tlsserver.existingSecret .Values.config.tlsserver.serverkey .Values.config.tlsserver.servercrt .Values.config.tlsserver.cacrt)) }}
volumes:
{{- if and .Values.config.tlsserver.deploy (or .Values.config.tlsserver.existingSecret .Values.config.tlsserver.serverkey .Values.config.tlsserver.servercrt .Values.config.tlsserver.cacrt) }}
- name: certs-volume
secret:
{{- if .Values.config.tlsserver.existingSecret }}
secretName: {{.Values.config.tlsserver.existingSecret }}
{{- else }}
secretName: {{ include "falcosidekick.fullname" . }}-certs
{{- end }}
{{- end }}
{{- if .Values.extraVolumes }}
{{ toYaml .Values.extraVolumes | indent 8 }}
{{- end }}
{{- end }}
Some files were not shown because too many files have changed in this diff.