Compare commits


No commits in common. "master" and "falcosidekick-0.5.11" have entirely different histories.

189 changed files with 8784 additions and 25888 deletions

.circleci/config.yml Normal file

@ -0,0 +1,75 @@
version: 2.1
jobs:
lint-scripts:
docker:
- image: koalaman/shellcheck-alpine
steps:
- checkout
- run:
command: |
shellcheck -x tests/e2e-kind.sh
shellcheck -x .circleci/install_tools.sh
shellcheck -x .circleci/release.sh
lint-charts:
docker:
- image: quay.io/helmpack/chart-testing:latest
steps:
- checkout
- run:
name: lint
command: ct lint --config tests/ct.yaml
install-charts:
machine:
image: ubuntu-2004:202111-02
resource_class: medium
steps:
- checkout
- run:
command: tests/e2e-kind.sh
no_output_timeout: 1h
release-charts:
docker:
- image: cimg/base:stable
steps:
- run:
name: checkout
command: |
git clone https://${GITHUB_TOKEN}@github.com/${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}.git .
- run:
name: release
command: |
echo "export CR_REPO_URL=https://falcosecurity.github.io/charts" >> $BASH_ENV
echo "export GIT_USERNAME=$CIRCLE_PROJECT_USERNAME" >> $BASH_ENV
echo "export GIT_REPOSITORY_NAME=$CIRCLE_PROJECT_REPONAME" >> $BASH_ENV
.circleci/install_tools.sh
.circleci/release.sh
workflows:
version: 2
release:
jobs:
- lint-scripts:
filters:
branches:
ignore: gh-pages
- lint-charts:
filters:
branches:
ignore: gh-pages
- install-charts:
filters:
branches:
ignore: gh-pages
requires:
- lint-scripts
- lint-charts
- release-charts:
context: falco
filters:
tags:
ignore: /.*/
branches:
only: master

.circleci/install_tools.sh Executable file

@ -0,0 +1,20 @@
#!/usr/bin/env bash
set -o errexit
readonly HELM_VERSION=3.9.0
readonly CHART_RELEASER_VERSION=1.0.0-beta.1
echo "Installing Helm..."
curl -LO "https://get.helm.sh/helm-v$HELM_VERSION-linux-amd64.tar.gz"
sudo mkdir -p "/usr/local/helm-v$HELM_VERSION"
sudo tar -xzf "helm-v$HELM_VERSION-linux-amd64.tar.gz" -C "/usr/local/helm-v$HELM_VERSION"
sudo ln -s "/usr/local/helm-v$HELM_VERSION/linux-amd64/helm" /usr/local/bin/helm
rm -f "helm-v$HELM_VERSION-linux-amd64.tar.gz"
echo "Installing chart-releaser..."
curl -LO "https://github.com/helm/chart-releaser/releases/download/v${CHART_RELEASER_VERSION}/chart-releaser_${CHART_RELEASER_VERSION}_linux_amd64.tar.gz"
sudo mkdir -p "/usr/local/chart-releaser-v$CHART_RELEASER_VERSION"
sudo tar -xzf "chart-releaser_${CHART_RELEASER_VERSION}_linux_amd64.tar.gz" -C "/usr/local/chart-releaser-v$CHART_RELEASER_VERSION"
sudo ln -s "/usr/local/chart-releaser-v$CHART_RELEASER_VERSION/cr" /usr/local/bin/cr
rm -f "chart-releaser_${CHART_RELEASER_VERSION}_linux_amd64.tar.gz"

.circleci/release.sh Executable file

@ -0,0 +1,91 @@
#!/usr/bin/env bash
set -o errexit
set -o nounset
set -o pipefail
: "${GITHUB_TOKEN:?Environment variable GITHUB_TOKEN must be set}"
: "${CR_REPO_URL:?Environment variable CR_REPO_URL must be set}"
: "${GIT_USERNAME:?Environment variable GIT_USERNAME must be set}"
: "${GIT_REPOSITORY_NAME:?Environment variable GIT_REPOSITORY_NAME must be set}"
readonly REPO_ROOT="${REPO_ROOT:-$(git rev-parse --show-toplevel)}"
export CR_TOKEN="$GITHUB_TOKEN"
main() {
pushd "$REPO_ROOT" > /dev/null
echo "Fetching tags..."
git fetch --tags
echo "Fetching charts..."
local changed_charts=()
# iterate over all charts and skip those that already have a tag matching the current version
for chart_config in */Chart.yaml; do
local chart_name
local chart_ver
local tag
chart_name=$(awk '/^name: /{print $NF}' < "$chart_config" )
chart_ver=$(awk '/^version: /{print $NF}' < "$chart_config")
tag="${chart_name}-${chart_ver}"
if git rev-parse "$tag" >/dev/null 2>&1; then
echo "Chart '$chart_name': tag '$tag' already exists, skipping."
else
echo "Chart '$chart_name': new version '$chart_ver' detected."
changed_charts+=("$chart_name")
fi
done
# preparing dirs
rm -rf .cr-release-packages
mkdir -p .cr-release-packages
rm -rf .cr-index
mkdir -p .cr-index
# only release those charts for which a new version has been detected
if [[ -n "${changed_charts[*]}" ]]; then
for chart in "${changed_charts[@]}"; do
echo "Packaging chart '$chart'..."
package_chart "$chart"
done
release_charts
# the newly created GitHub releases may not be available yet; let's wait a bit to be sure.
sleep 5
update_index
else
echo "Nothing to do. No chart changes detected."
fi
popd > /dev/null
}
package_chart() {
local chart="$1"
helm package "$chart" --destination .cr-release-packages --dependency-update
}
release_charts() {
cr upload -o "$GIT_USERNAME" -r "$GIT_REPOSITORY_NAME"
}
update_index() {
cr index -o "$GIT_USERNAME" -r "$GIT_REPOSITORY_NAME" -c "$CR_REPO_URL"
git config user.email "poiana@users.noreply.github.com"
git config user.name "poiana"
git checkout gh-pages
cp --force .cr-index/index.yaml index.yaml
git add index.yaml
git commit --message="Update index.yaml" --signoff
git push origin gh-pages
}
main
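To exercise the same release flow outside CI, one could export the variables the script requires (the token below is a placeholder) and run the two scripts from a clone of the repository:

```bash
# Placeholder token; the other values mirror what the CircleCI job exports.
export GITHUB_TOKEN="<personal-access-token>"
export CR_REPO_URL="https://falcosecurity.github.io/charts"
export GIT_USERNAME="falcosecurity"
export GIT_REPOSITORY_NAME="charts"
.circleci/install_tools.sh  # installs helm and cr
.circleci/release.sh        # note: this performs a real release, not a dry run
```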


@ -35,14 +35,12 @@ Please remove the leading whitespace before the `/kind <>` you uncommented.
> /area falco-chart
> /area falco-exporter-chart
> /area falcosidekick-chart
> /area falco-talon-chart
> /area event-generator-chart
> /area k8s-metacollector-chart
<!--
Please remove the leading whitespace before the `/area <>` you uncommented.
-->
@ -61,7 +59,6 @@ Fixes #
**Special notes for your reviewer**:
**Checklist**
<!--
Place an '[x]' (no spaces) in all applicable fields. Please remove unrelated fields.


@ -1,6 +0,0 @@
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"


@ -1,35 +0,0 @@
name: Check Helm Docs
on:
pull_request:
jobs:
readme:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Run Helm Docs and check the outcome
run: |
for chart in $(ls ./charts); do
docker run \
--rm \
--workdir=/helm-docs \
--volume "$(pwd):/helm-docs" \
-u $(id -u) \
jnorwood/helm-docs:v1.11.0 \
helm-docs -c ./charts/$chart -t ./README.gotmpl -o ./README.md
done
exit_code=$(git diff --exit-code)
exit ${exit_code}
- name: Print a comment in case of failure
run: |
echo "The README.md files are not up to date.
Please, run \"make docs\" before pushing."
exit 1
if: |
failure() && github.event.pull_request.head.repo.full_name == github.repository


@ -1,24 +0,0 @@
name: Links
on:
push:
branches:
- main
- master
pull_request:
jobs:
linkChecker:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 0
- name: Link Checker
uses: lycheeverse/lychee-action@5c4ee84814c983aa7164eaee476f014e53ff3963 #v2.5.0
with:
args: --no-progress './**/*.yml' './**/*.yaml' './**/*.md' './**/*.gotmpl' './**/*.tpl' './**/OWNERS' './**/LICENSE'
token: ${{ secrets.GITHUB_TOKEN }}
fail: true


@ -1,66 +0,0 @@
name: Release Charts
on:
push:
branches:
- main
- master
paths:
- "charts/**"
jobs:
release:
runs-on: ubuntu-latest
permissions:
contents: write
packages: write
id-token: write
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 0
- name: Install Cosign
uses: sigstore/cosign-installer@398d4b0eeef1380460a10c8013a76f728fb906ac # v3.9.1
- name: Configure Git
run: |
git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
- name: Set up Helm
uses: azure/setup-helm@b9e51907a09c216f16ebe8536097933489208112 # v4.3.0
- name: Add dependency chart repos
run: |
helm repo add falcosecurity https://falcosecurity.github.io/charts
- name: Run chart-releaser
uses: helm/chart-releaser-action@cae68fefc6b5f367a0275617c9f83181ba54714f # v1.7.0
with:
charts_dir: charts
config: cr.yaml
env:
CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
- name: Login to GitHub Container Registry
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Publish and Sign OCI Charts
run: |
for chart in `find .cr-release-packages -name '*.tgz' -print`; do
helm push ${chart} oci://ghcr.io/${GITHUB_REPOSITORY} |& tee helm-push-output.log
file_name=${chart##*/}
chart_name=${file_name%-*}
digest=$(awk -F "[, ]+" '/Digest/{print $NF}' < helm-push-output.log)
cosign sign "ghcr.io/${GITHUB_REPOSITORY}/${chart_name}@${digest}"
done
env:
COSIGN_YES: true
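Since the workflow signs keyless with the GitHub Actions OIDC identity, a consumer could verify a published chart roughly as follows (a sketch; the exact certificate identity depends on the signing workflow):

```bash
# Hypothetical keyless verification of a published chart; the identity regexp
# assumes the signature was produced by a workflow in this repository.
cosign verify \
  --certificate-oidc-issuer https://token.actions.githubusercontent.com \
  --certificate-identity-regexp 'https://github.com/falcosecurity/charts' \
  ghcr.io/falcosecurity/charts/falco:<chart-version>
```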


@ -1,74 +0,0 @@
name: Test Charts
on:
pull_request:
jobs:
test:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 0
- name: Set up Helm
uses: azure/setup-helm@b9e51907a09c216f16ebe8536097933489208112 # v4.3.0
with:
version: "3.14.0"
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: "3.x"
- name: Set up chart-testing
uses: helm/chart-testing-action@0d28d3144d3a25ea2cc349d6e59901c4ff469b3b # v2.7.0
- name: Run chart-testing (lint)
run: ct lint --config ct.yaml
- name: Run chart-testing (list-changed)
id: list-changed
run: |
changed=$(ct list-changed --config ct.yaml)
if [[ -n "$changed" ]]; then
echo "changed=true" >> $GITHUB_OUTPUT
fi
- name: Create KIND Cluster
if: steps.list-changed.outputs.changed == 'true'
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
with:
config: ./tests/kind-config.yaml
- name: Run chart-testing (install)
if: steps.list-changed.outputs.changed == 'true'
run: ct install --exclude-deprecated --config ct.yaml
go-unit-tests:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 0
- name: Set up Helm
uses: azure/setup-helm@b9e51907a09c216f16ebe8536097933489208112 # v4.3.0
with:
version: "3.10.3"
- name: Update repo deps
run: helm dependency update ./charts/falco
- name: Setup Go
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
with:
go-version: "1.21"
check-latest: true
- name: K8s-metacollector unit tests
run: go test ./charts/k8s-metacollector/tests/unit/...
- name: Falco unit tests
run: go test ./charts/falco/tests/unit/...
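The same tests can be reproduced locally with the commands this job runs, assuming Helm and Go are installed:

```bash
# Mirror of the go-unit-tests job, run from the repository root.
helm dependency update ./charts/falco
go test ./charts/k8s-metacollector/tests/unit/...
go test ./charts/falco/tests/unit/...
```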

.gitignore vendored

@ -1,6 +0,0 @@
# editor and IDE paraphernalia
.idea
*.swp
*.swo
*~
.vscode


@ -1,22 +0,0 @@
nats:/host:port
https://yds.serverless.yandexcloud.net/
http:/host:port
https://chat.googleapis.com/v1/spaces/XXXXXX/YYYYYY
https://xxxx/hooks/YYYY
https://cliq.zoho.eu/api/v2/channelsbyname/XXXX/message*
https://outlook.office.com/webhook/XXXXXX/IncomingWebhook/YYYYYY
https://outlook.office.com/webhook/XXXXXX/IncomingWebhook/YYYYYY
https://discord.com/api/webhooks/xxxxxxxxxx
http://kafkarest:8082/topics/test
https://api.spyderbat.com/
https://hooks.slack.com/services/XXXX/YYYY/ZZZZ
http://\{domain*
https://github.com/falcosecurity/falcosidekick/tree/master/deploy/helm/falcosidekick
http://some.url/some/path/
https://localhost:32765/k8s-audit
https://some.url/some/path/
http://localhost:8765/versions
https://environmentid.live.dynatrace.com/api
https://yourdomain/e/ENVIRONMENTID/api
http://falco-talon:2803
https://http-intake.logs.datadoghq.com/


@ -1,40 +0,0 @@
DOCS_IMAGE_VERSION="v1.11.0"
LINT_IMAGE_VERSION="v3.8.0"
# Charts's path relative to the current directory.
CHARTS := $(wildcard ./charts/*)
CHARTS_NAMES := $(notdir $(CHARTS))
.PHONY: lint
lint: helm-deps-update $(addprefix lint-, $(CHARTS_NAMES))
lint-%:
@docker run \
-it \
-e HOME=/home/ct \
--mount type=tmpfs,destination=/home/ct \
--workdir=/data \
--volume $$(pwd):/data \
-u $$(id -u) \
quay.io/helmpack/chart-testing:$(LINT_IMAGE_VERSION) \
ct lint --config ./ct.yaml --charts ./charts/$*
.PHONY: docs
docs: $(addprefix docs-, $(CHARTS_NAMES))
docs-%:
@docker run \
--rm \
--workdir=/helm-docs \
--volume "$$(pwd):/helm-docs" \
-u $$(id -u) \
jnorwood/helm-docs:$(DOCS_IMAGE_VERSION) \
helm-docs -c ./charts/$* -t ./README.gotmpl -o ./README.md
.PHONY: helm-deps-update
helm-deps-update: $(addprefix helm-deps-update-, $(CHARTS_NAMES))
helm-deps-update-%:
helm dependency update ./charts/$*
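Typical invocations of these targets, assuming Docker is available locally:

```bash
# Lint or regenerate docs for a single chart, or regenerate docs for all charts.
make lint-falco           # ct lint against charts/falco inside a container
make docs-falcosidekick   # regenerate charts/falcosidekick/README.md with helm-docs
make docs                 # regenerate the README.md of every chart
```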

OWNERS

@ -2,10 +2,9 @@ approvers:
- leogr
- Issif
- cpanato
- alacuku
- ekoops
reviewers:
- bencer
- alacuku
emeritus_approvers:
- leodido
- fntlnz


@ -1,10 +1,8 @@
# Falco Helm Charts
[![Falco Core Repository](https://github.com/falcosecurity/evolution/blob/main/repos/badges/falco-core-blue.svg)](https://github.com/falcosecurity/evolution/blob/main/REPOSITORIES.md#core-scope) [![Stable](https://img.shields.io/badge/status-stable-brightgreen?style=for-the-badge)](https://github.com/falcosecurity/evolution/blob/main/REPOSITORIES.md#stable) [![License](https://img.shields.io/github/license/falcosecurity/charts?style=for-the-badge)](./LICENSE)
This GitHub project is the source for our [Helm chart repository](https://v3.helm.sh/docs/topics/chart_repository/).
This GitHub project is the source for the [Falco](https://github.com/falcosecurity/falco) Helm chart repository that you can use to [deploy](https://falco.org/docs/getting-started/deployment/) Falco in your Kubernetes infrastructure.
The purpose of this repository is to provide a place for maintaining and contributing Charts related to the Falco project, with CI processes in place for managing the releasing of Charts into [our Helm Chart Repository](https://falcosecurity.github.io/charts).
The purpose of this repository is to provide a place for maintaining and contributing Charts related to the Falco project, with CI processes in place for managing the releasing of Charts into [our Helm Chart Repository]((https://falcosecurity.github.io/charts)).
For more information about installing and using Helm, see the
[Helm Docs](https://helm.sh/docs/).
@ -12,21 +10,19 @@ For more information about installing and using Helm, see the
## Repository Structure
This GitHub repository contains the source for the packaged and versioned charts released to [https://falcosecurity.github.io/charts](https://falcosecurity.github.io/charts) (our Helm Chart Repository).
We also publish the charts as OCI images, hosted in [GitHub Packages](https://github.com/orgs/falcosecurity/packages?repo_name=charts).
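For instance, a chart can be pulled or installed directly from the OCI registry; the `ghcr.io/falcosecurity/charts` path below is inferred from the release workflow and may differ:

```bash
# Hypothetical pull and install of the falco chart straight from the OCI registry.
helm pull oci://ghcr.io/falcosecurity/charts/falco
helm install falco oci://ghcr.io/falcosecurity/charts/falco \
  --namespace falco --create-namespace
```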
The Charts in this repository are organized into folders: each directory that contains a `Chart.yaml` is a chart.
The Charts in the `master` branch (with a corresponding [GitHub release](https://github.com/falcosecurity/charts/releases)) match the latest packaged Charts in [our Helm Chart Repository](https://falcosecurity.github.io/charts), though there may be previous versions of a Chart available in that Chart Repository.
The Charts in the `master` branch (with a corresponding [GitHub release](https://github.com/falcosecurity/charts/releases)) match the latest packaged Charts in [our Helm Chart Repository]((https://falcosecurity.github.io/charts)), though there may be previous versions of a Chart available in that Chart Repository.
## Charts
Charts currently available are listed below.
- [falco](./charts/falco)
- [falcosidekick](./charts/falcosidekick)
- [event-generator](./charts/event-generator)
- [k8s-metacollector](./charts/k8s-metacollector)
- [falco-talon](./charts/falco-talon)
- [falco](falco)
- [falco-exporter](falco-exporter)
- [falcosidekick](falcosidekick)
- [event-generator](event-generator)
## Usage
@ -41,7 +37,7 @@ helm repo update
### Installing a chart
Please refer to the instruction provided by the Chart you want to install. For installing Falco via Helm, the documentation is [here](https://github.com/falcosecurity/charts/tree/master/charts/falco#adding-falcosecurity-repository).
Please refer to the instruction provided by the Chart you want to install. For installing Falco via Helm, the documentation is [here](https://github.com/falcosecurity/charts/tree/master/falco#adding-falcosecurity-repository).
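As a minimal example (the chart-specific flags are documented in each chart's README):

```bash
# Minimal example: install the falco chart into its own namespace.
helm install falco falcosecurity/falco --namespace falco --create-namespace
```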
## Contributing
@ -52,8 +48,9 @@ So, we ask you to follow these simple steps when making your PR:
- The [DCO](https://github.com/falcosecurity/.github/blob/master/CONTRIBUTING.md#developer-certificate-of-origin) is required to contribute to a `falcosecurity` project. So ensure that all your commits have been signed off. We will not be able to merge the PR if a commit is not signed off.
- Bump the version number of the chart by modifying the `version` value in the chart's `Chart.yaml` file. This is particularly important, as it allows our CI to release a new chart version. If the version has not been increased, we will not be able to merge the PR.
- Add a new section in the chart's `CHANGELOG.md` file with the new version number of the chart.
- If your changes affect any chart variables, please update the chart's `README.gotmpl` file accordingly and run `make docs` in the main folder.
- If your changes affect any chart variables, please update the chart's `README.md` file accordingly and run `make docs` in the chart folder.
Finally, when opening your PR, please fill in the provided PR template, including the final checklist of items to indicate that all the steps above have been performed.
If you have any questions, please feel free to contact us via [GitHub issues](https://github.com/falcosecurity/charts/issues).


@ -1,145 +0,0 @@
# Event-generator
[event-generator](https://github.com/falcosecurity/event-generator) is a tool designed to generate events for both syscalls and k8s audit. The tool can be used to check if Falco is working properly. It does so by performing a variety of suspect actions which trigger security events. The event-generator implements a [minimalistic framework](https://github.com/falcosecurity/event-generator/tree/master/events) which makes it easy to implement new actions.
## Introduction
This chart helps to deploy the event-generator in a kubernetes cluster in order to test an already deployed Falco instance.
## Adding `falcosecurity` repository
Before installing the chart, add the `falcosecurity` charts repository:
```bash
helm repo add falcosecurity https://falcosecurity.github.io/charts
helm repo update
```
## Installing the Chart
To install the chart with default values and release name `event-generator` run:
```bash
helm install event-generator falcosecurity/event-generator
```
After a few seconds, event-generator should be running in the `default` namespace.
In order to install the event-generator in a custom namespace run:
```bash
# change the name of the namespace to fit your requirements.
kubectl create ns "ns-event-generator"
helm install event-generator falcosecurity/event-generator --namespace "ns-event-generator"
```
When the event-generator is installed using the default values in the `values.yaml` file, it is deployed as a k8s job that runs the `run` command and generates activity only for the k8s audit.
For more info, check the next section.
> **Tip**: List all releases using `helm list`; a release is a name used to track a specific deployment.
### Commands, actions and options
The event-generator tool accepts two commands: `run` and `test`. The first just generates activity; the latter, which is more sophisticated, also checks that Falco triggers the expected rule for each generated activity. Both accept an argument that determines the actions to be performed:
```bash
event-generator run/test [regexp]
```
Without arguments, all actions are performed; otherwise, only those actions matching the given regular expression are. If we want to `test` just the actions related to k8s, the following command does the trick:
```bash
event-generator test ^k8saudit
```
The list of the supported actions can be found [here](https://github.com/falcosecurity/event-generator#list-actions).
Before diving into how this Helm chart deploys and manages instances of the event-generator in Kubernetes, there are two more options that we need to talk about (a short usage example follows the list):
+ `--loop` to run actions in a loop
+ `--sleep` to set the length of time to wait before running an action (default to 1s)
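For example, to generate only the syscall-related activity in a loop, pausing two seconds between actions:

```bash
# Generate only syscall-related activity, repeatedly, waiting 2s between actions.
event-generator run "^syscall" --loop --sleep 2s

# Run the k8s audit actions once and verify that Falco reacts as expected.
event-generator test "^k8saudit"
```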
### Deployment modes in k8s
Based on the commands, actions and options configured, the event-generator can be deployed as a k8s `job` or `deployment`. If the `config.loop` value is set, a `deployment` is used since it is a long-running process; otherwise, a `job` is used.
A configuration like the one below, set in the `values.yaml` file, will deploy the event-generator using a `deployment` with the `run` command passed to it, and it will generate activity only for the syscalls:
```yaml
config:
# -- The event-generator accepts two commands (run, test):
# run: runs actions.
# test: runs and tests actions.
# For more info see: https://github.com/falcosecurity/event-generator
command: run
# -- Regular expression used to select the actions to be run.
actions: "^syscall"
# -- Runs in a loop the actions.
# If set to "true" the event-generator is deployed using a k8s deployment otherwise a k8s job.
loop: true
# -- The length of time to wait before running an action. Non-zero values should contain
# a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means no sleep. (default 100ms)
sleep: ""
grpc:
# -- Set it to true if you are deploying in "test" mode.
enabled: false
# -- Path to the Falco grpc socket.
bindAddress: "unix:///var/run/falco/falco.sock"
```
The following configuration will use a k8s `job` since we want to perform the k8s activity once and check that Falco reacts properly to those actions:
```yaml
config:
# -- The event-generator accepts two commands (run, test):
# run: runs actions.
# test: runs and tests actions.
# For more info see: https://github.com/falcosecurity/event-generator
command: test
# -- Regular expression used to select the actions to be run.
actions: "^k8saudit"
# -- Runs in a loop the actions.
# If set to "true" the event-generator is deployed using a k8s deployment otherwise a k8s job.
loop: false
# -- The length of time to wait before running an action. Non-zero values should contain
# a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means no sleep. (default 100ms)
sleep: ""
grpc:
# -- Set it to true if you are deploying in "test" mode.
enabled: true
# -- Path to the Falco grpc socket.
bindAddress: "unix:///var/run/falco/falco.sock"
```
Note that **grpc.enabled is set to true when running with the test command. Be sure that Falco exposes the grpc socket and emits output to it**.
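For the `test` command to work end to end, Falco itself must expose its gRPC output; with the official Falco chart this is typically done with flags along the following lines (a hedged sketch, since the value keys belong to the Falco chart rather than to this one):

```bash
# Hedged example: value keys belong to the falcosecurity/falco chart and map to
# Falco's grpc/grpc_output settings; check that chart's values.yaml before use.
helm upgrade --install falco falcosecurity/falco --namespace falco \
  --set falco.grpc.enabled=true \
  --set falco.grpc_output.enabled=true
```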
## Uninstalling the Chart
To uninstall the `event-generator` release:
```bash
helm uninstall event-generator
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
The following table lists the main configurable parameters of the event-generator chart v0.3.4 and their default values. See `values.yaml` for the full list; a combined installation example follows the table.
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| affinity | object | `{}` | Affinity, like the nodeSelector but with more expressive syntax. |
| config.actions | string | `"^syscall"` | Regular expression used to select the actions to be run. |
| config.command | string | `"run"` | The event-generator accepts two commands (run, test): run: runs actions. test: runs and tests actions. For more info see: https://github.com/falcosecurity/event-generator. |
| config.grpc.bindAddress | string | `"unix:///run/falco/falco.sock"` | Path to the Falco grpc socket. |
| config.grpc.enabled | bool | `false` | Set it to true if you are deploying in "test" mode. |
| config.loop | bool | `true` | Runs in a loop the actions. If set to "true" the event-generator is deployed using a k8s deployment otherwise a k8s job. |
| config.sleep | string | `""` | The length of time to wait before running an action. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means no sleep. (default 100ms) |
| fullnameOverride | string | `""` | Used to override the chart full name. |
| image | object | `{"pullPolicy":"IfNotPresent","repository":"falcosecurity/event-generator","tag":"latest"}` | Number of old history to retain to allow rollback (If not set, default Kubernetes value is set to 10) revisionHistoryLimit: 1 |
| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the event-generator image |
| image.repository | string | `"falcosecurity/event-generator"` | Repository from where the image is pulled. |
| image.tag | string | `"latest"` | Images' tag to select a development/custom version of event-generator instead of a release. Overrides the image tag whose default is the chart appVersion. |
| imagePullSecrets | list | `[]` | Secrets used to pull the image from a private repository. |
| nameOverride | string | `""` | Used to override the chart name. |
| nodeSelector | object | `{}` | Selectors to choose a given node where to run the pods. |
| podAnnotations | object | `{}` | Annotations to be added to the pod. |
| podSecurityContext | object | `{}` | Security context for the pod. |
| replicasCount | int | `1` | Number of replicas of the event-generator (meaningful when installed as a deployment). |
| securityContext | object | `{}` | Security context for the containers. |
| tolerations | list | `[]` | Tolerations to allow the pods to be scheduled on nodes whose taints the pod tolerates. |
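Putting a few of these values together, a test-mode installation could look like this (a sketch based on the table above):

```bash
# Test-mode install: run the k8s audit actions once and verify them via gRPC.
helm install event-generator falcosecurity/event-generator \
  --namespace ns-event-generator --create-namespace \
  --set config.command=test \
  --set config.actions="^k8saudit" \
  --set config.loop=false \
  --set config.grpc.enabled=true
```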


@ -1,39 +0,0 @@
# Change Log
This file documents all notable changes to Falco Talon Helm Chart. The release
numbering uses [semantic versioning](http://semver.org).
## 0.3.0 - 2024-02-07
- bump up version to `v0.3.0`
- fix missing usage of the `imagePullSecrets`
## 0.2.3 - 2024-12-18
- add a Grafana dashboard for the Prometheus metrics
## 0.2.1 - 2024-12-09
- bump up version to `v0.2.1` for bug fixes
## 0.2.0 - 2024-11-26
- configure pod to not rollout on configmap change
- configure pod to rollout on secret change
- add config.rulesOverride allowing users to override config rules
## 0.1.3 - 2024-11-08
- change the key for the range over the rules files
## 0.1.2 - 2024-10-14
- remove all refs to the previous org
## 0.1.1 - 2024-10-01
- Use version `0.1.1`
- Fix wrong port for the `serviceMonitor`
## 0.1.0 - 2024-09-05
- First release


@ -1,18 +0,0 @@
apiVersion: v1
appVersion: 0.3.0
description: React to the events from Falco
name: falco-talon
version: 0.3.0
keywords:
- falco
- monitoring
- security
- response-engine
home: https://github.com/falcosecurity/falco-talon
sources:
- https://github.com/falcosecurity/falco-talon
maintainers:
- name: Issif
email: issif+github@gadz.org
- name: IgorEulalio
email: igoreulalio.ie@gmail.com


@ -1,76 +0,0 @@
# Falco Talon
![release](https://flat.badgen.net/github/release/falcosecurity/falco-talon/latest?color=green) ![last commit](https://flat.badgen.net/github/last-commit/falcosecurity/falco-talon) ![licence](https://flat.badgen.net/badge/license/Apache2.0/blue) ![docker pulls](https://flat.badgen.net/docker/pulls/issif/falco-talon?icon=docker)
## Description
`Falco Talon` is a Response Engine for managing threats in your Kubernetes clusters. It enhances the solutions proposed by the Falco community with a no-code, tailor-made solution. With easy rules, you can react to `events` from [`Falco`](https://falco.org) in milliseconds.
## Architecture
`Falco Talon` can receive the `events` from [`Falco`](https://falco.org) or [`Falcosidekick`](https://github.com/falcosecurity/falcosidekick):
```mermaid
flowchart LR
falco
falcosidekick
falco-talon
falco -- event --> falcosidekick
falco -- event --> falco-talon
falcosidekick -- event --> falco-talon
kubernetes -- context --> falco-talon
falco-talon -- action --> aws
falco-talon -- output --> minio
falco-talon -- action --> kubernetes
falco-talon -- notification --> slack
```
## Documentation
The full documentation is available on its own website: [https://docs.falco-talon.org/docs](https://docs.falco-talon.org/docs).
## Installation
```shell
helm repo add falcosecurity https://falcosecurity.github.io/charts
helm repo update
helm install falco-talon falcosecurity/falco-talon -n falco --create-namespace -f values.yaml
```
### Update the rules
Update `rules.yaml` then:
```
helm upgrade falco-talon falcosecurity/falco-talon -n falco -f values.yaml
```
### Uninstall Falco Talon
```
helm delete falco-talon -n falco
```
## Configuration
{{ template "chart.valuesSection" . }}
## Connect Falcosidekick
Once you have installed `Falco Talon` with Helm, you need to connect `Falcosidekick` by adding the flag `--set falcosidekick.config.webhook.address=http://falco-talon:2803`
```shell
helm upgrade -i falco falcosecurity/falco --namespace falco \
--create-namespace \
--set tty=true \
--set falcosidekick.enabled=true \
--set falcosidekick.config.talon.address=http://falco-talon:2803
```
## License
Falco Talon is licensed to you under the **Apache 2.0** open source license.
## Author
Thomas Labarussias (https://github.com/Issif)


@ -1,184 +0,0 @@
# Falco Talon
![release](https://flat.badgen.net/github/release/falcosecurity/falco-talon/latest?color=green) ![last commit](https://flat.badgen.net/github/last-commit/falcosecurity/falco-talon) ![licence](https://flat.badgen.net/badge/license/Apache2.0/blue) ![docker pulls](https://flat.badgen.net/docker/pulls/issif/falco-talon?icon=docker)
## Description
`Falco Talon` is a Response Engine for managing threats in your Kubernetes clusters. It enhances the solutions proposed by the Falco community with a no-code, tailor-made solution. With easy rules, you can react to `events` from [`Falco`](https://falco.org) in milliseconds.
## Architecture
`Falco Talon` can receive the `events` from [`Falco`](https://falco.org) or [`Falcosidekick`](https://github.com/falcosecurity/falcosidekick):
```mermaid
flowchart LR
falco
falcosidekick
falco-talon
falco -- event --> falcosidekick
falco -- event --> falco-talon
falcosidekick -- event --> falco-talon
kubernetes -- context --> falco-talon
falco-talon -- action --> aws
falco-talon -- output --> minio
falco-talon -- action --> kubernetes
falco-talon -- notification --> slack
```
## Documentation
The full documentation is available on its own website: [https://docs.falco-talon.org/docs](https://docs.falco-talon.org/docs).
## Installation
```shell
helm repo add falcosecurity https://falcosecurity.github.io/charts
helm repo update
helm install falco-talon falcosecurity/falco-talon -n falco --create-namespace -f values.yaml
```
### Update the rules
Update `rules.yaml` then:
```
helm upgrade falco-talon falcosecurity/falco-talon -n falco -f values.yaml
```
### Uninstall Falco Talon
```
helm delete falco-talon -n falco
```
## Configuration
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| affinity | object | `{}` | affinity |
| config | object | `{"aws":{"accesKey":"","externalId":"","region":"","roleArn":"","secretKey":""},"deduplication":{"leaderElection":true,"timeWindowSeconds":5},"defaultNotifiers":["k8sevents"],"listenAddress":"0.0.0.0","listenPort":2803,"minio":{"accessKey":"","endpoint":"","secretKey":"","useSsl":false},"notifiers":{"elasticsearch":{"createIndexTemplate":true,"numberOfReplicas":1,"numberOfShards":1,"url":""},"loki":{"apiKey":"","customHeaders":[],"hostPort":"","tenant":"","user":""},"slack":{"footer":"https://github.com/falcosecurity/falco-talon","format":"long","icon":"https://upload.wikimedia.org/wikipedia/commons/2/26/Circaetus_gallicus_claw.jpg","username":"Falco Talon","webhookUrl":""},"smtp":{"format":"html","from":"","hostPort":"","password":"","tls":false,"to":"","user":""},"webhook":{"url":""}},"otel":{"collectorEndpoint":"","collectorPort":4317,"collectorUseInsecureGrpc":false,"metricsEnabled":false,"tracesEnabled":false},"printAllEvents":false,"rulesOverride":"- action: Terminate Pod\n actionner: kubernetes:terminate\n parameters:\n ignore_daemonsets: true\n ignore_statefulsets: true\n grace_period_seconds: 20\n","watchRules":true}` | config of Falco Talon (See https://docs.falco-talon.org/docs/configuration/) |
| config.aws | object | `{"accesKey":"","externalId":"","region":"","roleArn":"","secretKey":""}` | aws |
| config.aws.accesKey | string | `""` | access key (if not specified, default access_key from provider credential chain will be used) |
| config.aws.externalId | string | `""` | external id |
| config.aws.region | string | `""` | region (if not specified, default region from provider credential chain will be used) |
| config.aws.roleArn | string | `""` | role arn |
| config.aws.secretKey | string | `""` | secret key (if not specified, default secret_key from provider credential chain will be used) |
| config.deduplication | object | `{"leaderElection":true,"timeWindowSeconds":5}` | deduplication of the Falco events |
| config.deduplication.leaderElection | bool | `true` | enable the leader election for cluster mode |
| config.deduplication.timeWindowSeconds | int | `5` | duration in seconds for the deduplication time window |
| config.defaultNotifiers | list | `["k8sevents"]` | default notifiers for all rules |
| config.listenAddress | string | `"0.0.0.0"` | listen address |
| config.listenPort | int | `2803` | listen port |
| config.minio | object | `{"accessKey":"","endpoint":"","secretKey":"","useSsl":false}` | minio |
| config.minio.accessKey | string | `""` | access key |
| config.minio.endpoint | string | `""` | endpoint |
| config.minio.secretKey | string | `""` | secret key |
| config.minio.useSsl | bool | `false` | use ssl |
| config.notifiers | object | `{"elasticsearch":{"createIndexTemplate":true,"numberOfReplicas":1,"numberOfShards":1,"url":""},"loki":{"apiKey":"","customHeaders":[],"hostPort":"","tenant":"","user":""},"slack":{"footer":"https://github.com/falcosecurity/falco-talon","format":"long","icon":"https://upload.wikimedia.org/wikipedia/commons/2/26/Circaetus_gallicus_claw.jpg","username":"Falco Talon","webhookUrl":""},"smtp":{"format":"html","from":"","hostPort":"","password":"","tls":false,"to":"","user":""},"webhook":{"url":""}}` | notifiers (See https://docs.falco-talon.org/docs/notifiers/list/ for the settings) |
| config.notifiers.elasticsearch | object | `{"createIndexTemplate":true,"numberOfReplicas":1,"numberOfShards":1,"url":""}` | elasticsearch |
| config.notifiers.elasticsearch.createIndexTemplate | bool | `true` | create the index template |
| config.notifiers.elasticsearch.numberOfReplicas | int | `1` | number of replicas |
| config.notifiers.elasticsearch.numberOfShards | int | `1` | number of shards |
| config.notifiers.elasticsearch.url | string | `""` | url |
| config.notifiers.loki | object | `{"apiKey":"","customHeaders":[],"hostPort":"","tenant":"","user":""}` | loki |
| config.notifiers.loki.apiKey | string | `""` | api key |
| config.notifiers.loki.customHeaders | list | `[]` | custom headers |
| config.notifiers.loki.hostPort | string | `""` | host:port |
| config.notifiers.loki.tenant | string | `""` | tenant |
| config.notifiers.loki.user | string | `""` | user |
| config.notifiers.slack | object | `{"footer":"https://github.com/falcosecurity/falco-talon","format":"long","icon":"https://upload.wikimedia.org/wikipedia/commons/2/26/Circaetus_gallicus_claw.jpg","username":"Falco Talon","webhookUrl":""}` | slack |
| config.notifiers.slack.footer | string | `"https://github.com/falcosecurity/falco-talon"` | footer |
| config.notifiers.slack.format | string | `"long"` | format |
| config.notifiers.slack.icon | string | `"https://upload.wikimedia.org/wikipedia/commons/2/26/Circaetus_gallicus_claw.jpg"` | icon |
| config.notifiers.slack.username | string | `"Falco Talon"` | username |
| config.notifiers.slack.webhookUrl | string | `""` | webhook url |
| config.notifiers.smtp | object | `{"format":"html","from":"","hostPort":"","password":"","tls":false,"to":"","user":""}` | smtp |
| config.notifiers.smtp.format | string | `"html"` | format |
| config.notifiers.smtp.from | string | `""` | from |
| config.notifiers.smtp.hostPort | string | `""` | host:port |
| config.notifiers.smtp.password | string | `""` | password |
| config.notifiers.smtp.tls | bool | `false` | enable tls |
| config.notifiers.smtp.to | string | `""` | to |
| config.notifiers.smtp.user | string | `""` | user |
| config.notifiers.webhook | object | `{"url":""}` | webhook |
| config.notifiers.webhook.url | string | `""` | url |
| config.otel | object | `{"collectorEndpoint":"","collectorPort":4317,"collectorUseInsecureGrpc":false,"metricsEnabled":false,"tracesEnabled":false}` | open telemetry parameters |
| config.otel.collectorEndpoint | string | `""` | collector endpoint |
| config.otel.collectorPort | int | `4317` | collector port |
| config.otel.collectorUseInsecureGrpc | bool | `false` | use insecure grpc |
| config.otel.metricsEnabled | bool | `false` | enable otel metrics |
| config.otel.tracesEnabled | bool | `false` | enable otel traces |
| config.printAllEvents | bool | `false` | print in stdout all received events, not only those which match a rule |
| config.watchRules | bool | `true` | auto reload the rules when the files change |
| extraEnv | list | `[{"name":"LOG_LEVEL","value":"warning"}]` | extra env |
| grafana | object | `{"dashboards":{"configMaps":{"talon":{"folder":"","name":"falco-talon-grafana-dashboard","namespace":""}},"enabled":false}}` | grafana contains the configuration related to grafana. |
| grafana.dashboards | object | `{"configMaps":{"talon":{"folder":"","name":"falco-talon-grafana-dashboard","namespace":""}},"enabled":false}` | dashboards contains configuration for grafana dashboards. |
| grafana.dashboards.configMaps | object | `{"talon":{"folder":"","name":"falco-talon-grafana-dashboard","namespace":""}}` | configmaps to be deployed that contain a grafana dashboard. |
| grafana.dashboards.configMaps.talon | object | `{"folder":"","name":"falco-talon-grafana-dashboard","namespace":""}` | falco-talon contains the configuration for falco talon's dashboard. |
| grafana.dashboards.configMaps.talon.folder | string | `""` | folder where the dashboard is stored by grafana. |
| grafana.dashboards.configMaps.talon.name | string | `"falco-talon-grafana-dashboard"` | name specifies the name for the configmap. |
| grafana.dashboards.configMaps.talon.namespace | string | `""` | namespace specifies the namespace for the configmap. |
| grafana.dashboards.enabled | bool | `false` | enabled specifies whether the dashboards should be deployed. |
| image | object | `{"pullPolicy":"Always","registry":"falco.docker.scarf.sh","repository":"falcosecurity/falco-talon","tag":""}` | image parameters |
| image.pullPolicy | string | `"Always"` | The image pull policy |
| image.registry | string | `"falco.docker.scarf.sh"` | The image registry to pull from |
| image.repository | string | `"falcosecurity/falco-talon"` | The image repository to pull from |
| image.tag | string | `""` | Override the image tag to pull |
| imagePullSecrets | list | `[]` | one or more secrets to be used when pulling images |
| ingress | object | `{"annotations":{},"enabled":false,"hosts":[{"host":"falco-talon.local","paths":[{"path":"/"}]}],"tls":[]}` | ingress parameters |
| ingress.annotations | object | `{}` | annotations of the ingress |
| ingress.enabled | bool | `false` | enable the ingress |
| ingress.hosts | list | `[{"host":"falco-talon.local","paths":[{"path":"/"}]}]` | hosts |
| ingress.tls | list | `[]` | tls |
| nameOverride | string | `""` | override name |
| nodeSelector | object | `{}` | node selector |
| podAnnotations | object | `{}` | pod annotations |
| podSecurityContext | object | `{"fsGroup":1234,"runAsUser":1234}` | pod security context |
| podSecurityContext.fsGroup | int | `1234` | group |
| podSecurityContext.runAsUser | int | `1234` | user id |
| podSecurityPolicy | object | `{"create":false}` | pod security policy |
| podSecurityPolicy.create | bool | `false` | enable the creation of the PSP |
| priorityClassName | string | `""` | priority class name |
| rbac | object | `{"caliconetworkpolicies":["get","update","patch","create"],"ciliumnetworkpolicies":["get","update","patch","create"],"clusterroles":["get","delete"],"configmaps":["get","delete"],"daemonsets":["get","delete"],"deployments":["get","delete"],"events":["get","update","patch","create"],"leases":["get","update","patch","watch","create"],"namespaces":["get","delete"],"networkpolicies":["get","update","patch","create"],"nodes":["get","update","patch","watch","create"],"pods":["get","update","patch","delete","list"],"podsEphemeralcontainers":["patch","create"],"podsEviction":["get","create"],"podsExec":["get","create"],"podsLog":["get"],"replicasets":["get","delete"],"roles":["get","delete"],"secrets":["get","delete"],"serviceAccount":{"create":true,"name":""},"statefulsets":["get","delete"]}` | rbac |
| rbac.serviceAccount.create | bool | `true` | create the service account. If create is false, name is required |
| rbac.serviceAccount.name | string | `""` | name of the service account |
| replicaCount | int | `2` | number of running pods |
| resources | object | `{}` | resources |
| service | object | `{"annotations":{},"port":2803,"type":"ClusterIP"}` | service parameters |
| service.annotations | object | `{}` | annotations of the service |
| service.port | int | `2803` | port of the service |
| service.type | string | `"ClusterIP"` | type of service |
| serviceMonitor | object | `{"additionalLabels":{},"enabled":false,"interval":"30s","path":"/metrics","port":"http","relabelings":[],"scheme":"http","scrapeTimeout":"10s","targetLabels":[],"tlsConfig":{}}` | serviceMonitor holds the configuration for the ServiceMonitor CRD. |
| serviceMonitor.additionalLabels | object | `{}` | additionalLabels specifies labels to be added on the Service Monitor. |
| serviceMonitor.enabled | bool | `false` | enable the deployment of a Service Monitor for the Prometheus Operator. |
| serviceMonitor.interval | string | `"30s"` | interval specifies the time interval at which Prometheus should scrape metrics from the service. |
| serviceMonitor.path | string | `"/metrics"` | path at which the metrics are exposed |
| serviceMonitor.port | string | `"http"` | portname at which the metrics are exposed |
| serviceMonitor.relabelings | list | `[]` | relabelings configures the relabeling rules to apply the targets metadata labels. |
| serviceMonitor.scheme | string | `"http"` | scheme specifies network protocol used by the metrics endpoint. In this case HTTP. |
| serviceMonitor.scrapeTimeout | string | `"10s"` | scrapeTimeout determines the maximum time Prometheus should wait for a target to respond to a scrape request. If the target does not respond within the specified timeout, Prometheus considers the scrape as failed for that target. |
| serviceMonitor.targetLabels | list | `[]` | targetLabels defines the labels which are transferred from the associated Kubernetes service object onto the ingested metrics. |
| serviceMonitor.tlsConfig | object | `{}` | tlsConfig specifies TLS (Transport Layer Security) configuration for secure communication when scraping metrics from a service. It allows you to define the details of the TLS connection, such as CA certificate, client certificate, and client key. Currently, the k8s-metacollector does not support TLS configuration for the metrics endpoint. |
| tolerations | list | `[]` | tolerations |
## Connect Falcosidekick
Once you have installed `Falco Talon` with Helm, you need to connect `Falcosidekick` by adding the flag `--set falcosidekick.config.webhook.address=http://falco-talon:2803`
```shell
helm upgrade -i falco falcosecurity/falco --namespace falco \
--create-namespace \
--set tty=true \
--set falcosidekick.enabled=true \
--set falcosidekick.config.talon.address=http://falco-talon:2803
```
## License
Falco Talon is licensed to you under the **Apache 2.0** open source license.
## Author
Thomas Labarussias (https://github.com/Issif)


@ -1,8 +0,0 @@
- action: Terminate Pod
actionner: kubernetes:terminate
- action: Label Pod as Suspicious
actionner: kubernetes:label
parameters:
labels:
analysis/status: "suspicious"


@ -1,73 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "falco-talon.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "falco-talon.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for ingress.
*/}}
{{- define "falco-talon.ingress.apiVersion" -}}
{{- if and (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19-0" .Capabilities.KubeVersion.Version) -}}
{{- print "networking.k8s.io/v1" -}}
{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" -}}
{{- print "networking.k8s.io/v1beta1" -}}
{{- else -}}
{{- print "extensions/v1beta1" -}}
{{- end -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "falco-talon.labels" -}}
helm.sh/chart: {{ include "falco-talon.chart" . }}
app.kubernetes.io/part-of: {{ include "falco-talon.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Name }}
{{ include "falco-talon.selectorLabels" . }}
{{- if .Values.image.tag }}
app.kubernetes.io/version: {{ .Values.image.tag }}
{{- else }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
{{- end }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "falco-talon.selectorLabels" -}}
app.kubernetes.io/name: {{ include "falco-talon.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Return if ingress is stable.
*/}}
{{- define "falco-talon.ingress.isStable" -}}
{{- eq (include "falco-talon.ingress.apiVersion" .) "networking.k8s.io/v1" -}}
{{- end -}}
{{/*
Return if ingress supports pathType.
*/}}
{{- define "falco-talon.ingress.supportsPathType" -}}
{{- or (eq (include "falco-talon.ingress.isStable" .) "true") (and (eq (include "falco-talon.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) -}}
{{- end -}}
{{/*
Validate if either serviceAccount create is set to true or serviceAccount name is passed
*/}}
{{- define "falco-talon.validateServiceAccount" -}}
{{- if and (not .Values.rbac.serviceAccount.create) (not .Values.rbac.serviceAccount.name) -}}
{{- fail ".Values.rbac.serviceAccount.create is set to false and .Values.rbac.serviceAccount.name is not provided or is provided as empty string." -}}
{{- end -}}
{{- end -}}
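In practice this validation means an install must either let the chart create the ServiceAccount (the default) or name an existing one, for example:

```bash
# Reuse an existing ServiceAccount ("my-existing-sa" is a placeholder) instead of
# letting the chart create one; omitting the name here would fail the install.
helm install falco-talon falcosecurity/falco-talon -n falco \
  --set rbac.serviceAccount.create=false \
  --set rbac.serviceAccount.name=my-existing-sa
```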


@ -1,18 +0,0 @@
{{- if .Values.podSecurityPolicy.create }}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ template "falco-talon.name" .}}
labels:
{{- include "falco-talon.labels" . | nindent 4 }}
rules:
- apiGroups:
- policy
resources:
- podsecuritypolicies
resourceNames:
- {{ template "falco-talon.name" . }}
verbs:
- use
{{- end }}


@ -1,22 +0,0 @@
{{- if .Values.grafana.dashboards.enabled -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Values.grafana.dashboards.configMaps.talon.name }}
{{ if .Values.grafana.dashboards.configMaps.talon.namespace }}
namespace: {{ .Values.grafana.dashboards.configMaps.talon.namespace }}
{{- else -}}
namespace: {{ .Release.Namespace }}
{{- end }}
labels:
{{- include "falco-talon.labels" . | nindent 4 }}
grafana_dashboard: "1"
annotations:
{{- if .Values.grafana.dashboards.configMaps.talon.folder }}
k8s-sidecar-target-directory: /tmp/dashboards/{{ .Values.grafana.dashboards.configMaps.talon.folder}}
grafana_dashboard_folder: {{ .Values.grafana.dashboards.configMaps.talon.folder }}
{{- end }}
data:
falco-talon-grafana-dashboard.json: |-
{{- .Files.Get "dashboards/falco-talon-grafana-dashboard.json" | nindent 4 }}
{{- end -}}


@ -1,13 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "falco-talon.name" . }}-rules
namespace: {{ .Release.Namespace }}
labels:
{{- include "falco-talon.labels" . | nindent 4 }}
data:
rules.yaml: |-
{{ $.Files.Get "rules.yaml" | nindent 4 }}
{{- if .Values.config.rulesOverride }}
{{ .Values.config.rulesOverride | nindent 4 }}
{{- end }}
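Because the rules ConfigMap appends `config.rulesOverride` after the bundled `rules.yaml`, extra rules can be layered on at install time; a minimal sketch, assuming the default `Terminate Pod` actionner shown earlier:

```bash
# Hypothetical values file layering an extra rule on top of the bundled rules.yaml.
cat > my-values.yaml <<'EOF'
config:
  rulesOverride: |
    - action: Terminate Pod
      actionner: kubernetes:terminate
      parameters:
        grace_period_seconds: 10
EOF
helm upgrade --install falco-talon falcosecurity/falco-talon -n falco -f my-values.yaml
```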


@ -1,101 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "falco-talon.name" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "falco-talon.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "falco-talon.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
labels:
app.kubernetes.io/name: {{ include "falco-talon.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Values.podAnnotations }}
{{ toYaml .Values.podAnnotations | indent 8 }}
{{- end }}
annotations:
secret-checksum: {{ (lookup "v1" "Secret" .Release.Namespace (include "falco-talon.name" . | cat "-config")).data | toJson | sha256sum }}
spec:
serviceAccountName: {{ include "falco-talon.name" . }}
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{- range .Values.imagePullSecrets }}
- name: {{ . }}
{{- end }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: "{{ .Values.priorityClassName }}"
{{- end }}
securityContext:
runAsUser: {{ .Values.podSecurityContext.runAsUser }}
fsGroup: {{ .Values.podSecurityContext.fsGroup }}
restartPolicy: Always
containers:
- name: {{ .Chart.Name }}
{{- if .Values.image.registry }}
image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}"
{{- else }}
image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}"
{{- end }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
args: ["server", "-c", "/etc/falco-talon/config.yaml", "-r", "/etc/falco-talon/rules.yaml"]
ports:
- name: http
containerPort: 2803
protocol: TCP
- name: nats
containerPort: 4222
protocol: TCP
livenessProbe:
httpGet:
path: /healthz
port: http
initialDelaySeconds: 10
periodSeconds: 5
readinessProbe:
httpGet:
path: /healthz
port: http
initialDelaySeconds: 10
periodSeconds: 5
{{- if .Values.extraEnv }}
env:
{{- toYaml .Values.extraEnv | nindent 12 }}
{{- end }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
volumeMounts:
- name: "config"
mountPath: "/etc/falco-talon/config.yaml"
subPath: config.yaml
readOnly: true
- name: "rules"
mountPath: "/etc/falco-talon/rules.yaml"
subPath: rules.yaml
readOnly: true
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
volumes:
- name: "rules"
configMap:
name: "{{ include "falco-talon.name" . }}-rules"
- name: "config"
secret:
secretName: "{{ include "falco-talon.name" . }}-config"


@ -1,50 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $name := include "falco-talon.name" . -}}
{{- $ingressApiIsStable := eq (include "falco-talon.ingress.isStable" .) "true" -}}
{{- $ingressSupportsPathType := eq (include "falco-talon.ingress.supportsPathType" .) "true" -}}
---
apiVersion: {{ include "falco-talon.ingress.apiVersion" . }}
kind: Ingress
metadata:
name: {{ $name }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "falco-talon.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ .path }}
{{- if $ingressSupportsPathType }}
pathType: {{ default "ImplementationSpecific" .pathType }}
{{- end }}
backend:
{{- if $ingressApiIsStable }}
service:
name: {{ $name }}
port:
name: http
{{- else }}
serviceName: {{ $name }}
servicePort: http
{{- end }}
{{- end }}
{{- end }}
{{- end }}


@ -1,32 +0,0 @@
{{- if .Values.podSecurityPolicy.create}}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ template "falco-talon.name" . }}
labels:
{{- include "falco-talon.labels" . | nindent 4 }}
spec:
privileged: false
allowPrivilegeEscalation: false
hostNetwork: false
readOnlyRootFilesystem: true
requiredDropCapabilities:
- ALL
fsGroup:
ranges:
- max: 65535
min: 1
rule: MustRunAs
runAsUser:
rule: MustRunAsNonRoot
seLinux:
rule: RunAsAny
supplementalGroups:
ranges:
- max: 65535
min: 1
rule: MustRunAs
volumes:
- configMap
- secret
{{- end }}


@ -1,216 +0,0 @@
{{- include "falco-talon.validateServiceAccount" . -}}
---
{{- if .Values.rbac.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "falco-talon.name" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "falco-talon.labels" . | nindent 4 }}
{{- end }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "falco-talon.name" . }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ include "falco-talon.name" . }}
helm.sh/chart: {{ include "falco-talon.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
rules:
{{- if .Values.rbac.namespaces }}
- apiGroups:
- ""
resources:
- namespaces
verbs:
{{ toYaml .Values.rbac.namespaces | indent 6 }}
{{- end }}
{{- if .Values.rbac.pods }}
- apiGroups:
- ""
resources:
- pods
verbs:
{{ toYaml .Values.rbac.pods | indent 6 }}
{{- end }}
{{- if .Values.rbac.podsEphemeralcontainers }}
- apiGroups:
- ""
resources:
- pods/ephemeralcontainers
verbs:
{{ toYaml .Values.rbac.podsEphemeralcontainers | indent 6 }}
{{- end }}
{{- if .Values.rbac.nodes }}
- apiGroups:
- ""
resources:
- nodes
verbs:
{{ toYaml .Values.rbac.nodes | indent 6 }}
{{- end }}
{{- if .Values.rbac.podsLog }}
- apiGroups:
- ""
resources:
- pods/log
verbs:
{{ toYaml .Values.rbac.podsLog | indent 6 }}
{{- end }}
{{- if .Values.rbac.podsExec }}
- apiGroups:
- ""
resources:
- pods/exec
verbs:
{{ toYaml .Values.rbac.podsExec | indent 6 }}
{{- end }}
{{- if .Values.rbac.podsEviction }}
- apiGroups:
- ""
resources:
- pods/eviction
verbs:
{{ toYaml .Values.rbac.podsEviction | indent 6 }}
{{- end }}
{{- if .Values.rbac.events }}
- apiGroups:
- ""
resources:
- events
verbs:
{{ toYaml .Values.rbac.events | indent 6 }}
{{- end }}
{{- if .Values.rbac.daemonsets }}
- apiGroups:
- "apps"
resources:
- daemonsets
verbs:
{{ toYaml .Values.rbac.daemonsets | indent 6 }}
{{- end }}
{{- if .Values.rbac.deployments }}
- apiGroups:
- "apps"
resources:
- deployments
verbs:
{{ toYaml .Values.rbac.deployments | indent 6 }}
{{- end }}
{{- if .Values.rbac.replicasets }}
- apiGroups:
- "apps"
resources:
- replicasets
verbs:
{{ toYaml .Values.rbac.replicasets | indent 6 }}
{{- end }}
{{- if .Values.rbac.statefulsets }}
- apiGroups:
- "apps"
resources:
- statefulsets
verbs:
{{ toYaml .Values.rbac.statefulsets | indent 6 }}
{{- end }}
{{- if .Values.rbac.networkpolicies }}
- apiGroups:
- "networking.k8s.io"
resources:
- networkpolicies
verbs:
{{ toYaml .Values.rbac.networkpolicies | indent 6 }}
{{- end }}
{{- if .Values.rbac.caliconetworkpolicies }}
- apiGroups:
- "projectcalico.org"
resources:
- caliconetworkpolicies
verbs:
{{ toYaml .Values.rbac.caliconetworkpolicies | indent 6 }}
{{- end }}
{{- if .Values.rbac.ciliumnetworkpolicies }}
- apiGroups:
- "cilium.io"
resources:
- ciliumnetworkpolicies
verbs:
{{ toYaml .Values.rbac.ciliumnetworkpolicies | indent 6 }}
{{- end }}
{{- if .Values.rbac.roles }}
- apiGroups:
- "rbac.authorization.k8s.io"
resources:
- roles
verbs:
{{ toYaml .Values.rbac.roles | indent 6 }}
{{- end }}
{{- if .Values.rbac.clusterroles }}
- apiGroups:
- "rbac.authorization.k8s.io"
resources:
- clusterroles
verbs:
{{ toYaml .Values.rbac.clusterroles | indent 6 }}
{{- end }}
{{- if .Values.rbac.configmaps }}
- apiGroups:
- ""
resources:
- configmaps
verbs:
{{ toYaml .Values.rbac.configmaps | indent 6 }}
{{- end }}
{{- if .Values.rbac.secrets }}
- apiGroups:
- ""
resources:
- secrets
verbs:
{{ toYaml .Values.rbac.secrets | indent 6 }}
{{- end }}
{{- if .Values.rbac.leases }}
- apiGroups:
- "coordination.k8s.io"
resources:
- leases
verbs:
{{ toYaml .Values.rbac.leases | indent 6 }}
{{- end }}
{{- if .Values.podSecurityPolicy.create }}
- apiGroups:
- policy
resources:
- podsecuritypolicies
resourceNames:
- {{ template "falco-talon.name" . }}
verbs:
- use
{{- end }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "falco-talon.name" . }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ include "falco-talon.name" . }}
helm.sh/chart: {{ include "falco-talon.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ include "falco-talon.name" . }}
subjects:
- kind: ServiceAccount
{{- if .Values.rbac.serviceAccount.create }}
name: {{ include "falco-talon.name" . }}
{{- else }}
name: {{ .Values.rbac.serviceAccount.name }}
{{- end }}
namespace: {{ .Release.Namespace }}

View File

@ -1,71 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: {{ include "falco-talon.name" . }}-config
labels:
{{- include "falco-talon.labels" . | nindent 4 }}
stringData:
config.yaml: |
listen_address: {{ default "0.0.0.0" .Values.config.listenAddress }}
listen_port: {{ default 2803 .Values.config.listenPort }}
watch_rules: {{ default true .Values.config.watchRules }}
print_all_events: {{ default false .Values.config.printAllEvents }}
deduplication:
leader_election: {{ default true .Values.config.deduplication.leaderElection }}
time_window_seconds: {{ default 5 .Values.config.deduplication.timeWindowSeconds }}
default_notifiers:
{{- range .Values.config.defaultNotifiers }}
- {{ . -}}
{{ end }}
otel:
traces_enabled: {{ default false .Values.config.otel.tracesEnabled }}
metrics_enabled: {{ default false .Values.config.otel.metricsEnabled }}
collector_port: {{ default 4317 .Values.config.otel.collectorPort }}
collector_endpoint: {{ .Values.config.otel.collectorEndpoint }}
collector_use_insecure_grpc: {{ default false .Values.config.otel.collectorUseInsecureGrpc }}
notifiers:
slack:
webhook_url: {{ .Values.config.notifiers.slack.webhookUrl }}
icon: {{ .Values.config.notifiers.slack.icon }}
username: {{ .Values.config.notifiers.slack.username }}
footer: {{ .Values.config.notifiers.slack.footer }}
format: {{ .Values.config.notifiers.slack.format }}
webhook:
url: {{ .Values.config.notifiers.webhook.url }}
smtp:
host_port: {{ .Values.config.notifiers.smtp.hostPort }}
from: {{ .Values.config.notifiers.smtp.from }}
to: {{ .Values.config.notifiers.smtp.to }}
user: {{ .Values.config.notifiers.smtp.user }}
password: {{ .Values.config.notifiers.smtp.password }}
format: {{ .Values.config.notifiers.smtp.format }}
tls: {{ .Values.config.notifiers.smtp.tls }}
loki:
url: {{ .Values.config.notifiers.loki.url }}
user: {{ .Values.config.notifiers.loki.user }}
api_key: {{ .Values.config.notifiers.loki.apiKey }}
tenant: {{ .Values.config.notifiers.loki.tenant }}
custom_headers:
{{- range .Values.config.notifiers.loki.customHeaders }}
- {{ . -}}
{{ end }}
elasticsearch:
url: {{ .Values.config.notifiers.elasticsearch.url }}
        create_index_template: {{ .Values.config.notifiers.elasticsearch.createIndexTemplate }}
        number_of_shards: {{ .Values.config.notifiers.elasticsearch.numberOfShards }}
        number_of_replicas: {{ .Values.config.notifiers.elasticsearch.numberOfReplicas }}
aws:
role_arn: {{ .Values.config.aws.roleArn }}
external_id: {{ .Values.config.aws.externalId }}
region: {{ .Values.config.aws.region }}
access_key: {{ .Values.config.aws.accessKey }}
secret_key: {{ .Values.config.aws.secretKey }}
minio:
endpoint: {{ .Values.config.minio.endpoint }}
access_key: {{ .Values.config.minio.accessKey }}
secret_key: {{ .Values.config.minio.secretKey }}
use_ssl: {{ .Values.config.minio.useSsl }}

View File

@ -1,44 +0,0 @@
{{- if .Values.serviceMonitor.enabled }}
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
{{- include "falco-talon.labels" . | nindent 4 }}
{{- with .Values.serviceMonitor.additionalLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
name: {{ include "falco-talon.name" . }}
namespace: {{ .Release.Namespace }}
spec:
endpoints:
- port: {{ .Values.serviceMonitor.port }}
{{- with .Values.serviceMonitor.interval }}
interval: {{ . }}
{{- end }}
{{- with .Values.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ . }}
{{- end }}
honorLabels: true
path: {{ .Values.serviceMonitor.path }}
scheme: {{ .Values.serviceMonitor.scheme }}
{{- with .Values.serviceMonitor.tlsConfig }}
tlsConfig:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.serviceMonitor.relabelings }}
relabelings:
{{- toYaml . | nindent 6 }}
{{- end }}
jobLabel: "{{ .Release.Name }}"
selector:
matchLabels:
{{- include "falco-talon.selectorLabels" . | nindent 6 }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
{{- with .Values.serviceMonitor.targetLabels }}
targetLabels:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

View File

@ -1,21 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
name: {{ include "falco-talon.name" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "falco-talon.labels" . | nindent 4 }}
{{- with .Values.service.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
{{- include "falco-talon.selectorLabels" . | nindent 4 }}

View File

@ -1,309 +0,0 @@
# Default values for falco-talon.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# -- number of running pods
replicaCount: 2
# -- image parameters
image:
# -- The image registry to pull from
registry: falco.docker.scarf.sh
# -- The image repository to pull from
repository: falcosecurity/falco-talon
# -- Override the image tag to pull
tag: ""
# -- The image pull policy
pullPolicy: Always
# -- pod security policy
podSecurityPolicy:
# -- enable the creation of the PSP
create: false
# -- pod security context
podSecurityContext:
# -- user id
runAsUser: 1234
# -- group
fsGroup: 1234
# -- one or more secrets to be used when pulling images
imagePullSecrets: []
# - registrySecretName
# -- override name
nameOverride: ""
# -- extra env
extraEnv:
- name: LOG_LEVEL
value: warning
# - name: AWS_REGION # Specify if running on EKS, ECS or EC2
# value: us-east-1
# -- priority class name
priorityClassName: ""
# -- pod annotations
podAnnotations: {}
# -- service parameters
service:
# -- type of service
type: ClusterIP
# -- port of the service
port: 2803
# -- annotations of the service
annotations: {}
# networking.gke.io/load-balancer-type: Internal
# -- ingress parameters
ingress:
# -- enable the ingress
enabled: false
# -- annotations of the ingress
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
# -- hosts
hosts:
- host: falco-talon.local
paths:
- path: /
# -- pathType (e.g. ImplementationSpecific, Prefix, .. etc.)
# pathType: Prefix
# -- tls
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
# -- resources
resources: {}
# -- limits
# limits:
# # -- cpu limit
# cpu: 100m
# # -- memory limit
# memory: 128Mi
# -- requests
# requests:
# # -- cpu request
# cpu: 100m
# # -- memory request
# memory: 128Mi
# -- node selector
nodeSelector: {}
# -- tolerations
tolerations: []
# -- affinity
affinity: {}
# -- rbac
rbac:
serviceAccount:
# -- create the service account. If create is false, name is required
create: true
# -- name of the service account
name: ""
namespaces: ["get", "delete"]
pods: ["get", "update", "patch", "delete", "list"]
podsEphemeralcontainers: ["patch", "create"]
nodes: ["get", "update", "patch", "watch", "create"]
podsLog: ["get"]
podsExec: ["get", "create"]
podsEviction: ["get", "create"]
events: ["get", "update", "patch", "create"]
daemonsets: ["get", "delete"]
deployments: ["get", "delete"]
replicasets: ["get", "delete"]
statefulsets: ["get", "delete"]
networkpolicies: ["get", "update", "patch", "create"]
caliconetworkpolicies: ["get", "update", "patch", "create"]
ciliumnetworkpolicies: ["get", "update", "patch", "create"]
roles: ["get", "delete"]
clusterroles: ["get", "delete"]
configmaps: ["get", "delete"]
secrets: ["get", "delete"]
leases: ["get", "update", "patch", "watch", "create"]
# -- config of Falco Talon (See https://docs.falco-talon.org/docs/configuration/)
config:
# -- listen address
listenAddress: 0.0.0.0
# -- listen port
listenPort: 2803
# -- default notifiers for all rules
defaultNotifiers:
# - slack
- k8sevents
# -- auto reload the rules when the files change
watchRules: true
# -- deduplication of the Falco events
deduplication:
# -- enable the leader election for cluster mode
leaderElection: true
# -- duration in seconds for the deduplication time window
timeWindowSeconds: 5
# -- print in stdout all received events, not only those which match a rule
printAllEvents: false
# User-defined additional rules for rules_override.yaml
rulesOverride: |
- action: Terminate Pod
actionner: kubernetes:terminate
parameters:
ignore_daemonsets: true
ignore_statefulsets: true
grace_period_seconds: 20
# -- open telemetry parameters
otel:
# -- enable otel traces
tracesEnabled: false
# -- enable otel metrics
metricsEnabled: false
# -- collector port
collectorPort: 4317
# -- collector endpoint
collectorEndpoint: ""
# -- use insecure grpc
collectorUseInsecureGrpc: false
# -- notifiers (See https://docs.falco-talon.org/docs/notifiers/list/ for the settings)
notifiers:
# -- slack
slack:
# -- webhook url
webhookUrl: ""
# -- icon
icon: "https://upload.wikimedia.org/wikipedia/commons/2/26/Circaetus_gallicus_claw.jpg"
# -- username
username: "Falco Talon"
# -- footer
footer: "https://github.com/falcosecurity/falco-talon"
# -- format
format: "long"
# -- webhook
webhook:
# -- url
url: ""
# -- smtp
smtp:
# -- host:port
hostPort: ""
# -- from
from: ""
# -- to
to: ""
# -- user
user: ""
# -- password
password: ""
# -- format
format: "html"
# -- enable tls
tls: false
# -- loki
loki:
# -- host:port
hostPort: ""
# -- user
user: ""
# -- api key
apiKey: ""
# -- tenant
tenant: ""
# -- custom headers
customHeaders: []
# -- elasticsearch
elasticsearch:
# -- url
url: ""
# -- create the index template
createIndexTemplate: true
# -- number of shards
numberOfShards: 1
# -- number of replicas
numberOfReplicas: 1
# -- aws
aws:
# -- role arn
roleArn: ""
# -- external id
externalId: ""
# -- region (if not specified, default region from provider credential chain will be used)
region: ""
# -- access key (if not specified, default access_key from provider credential chain will be used)
    accessKey: ""
# -- secret key (if not specified, default secret_key from provider credential chain will be used)
secretKey: ""
# -- minio
minio:
# -- endpoint
endpoint: ""
# -- access key
accessKey: ""
# -- secret key
secretKey: ""
# -- use ssl
useSsl: false
# -- serviceMonitor holds the configuration for the ServiceMonitor CRD.
serviceMonitor:
# -- enable the deployment of a Service Monitor for the Prometheus Operator.
enabled: false
# -- portname at which the metrics are exposed
port: http
# -- path at which the metrics are exposed
path: /metrics
# -- additionalLabels specifies labels to be added on the Service Monitor.
additionalLabels: {}
# -- interval specifies the time interval at which Prometheus should scrape metrics from the service.
interval: "30s"
# -- scheme specifies network protocol used by the metrics endpoint. In this case HTTP.
scheme: http
# -- scrapeTimeout determines the maximum time Prometheus should wait for a target to respond to a scrape request.
# If the target does not respond within the specified timeout, Prometheus considers the scrape as failed for
# that target.
scrapeTimeout: "10s"
# -- relabelings configures the relabeling rules to apply the targets metadata labels.
relabelings: []
# -- targetLabels defines the labels which are transferred from the associated Kubernetes service object onto the ingested metrics.
targetLabels: []
# -- tlsConfig specifies TLS (Transport Layer Security) configuration for secure communication when
# scraping metrics from a service. It allows you to define the details of the TLS connection, such as
# CA certificate, client certificate, and client key. Currently, the k8s-metacollector does not support
# TLS configuration for the metrics endpoint.
tlsConfig: {}
# insecureSkipVerify: false
# caFile: /path/to/ca.crt
# certFile: /path/to/client.crt
# keyFile: /path/to/client.key
# -- grafana contains the configuration related to grafana.
grafana:
# -- dashboards contains configuration for grafana dashboards.
dashboards:
# -- enabled specifies whether the dashboards should be deployed.
enabled: false
    # -- configMaps to be deployed that contain a grafana dashboard.
configMaps:
# -- falco-talon contains the configuration for falco talon's dashboard.
talon:
# -- name specifies the name for the configmap.
name: falco-talon-grafana-dashboard
# -- namespace specifies the namespace for the configmap.
namespace: ""
# -- folder where the dashboard is stored by grafana.
folder: ""

View File

@ -1,247 +0,0 @@
# Helm chart Breaking Changes
- [6.0.0](#600)
  - [Falco Talon configuration changes](#falco-talon-configuration-changes)
- [5.0.0](#500)
- [Default Falco Image](#default-falco-image)
- [4.0.0](#400)
- [Drivers](#drivers)
- [K8s Collector](#k8s-collector)
- [Plugins](#plugins)
- [3.0.0](#300)
- [Falcoctl](#falcoctl-support)
- [Rulesfiles](#rulesfiles)
- [Falco Images](#drop-support-for-falcosecurityfalco-image)
- [Driver Loader Init Container](#driver-loader-simplified-logic)
## 6.0.0
### Falco Talon configuration changes
The following backward-incompatible changes have been made to `values.yaml` (see the example below):
- `falcotalon` configuration has been renamed to `falco-talon`
- `falcotalon.enabled` has been renamed to `responseActions.enabled`
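As a minimal sketch (only the renamed keys are shown), a values override changes like this:
```yaml=
# chart v5.x
falcotalon:
  enabled: true

# chart v6.0.0 and later
responseActions:
  enabled: true
# any Falco Talon sub-chart overrides now live under the renamed key
falco-talon: {}
```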
## 5.0.0
### Default Falco Image
**Starting with version 5.0.0, the Helm chart now uses the default Falco container image, which is a distroless image without any additional tools installed.**
Previously, the chart used the `debian` image, which bundles several extra tools, to avoid breaking changes during upgrades. The new image is more secure and lightweight, but it does not include these tools.
If you rely on some tool—for example, when using the `program_output` feature—you can manually override the `image.tag` value to use a different image flavor. For instance, setting `image.tag` to `0.41.0-debian` will restore access to the tools available in the Debian-based image.
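A minimal values override illustrating this (the tag below is the example from the paragraph above; pick the flavor matching your chart's Falco version):
```yaml=
image:
  # switch back to the tool-rich Debian flavor instead of the default distroless image
  tag: "0.41.0-debian"
```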
## 4.0.0
### Drivers
The `driver` section has been reworked based on the following PR: https://github.com/falcosecurity/falco/pull/2413.
It is an attempt to make the driver configuration uniform across Falco.
It also groups the configuration based on the driver type.
Some of the drivers have been renamed:
* the kernel module has been renamed from `module` to `kmod`;
* the ebpf probe has not been changed. It's still `ebpf`;
* the modern ebpf probe has been renamed from `modern-bpf` to `modern_ebpf`.
The `gvisor` configuration has been moved under the `driver` section since it is considered a driver on its own.
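A hedged before/after sketch of the corresponding `values.yaml` layout (only the driver keys discussed above are shown; the runsc path is illustrative):
```yaml=
# chart 3.x
driver:
  kind: module            # or: ebpf, modern-bpf

# chart 4.0.0 and later
driver:
  kind: kmod              # or: ebpf, modern_ebpf, gvisor, auto
  gvisor:                 # gvisor is now configured under the driver section
    runsc:
      path: /usr/local/sbin   # illustrative; point this at your node's runsc location
```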
### K8s Collector
The old Kubernetes client has been removed in Falco 0.37.0. For more info, check out this issue: https://github.com/falcosecurity/falco/issues/2973#issuecomment-1877803422.
The [k8s-metacollector](https://github.com/falcosecurity/k8s-metacollector) and [k8s-meta](https://github.com/falcosecurity/plugins/tree/master/plugins/k8smeta) substitute
the old implementation.
The following resources, needed by Falco to connect to the API server, are no longer required and have been removed from the chart:
* service account;
* cluster role;
* cluster role binding.
When the `collectors.kubernetes` is enabled the chart deploys the [k8s-metacollector](https://github.com/falcosecurity/k8s-metacollector) and configures Falco to load the
[k8s-meta](https://github.com/falcosecurity/plugins/tree/master/plugins/k8smeta) plugin.
By default, `collectors.kubernetes.enabled` is off; for more info, see the following issue: https://github.com/falcosecurity/falco/issues/2995.
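A minimal sketch of enabling the new collector (the hostname and port are normally derived from the bundled k8s-metacollector sub-chart, so the commented overrides are only needed when pointing at an external collector):
```yaml=
collectors:
  kubernetes:
    enabled: true
    # collectorHostname: k8s-metacollector.metacollector.svc   # optional override
    # collectorPort: 45000                                      # optional override, illustrative value
```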
### Plugins
The Falco docker image no longer ships the plugins: https://github.com/falcosecurity/falco/pull/2997.
For this reason, `resolveDeps` is now enabled in the relevant values files (i.e. `values-k8saudit.yaml`).
When installing `rulesfile` artifacts, `falcoctl` will try to resolve their dependencies and install the required plugins.
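For reference, this is the relevant toggle in those values files, shown here as a minimal sketch (the `k8saudit-rules` ref is the same example used later in this document):
```yaml=
falcoctl:
  config:
    artifact:
      install:
        # resolve and install the plugins required by the rulesfiles listed in refs
        resolveDeps: true
        refs: [k8saudit-rules:0.5]
```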
## 3.0.0
The new chart deploys new *k8s* resources and new configuration variables have been added to the `values.yaml` file. People upgrading the chart from `v2.x.y` have to port their configuration variables to the new `values.yaml` file used by the `v3.0.0` chart.
If you still want to use the old values because you do not want to take advantage of the new and shiny **falcoctl** tool, then just run:
```bash=
helm upgrade falco falcosecurity/falco \
--namespace=falco \
--reuse-values \
--set falcoctl.artifact.install.enabled=false \
--set falcoctl.artifact.follow.enabled=false
```
This way you will upgrade Falco to `v0.34.0`.
**NOTE**: The new version of Falco itself, installed by the chart, does not introduce breaking changes. You can port your previous Falco configuration to the new `values.yaml` by copy-pasting it.
### Falcoctl support
[Falcoctl](https://github.com/falcosecurity/falcoctl) is a new tool born to automate operations when deploying Falco.
Before `v3.0.0` of the charts, *rulesfiles* and *plugins* were shipped bundled in the Falco docker image. This precluded updating the *rulesfiles* and *plugins* until a new version of Falco was released: operators had to manually update the *rulesfiles* or add new *plugins* to Falco. The process was cumbersome and error-prone; operators had to create their own Falco docker images with the new plugins baked in, or wait for a new Falco release.
Starting from the `v3.0.0` chart release, we add support for **falcoctl** in the charts. Deploying it alongside Falco allows you to:
- *install* artifacts of the Falco ecosystem (i.e. plugins and rulesfiles at the time of writing)
- *follow* those artifacts (only *rulesfile* artifacts are recommended), to keep them up to date with the latest releases of the Falcosecurity organization. This allows, for instance, updating rules that detect new vulnerabilities or security issues without redeploying Falco.
The chart deploys *falcoctl* using an *init container* and/or a *sidecar container*. The former installs artifacts and makes them available to Falco at start-up time; the latter runs alongside Falco and updates the local artifacts when new versions are detected.
Based on your deployment scenario:
1. Falco without *plugins* and you just want to upgrade to the new Falco version:
```bash=
helm upgrade falco falcosecurity/falco \
--namespace=falco \
--reuse-values \
--set falcoctl.artifact.install.enabled=false \
--set falcoctl.artifact.follow.enabled=false
```
When upgrading an existing release, *helm* uses the new chart version. Since we added new template files and changed the values schema (added new parameters), we explicitly disable the **falcoctl** tool. By doing so, the command will reuse the existing configuration but will deploy Falco version `0.34.0`.
2. Falco without *plugins* and you want to automatically get new *falco-rules* as soon as they are released:
```bash=
helm upgrade falco falcosecurity/falco \
    --namespace=falco
```
Helm first applies the values coming from the new chart version, then overrides them using the values of the previous release. The outcome is a new release of Falco that:
* uses the previous configuration;
* runs Falco version `0.34.0`;
* uses **falcoctl** to install and automatically update the [*falco-rules*](https://github.com/falcosecurity/rules/);
* checks for new updates every 6h (default value).
3. Falco with *plugins* and you want just to upgrade Falco:
```bash=
helm upgrade falco falcosecurity/falco \
--namespace=falco \
--reuse-values \
--set falcoctl.artifact.install.enabled=false \
--set falcoctl.artifact.follow.enabled=false
```
Very similar to scenario `1.`
4. Falco with plugins and you want to use **falcoctl** to download the plugins' *rulesfiles*:
* Save **falcoctl** configuration to file:
```yaml=
cat << EOF > ./falcoctl-values.yaml
####################
# falcoctl config #
####################
falcoctl:
image:
# -- The image pull policy.
pullPolicy: IfNotPresent
# -- The image registry to pull from.
registry: docker.io
# -- The image repository to pull from.
repository: falcosecurity/falcoctl
# -- Overrides the image tag whose default is the chart appVersion.
tag: "main"
artifact:
# -- Runs "falcoctl artifact install" command as an init container. It is used to install artfacts before
# Falco starts. It provides them to Falco by using an emptyDir volume.
install:
enabled: true
      # -- Extra environment variables that will be passed to the falcoctl-artifact-install init container.
env: {}
# -- Arguments to pass to the falcoctl-artifact-install init container.
args: ["--verbose"]
# -- Resources requests and limits for the falcoctl-artifact-install init container.
resources: {}
# -- Security context for the falcoctl init container.
securityContext: {}
# -- Runs "falcoctl artifact follow" command as a sidecar container. It is used to automatically check for
# updates given a list of artifacts. If an update is found it downloads and installs it in a shared folder (emptyDir)
# that is accessible by Falco. Rulesfiles are automatically detected and loaded by Falco once they are installed in the
# correct folder by falcoctl. To prevent new versions of artifacts from breaking Falco, the tool checks if it is compatible
# with the running version of Falco before installing it.
follow:
enabled: true
      # -- Extra environment variables that will be passed to the falcoctl-artifact-follow sidecar container.
env: {}
# -- Arguments to pass to the falcoctl-artifact-follow sidecar container.
args: ["--verbose"]
# -- Resources requests and limits for the falcoctl-artifact-follow sidecar container.
resources: {}
# -- Security context for the falcoctl-artifact-follow sidecar container.
securityContext: {}
  # -- Configuration file of the falcoctl tool. It is saved in a configmap and mounted on the falcoctl containers.
config:
    # -- List of indexes that falcoctl downloads and uses to locate and download artifacts. For more info see:
# https://github.com/falcosecurity/falcoctl/blob/main/proposals/20220916-rules-and-plugin-distribution.md#index-file-overview
indexes:
- name: falcosecurity
url: https://falcosecurity.github.io/falcoctl/index.yaml
# -- Configuration used by the artifact commands.
artifact:
      # -- List of artifact types that falcoctl will handle. If a configured ref resolves to an artifact whose type is not contained
      # in the list, it will refuse to download and install that artifact.
allowedTypes:
- rulesfile
install:
        # -- Do not resolve the dependencies for artifacts. By default it is true, but for our use case we disable it.
resolveDeps: false
# -- List of artifacts to be installed by the falcoctl init container.
refs: [k8saudit-rules:0.5]
# -- Directory where the *rulesfiles* are saved. The path is relative to the container, which in this case is an emptyDir
# mounted also by the Falco pod.
rulesfilesDir: /rulesfiles
# -- Same as the one above but for the artifacts.
pluginsDir: /plugins
follow:
        # -- List of artifacts to be followed by the falcoctl sidecar container.
refs: [k8saudit-rules:0.5]
# -- Directory where the *rulesfiles* are saved. The path is relative to the container, which in this case is an emptyDir
# mounted also by the Falco pod.
rulesfilesDir: /rulesfiles
# -- Same as the one above but for the artifacts.
pluginsDir: /plugins
EOF
```
* Set `falcoctl.artifact.install.enabled=true` to install *rulesfiles* of the loaded plugins. Configure **falcoctl** to install the *rulesfiles* of the plugins you are loading with Falco. For example, if you are loading **k8saudit** plugin then you need to set `falcoctl.config.artifact.install.refs=[k8saudit-rules:0.5]`. When Falco is deployed the **falcoctl** init container will download the specified artifacts based on their tag.
* Set `falcoctl.artifact.follow.enabled=true` to keep updated *rulesfiles* of the loaded plugins.
* Proceed to upgrade your Falco release by running:
```bash=
helm upgrade falco falcosecurity/falco \
--namespace=falco \
--reuse-values \
--values=./falcoctl-values.yaml
```
5. Falco with **multiple sources** enabled (syscalls + plugins):
1. Upgrading Falco to the new version:
```bash=
helm upgrade falco falcosecurity/falco \
--namespace=falco \
--reuse-values \
--set falcoctl.artifact.install.enabled=false \
--set falcoctl.artifact.follow.enabled=false
```
2. Upgrading Falco and leveraging **falcoctl** for rules and plugins. Refer to point 4. for **falcoctl** configuration.
### Rulesfiles
Starting from `v3.0.0`, the chart drops the bundled **rulesfiles**. Previous versions of the chart created a configmap containing the following **rulesfiles**:
* application_rules.yaml
* aws_cloudtrail_rules.yaml
* falco_rules.local.yaml
* falco_rules.yaml
* k8s_audit_rules.yaml
The reason why we are dropping them is pretty simple: the files are already shipped within the Falco image and do not bring any benefit. On the other hand, we had to manually update those files for each Falco release.
For users out there, do not worry, we have you covered. As said before, the **rulesfiles** are already shipped inside
the Falco image. Still, this solution has some drawbacks: users have to wait for the next release of Falco
to get the latest version of those **rulesfiles**, or they have to manually update them by using the
[custom rules](./README.md#loading-custom-rules).
We came up with a better solution and that is **falcoctl**. Users can configure the **falcoctl** tool to fetch and install the latest **rulesfiles** as provided by the *falcosecurity* organization. For more info, please check the **falcoctl** section.
**NOTE**: if any user (wrongly) used to customize those files before deploying Falco, please switch to using the
[custom rules](./README.md#loading-custom-rules).
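For completeness, a hedged sketch of the custom-rules approach, assuming the chart's `customRules` value described in the README (the file name and rule body are placeholders):
```yaml=
customRules:
  # <file name>: <Falco rules content>
  rules-custom.yaml: |-
    - rule: Placeholder shell in container
      desc: placeholder rule, only meant to illustrate the mechanism
      condition: spawned_process and container and proc.name = bash
      output: Shell started in a container (user=%user.name container=%container.id)
      priority: WARNING
```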
### Drop support for `falcosecurity/falco` image
Starting from version `v2.0.0` of the chart, the `falcosecurity/falco-no-driver` is the default image. We still supported the `falcosecurity/falco` image in `v2.0.0`, but in `v2.2.0` we broke the chart when using the `falcosecurity/falco` image. For more info please check out the following issue: https://github.com/falcosecurity/charts/issues/419
#### Driver-loader simplified logic
There is only one switch to **enable/disable** the driver-loader init container: `driver.loader.enabled=true`. This simplification comes as a direct consequence of dropping support for the `falcosecurity/falco` image. For more info: https://github.com/falcosecurity/charts/issues/418
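A minimal values sketch of that switch (shown disabled, e.g. when the driver is already installed on the nodes):
```yaml=
driver:
  loader:
    # skip the driver-loader init container entirely
    enabled: false
```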

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large

View File

@ -1,47 +0,0 @@
{{- if eq .Values.controller.kind "daemonset" }}
Falco agents are spinning up on each node in your cluster. After a few
seconds, they are going to start monitoring your containers looking for
security issues.
{{printf "\n" }}
{{- end}}
{{- if .Values.integrations }}
WARNING: The following integrations have been deprecated and removed
- gcscc
- natsOutput
- snsOutput
- pubsubOutput
Consider using falcosidekick (https://github.com/falcosecurity/falcosidekick) as a replacement.
{{- else }}
No further action should be required.
{{- end }}
{{printf "\n" }}
{{- if not .Values.falcosidekick.enabled }}
Tip:
You can easily forward Falco events to Slack, Kafka, AWS Lambda and more with falcosidekick.
Full list of outputs: https://github.com/falcosecurity/charts/tree/master/charts/falcosidekick.
You can enable its deployment with `--set falcosidekick.enabled=true` or in your values.yaml.
See: https://github.com/falcosecurity/charts/blob/master/charts/falcosidekick/values.yaml for configuration values.
{{- end}}
{{- if (has .Values.driver.kind (list "module" "modern-bpf")) -}}
{{- println }}
WARNING(drivers):
{{- printf "\nThe driver kind: \"%s\" is an alias and might be removed in the future.\n" .Values.driver.kind -}}
{{- $driver := "" -}}
{{- if eq .Values.driver.kind "module" -}}
{{- $driver = "kmod" -}}
{{- else if eq .Values.driver.kind "modern-bpf" -}}
{{- $driver = "modern_ebpf" -}}
{{- end -}}
{{- printf "Please use \"%s\" instead." $driver}}
{{- end -}}
{{- if and (not (empty .Values.falco.load_plugins)) (or .Values.falcoctl.artifact.follow.enabled .Values.falcoctl.artifact.install.enabled) }}
NOTICE:
{{ printf "It seems you are loading the following plugins %v, please make sure to install them by specifying the correct reference to falcoctl.config.artifact.install.refs: %v" .Values.falco.load_plugins .Values.falcoctl.config.artifact.install.refs -}}
{{ printf "Ignore this notice if the value of falcoctl.config.artifact.install.refs is correct already." -}}
{{- end }}

View File

@ -1,561 +0,0 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "falco.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "falco.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "falco.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Allow the release namespace to be overridden
*/}}
{{- define "falco.namespace" -}}
{{- default .Release.Namespace .Values.namespaceOverride -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "falco.labels" -}}
helm.sh/chart: {{ include "falco.chart" . }}
{{ include "falco.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "falco.selectorLabels" -}}
app.kubernetes.io/name: {{ include "falco.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Renders a value that contains template.
Usage:
{{ include "falco.renderTemplate" ( dict "value" .Values.path.to.the.Value "context" $) }}
*/}}
{{- define "falco.renderTemplate" -}}
{{- if typeIs "string" .value }}
{{- tpl .value .context }}
{{- else }}
{{- tpl (.value | toYaml) .context }}
{{- end }}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "falco.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "falco.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
{{/*
Return the proper Falco image name
*/}}
{{- define "falco.image" -}}
{{- with .Values.image.registry -}}
{{- . }}/
{{- end -}}
{{- .Values.image.repository }}:
{{- .Values.image.tag | default (printf "%s" .Chart.AppVersion) -}}
{{- end -}}
{{/*
Return the proper Falco driver loader image name
*/}}
{{- define "falco.driverLoader.image" -}}
{{- with .Values.driver.loader.initContainer.image.registry -}}
{{- . }}/
{{- end -}}
{{- .Values.driver.loader.initContainer.image.repository }}:
{{- .Values.driver.loader.initContainer.image.tag | default .Chart.AppVersion -}}
{{- end -}}
{{/*
Return the proper Falcoctl image name
*/}}
{{- define "falcoctl.image" -}}
{{ printf "%s/%s:%s" .Values.falcoctl.image.registry .Values.falcoctl.image.repository .Values.falcoctl.image.tag }}
{{- end -}}
{{/*
Extract the unixSocket's directory path
*/}}
{{- define "falco.unixSocketDir" -}}
{{- if and .Values.falco.grpc.enabled .Values.falco.grpc.bind_address (hasPrefix "unix://" .Values.falco.grpc.bind_address) -}}
{{- .Values.falco.grpc.bind_address | trimPrefix "unix://" | dir -}}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for rbac.
*/}}
{{- define "rbac.apiVersion" -}}
{{- if .Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1" }}
{{- print "rbac.authorization.k8s.io/v1" -}}
{{- else -}}
{{- print "rbac.authorization.k8s.io/v1beta1" -}}
{{- end -}}
{{- end -}}
{{/*
Build http url for falcosidekick.
*/}}
{{- define "falcosidekick.url" -}}
{{- if not .Values.falco.http_output.url -}}
{{- $falcoName := include "falco.fullname" . -}}
{{- $listenPort := .Values.falcosidekick.listenport | default "2801" -}}
{{- if .Values.falcosidekick.fullfqdn -}}
{{- printf "http://%s-falcosidekick.%s.svc.cluster.local:%s" $falcoName .Release.Namespace $listenPort -}}
{{- else -}}
{{- printf "http://%s-falcosidekick:%s" $falcoName $listenPort -}}
{{- end -}}
{{- else -}}
{{- .Values.falco.http_output.url -}}
{{- end -}}
{{- end -}}
{{/*
Set appropriate falco configuration if falcosidekick has been configured.
*/}}
{{- define "falco.falcosidekickConfig" -}}
{{- if .Values.falcosidekick.enabled -}}
{{- $_ := set .Values.falco "json_output" true -}}
{{- $_ := set .Values.falco "json_include_output_property" true -}}
{{- $_ := set .Values.falco.http_output "enabled" true -}}
{{- $_ := set .Values.falco.http_output "url" (include "falcosidekick.url" .) -}}
{{- end -}}
{{- end -}}
{{/*
Get port from .Values.falco.grpc.bind_address.
*/}}
{{- define "grpc.port" -}}
{{- $error := "unable to extract listenPort from .Values.falco.grpc.bind_address. Make sure it is in the correct format" -}}
{{- if and .Values.falco.grpc.enabled .Values.falco.grpc.bind_address (not (hasPrefix "unix://" .Values.falco.grpc.bind_address)) -}}
{{- $tokens := split ":" .Values.falco.grpc.bind_address -}}
{{- if $tokens._1 -}}
{{- $tokens._1 -}}
{{- else -}}
{{- fail $error -}}
{{- end -}}
{{- else -}}
{{- fail $error -}}
{{- end -}}
{{- end -}}
{{/*
Disable the syscall source if some conditions are met.
By default the syscall source is always enabled in falco. If no syscall source is enabled, falco
exits. Here we check that no producers of syscall events have been configured, and if so
we just disable the syscall source.
*/}}
{{- define "falco.configSyscallSource" -}}
{{- $userspaceDisabled := true -}}
{{- $gvisorDisabled := (ne .Values.driver.kind "gvisor") -}}
{{- $driverDisabled := (not .Values.driver.enabled) -}}
{{- if or (has "-u" .Values.extra.args) (has "--userspace" .Values.extra.args) -}}
{{- $userspaceDisabled = false -}}
{{- end -}}
{{- if and $driverDisabled $userspaceDisabled $gvisorDisabled }}
- --disable-source
- syscall
{{- end -}}
{{- end -}}
{{/*
We need the falco binary in order to generate the configuration for gVisor. This init container
is deployed within the Falco pod when gVisor is enabled. The image is the same as the Falco one we are
deploying, and the configuration logic is a bash script passed as an argument on the fly. This solution should
be temporary and will stay here until we move this logic to the falcoctl tool.
*/}}
{{- define "falco.gvisor.initContainer" -}}
- name: {{ .Chart.Name }}-gvisor-init
image: {{ include "falco.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
args:
- /bin/bash
- -c
- |
set -o errexit
set -o nounset
set -o pipefail
root={{ .Values.driver.gvisor.runsc.root }}
config={{ .Values.driver.gvisor.runsc.config }}
echo "* Configuring Falco+gVisor integration...".
# Check if gVisor is configured on the node.
echo "* Checking for /host${config} file..."
if [[ -f /host${config} ]]; then
echo "* Generating the Falco configuration..."
/usr/bin/falco --gvisor-generate-config=${root}/falco.sock > /host${root}/pod-init.json
sed -E -i.orig '/"ignore_missing" : true,/d' /host${root}/pod-init.json
if [[ -z $(grep pod-init-config /host${config}) ]]; then
echo "* Updating the runsc config file /host${config}..."
echo " pod-init-config = \"${root}/pod-init.json\"" >> /host${config}
fi
# Endpoint inside the container is different from outside, add
# "/host" to the endpoint path inside the container.
echo "* Setting the updated Falco configuration to /gvisor-config/pod-init.json..."
sed 's/"endpoint" : "\/run/"endpoint" : "\/host\/run/' /host${root}/pod-init.json > /gvisor-config/pod-init.json
else
echo "* File /host${config} not found."
echo "* Please make sure that the gVisor is configured in the current node and/or the runsc root and config file path are correct"
exit -1
fi
echo "* Falco+gVisor correctly configured."
exit 0
volumeMounts:
- mountPath: /host{{ .Values.driver.gvisor.runsc.path }}
name: runsc-path
readOnly: true
- mountPath: /host{{ .Values.driver.gvisor.runsc.root }}
name: runsc-root
- mountPath: /host{{ .Values.driver.gvisor.runsc.config }}
name: runsc-config
- mountPath: /gvisor-config
name: falco-gvisor-config
{{- end -}}
{{- define "falcoctl.initContainer" -}}
- name: falcoctl-artifact-install
image: {{ include "falcoctl.image" . }}
imagePullPolicy: {{ .Values.falcoctl.image.pullPolicy }}
args:
- artifact
- install
{{- with .Values.falcoctl.artifact.install.args }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.falcoctl.artifact.install.resources }}
resources:
{{- toYaml . | nindent 4 }}
{{- end }}
securityContext:
{{- if .Values.falcoctl.artifact.install.securityContext }}
{{- toYaml .Values.falcoctl.artifact.install.securityContext | nindent 4 }}
{{- end }}
volumeMounts:
- mountPath: {{ .Values.falcoctl.config.artifact.install.pluginsDir }}
name: plugins-install-dir
- mountPath: {{ .Values.falcoctl.config.artifact.install.rulesfilesDir }}
name: rulesfiles-install-dir
- mountPath: /etc/falcoctl
name: falcoctl-config-volume
{{- with .Values.falcoctl.artifact.install.mounts.volumeMounts }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if .Values.falcoctl.artifact.install.env }}
env:
{{- include "falco.renderTemplate" ( dict "value" .Values.falcoctl.artifact.install.env "context" $) | nindent 4 }}
{{- end }}
{{- end -}}
{{- define "falcoctl.sidecar" -}}
- name: falcoctl-artifact-follow
image: {{ include "falcoctl.image" . }}
imagePullPolicy: {{ .Values.falcoctl.image.pullPolicy }}
args:
- artifact
- follow
{{- with .Values.falcoctl.artifact.follow.args }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.falcoctl.artifact.follow.resources }}
resources:
{{- toYaml . | nindent 4 }}
{{- end }}
securityContext:
{{- if .Values.falcoctl.artifact.follow.securityContext }}
{{- toYaml .Values.falcoctl.artifact.follow.securityContext | nindent 4 }}
{{- end }}
volumeMounts:
- mountPath: {{ .Values.falcoctl.config.artifact.follow.pluginsDir }}
name: plugins-install-dir
- mountPath: {{ .Values.falcoctl.config.artifact.follow.rulesfilesDir }}
name: rulesfiles-install-dir
- mountPath: /etc/falcoctl
name: falcoctl-config-volume
{{- with .Values.falcoctl.artifact.follow.mounts.volumeMounts }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if .Values.falcoctl.artifact.follow.env }}
env:
{{- include "falco.renderTemplate" ( dict "value" .Values.falcoctl.artifact.follow.env "context" $) | nindent 4 }}
{{- end }}
{{- end -}}
{{/*
Build configuration for k8smeta plugin and update the relevant variables.
* The configuration that needs to be built up is the initconfig section:
init_config:
collectorPort: 0
collectorHostname: ""
nodeName: ""
The falco chart exposes this configuration through two variables:
* collectors.kubernetes.collectorHostname;
* collectors.kubernetes.collectorPort;
If those two variables are not set, then we take those values from the k8smetacollector subchart.
The hostname is built using the name of the service that exposes the collector endpoints and the
port is directly taken from the service's port that exposes the gRPC endpoint.
We reuse the helpers from the k8smetacollector subchart, by passing down the variables. There is one
hardcoded value, which is the chart name for the k8s-metacollector chart.
* The falcoctl configuration is updated to allow plugin artifacts to be installed. The refs in the install
section are updated by adding the reference for the k8s meta plugin that needs to be installed.
NOTE: It seems that the named templates run during the validation process, and then again during the
render phase. In our case we are setting global variables that persist across the various phases.
We need to make the helper idempotent.
*/}}
{{- define "k8smeta.configuration" -}}
{{- if and .Values.collectors.kubernetes.enabled .Values.driver.enabled -}}
{{- $hostname := "" -}}
{{- if .Values.collectors.kubernetes.collectorHostname -}}
{{- $hostname = .Values.collectors.kubernetes.collectorHostname -}}
{{- else -}}
{{- $collectorContext := (dict "Release" .Release "Values" (index .Values "k8s-metacollector") "Chart" (dict "Name" "k8s-metacollector")) -}}
{{- $hostname = printf "%s.%s.svc" (include "k8s-metacollector.fullname" $collectorContext) (include "k8s-metacollector.namespace" $collectorContext) -}}
{{- end -}}
{{- $hasConfig := false -}}
{{- range .Values.falco.plugins -}}
{{- if eq (get . "name") "k8smeta" -}}
{{ $hasConfig = true -}}
{{- end -}}
{{- end -}}
{{- if not $hasConfig -}}
{{- $listenPort := default (index .Values "k8s-metacollector" "service" "ports" "broker-grpc" "port") .Values.collectors.kubernetes.collectorPort -}}
{{- $listenPort = int $listenPort -}}
{{- $pluginConfig := dict "name" "k8smeta" "library_path" "libk8smeta.so" "init_config" (dict "collectorHostname" $hostname "collectorPort" $listenPort "nodeName" "${FALCO_K8S_NODE_NAME}" "verbosity" .Values.collectors.kubernetes.verbosity "hostProc" .Values.collectors.kubernetes.hostProc) -}}
{{- $newConfig := append .Values.falco.plugins $pluginConfig -}}
{{- $_ := set .Values.falco "plugins" ($newConfig | uniq) -}}
{{- $loadedPlugins := append .Values.falco.load_plugins "k8smeta" -}}
{{- $_ = set .Values.falco "load_plugins" ($loadedPlugins | uniq) -}}
{{- end -}}
{{- $_ := set .Values.falcoctl.config.artifact.install "refs" ((append .Values.falcoctl.config.artifact.install.refs .Values.collectors.kubernetes.pluginRef) | uniq)}}
{{- $_ = set .Values.falcoctl.config.artifact "allowedTypes" ((append .Values.falcoctl.config.artifact.allowedTypes "plugin") | uniq)}}
{{- end -}}
{{- end -}}
{{/*
Based on the user input it populates the driver configuration in the falco config map.
*/}}
{{- define "falco.engineConfiguration" -}}
{{- if .Values.driver.enabled -}}
{{- $supportedDrivers := list "kmod" "ebpf" "modern_ebpf" "gvisor" "auto" -}}
{{- $aliasDrivers := list "module" "modern-bpf" -}}
{{- if and (not (has .Values.driver.kind $supportedDrivers)) (not (has .Values.driver.kind $aliasDrivers)) -}}
{{- fail (printf "unsupported driver kind: \"%s\". Supported drivers %s, alias %s" .Values.driver.kind $supportedDrivers $aliasDrivers) -}}
{{- end -}}
{{- if or (eq .Values.driver.kind "kmod") (eq .Values.driver.kind "module") -}}
{{- $kmodConfig := dict "kind" "kmod" "kmod" (dict "buf_size_preset" .Values.driver.kmod.bufSizePreset "drop_failed_exit" .Values.driver.kmod.dropFailedExit) -}}
{{- $_ := set .Values.falco "engine" $kmodConfig -}}
{{- else if eq .Values.driver.kind "ebpf" -}}
{{- $ebpfConfig := dict "kind" "ebpf" "ebpf" (dict "buf_size_preset" .Values.driver.ebpf.bufSizePreset "drop_failed_exit" .Values.driver.ebpf.dropFailedExit "probe" .Values.driver.ebpf.path) -}}
{{- $_ := set .Values.falco "engine" $ebpfConfig -}}
{{- else if or (eq .Values.driver.kind "modern_ebpf") (eq .Values.driver.kind "modern-bpf") -}}
{{- $ebpfConfig := dict "kind" "modern_ebpf" "modern_ebpf" (dict "buf_size_preset" .Values.driver.modernEbpf.bufSizePreset "drop_failed_exit" .Values.driver.modernEbpf.dropFailedExit "cpus_for_each_buffer" .Values.driver.modernEbpf.cpusForEachBuffer) -}}
{{- $_ := set .Values.falco "engine" $ebpfConfig -}}
{{- else if eq .Values.driver.kind "gvisor" -}}
{{- $root := printf "/host%s/k8s.io" .Values.driver.gvisor.runsc.root -}}
{{- $gvisorConfig := dict "kind" "gvisor" "gvisor" (dict "config" "/gvisor-config/pod-init.json" "root" $root) -}}
{{- $_ := set .Values.falco "engine" $gvisorConfig -}}
{{- else if eq .Values.driver.kind "auto" -}}
{{- $engineConfig := dict "kind" "modern_ebpf" "kmod" (dict "buf_size_preset" .Values.driver.kmod.bufSizePreset "drop_failed_exit" .Values.driver.kmod.dropFailedExit) "ebpf" (dict "buf_size_preset" .Values.driver.ebpf.bufSizePreset "drop_failed_exit" .Values.driver.ebpf.dropFailedExit "probe" .Values.driver.ebpf.path) "modern_ebpf" (dict "buf_size_preset" .Values.driver.modernEbpf.bufSizePreset "drop_failed_exit" .Values.driver.modernEbpf.dropFailedExit "cpus_for_each_buffer" .Values.driver.modernEbpf.cpusForEachBuffer) -}}
{{- $_ := set .Values.falco "engine" $engineConfig -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
It returns "true" if the driver loader has to be enabled, otherwise false.
*/}}
{{- define "driverLoader.enabled" -}}
{{- if or (eq .Values.driver.kind "modern_ebpf") (eq .Values.driver.kind "modern-bpf") (eq .Values.driver.kind "gvisor") (not .Values.driver.enabled) (not .Values.driver.loader.enabled) -}}
false
{{- else -}}
true
{{- end -}}
{{- end -}}
{{/*
Based on the user input it populates the metrics configuration in the falco config map.
*/}}
{{- define "falco.metricsConfiguration" -}}
{{- if .Values.metrics.enabled -}}
{{- $_ := set .Values.falco.webserver "prometheus_metrics_enabled" true -}}
{{- $_ = set .Values.falco.webserver "enabled" true -}}
{{- $_ = set .Values.falco.metrics "enabled" .Values.metrics.enabled -}}
{{- $_ = set .Values.falco.metrics "interval" .Values.metrics.interval -}}
{{- $_ = set .Values.falco.metrics "output_rule" .Values.metrics.outputRule -}}
{{- $_ = set .Values.falco.metrics "rules_counters_enabled" .Values.metrics.rulesCountersEnabled -}}
{{- $_ = set .Values.falco.metrics "resource_utilization_enabled" .Values.metrics.resourceUtilizationEnabled -}}
{{- $_ = set .Values.falco.metrics "state_counters_enabled" .Values.metrics.stateCountersEnabled -}}
{{- $_ = set .Values.falco.metrics "kernel_event_counters_enabled" .Values.metrics.kernelEventCountersEnabled -}}
{{- $_ = set .Values.falco.metrics "kernel_event_counters_per_cpu_enabled" .Values.metrics.kernelEventCountersPerCPUEnabled -}}
{{- $_ = set .Values.falco.metrics "libbpf_stats_enabled" .Values.metrics.libbpfStatsEnabled -}}
{{- $_ = set .Values.falco.metrics "convert_memory_to_mb" .Values.metrics.convertMemoryToMB -}}
{{- $_ = set .Values.falco.metrics "include_empty_values" .Values.metrics.includeEmptyValues -}}
{{- end -}}
{{- end -}}
{{/*
This helper is used to add the container plugin to the falco configuration.
*/}}
{{ define "falco.containerPlugin" -}}
{{ if and .Values.driver.enabled .Values.collectors.enabled -}}
{{ if and (or .Values.collectors.docker.enabled .Values.collectors.crio.enabled .Values.collectors.containerd.enabled) .Values.collectors.containerEngine.enabled -}}
{{ fail "You can not enable any of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time. Please use the containerEngine configuration since the old configurations are deprecated." }}
{{ else if or .Values.collectors.docker.enabled .Values.collectors.crio.enabled .Values.collectors.containerd.enabled .Values.collectors.containerEngine.enabled -}}
{{ if or .Values.collectors.docker.enabled .Values.collectors.crio.enabled .Values.collectors.containerd.enabled -}}
{{ $_ := set .Values.collectors.containerEngine.engines.docker "enabled" .Values.collectors.docker.enabled -}}
{{ $_ = set .Values.collectors.containerEngine.engines.docker "sockets" (list .Values.collectors.docker.socket) -}}
{{ $_ = set .Values.collectors.containerEngine.engines.containerd "enabled" .Values.collectors.containerd.enabled -}}
{{ $_ = set .Values.collectors.containerEngine.engines.containerd "sockets" (list .Values.collectors.containerd.socket) -}}
{{ $_ = set .Values.collectors.containerEngine.engines.cri "enabled" .Values.collectors.crio.enabled -}}
{{ $_ = set .Values.collectors.containerEngine.engines.cri "sockets" (list .Values.collectors.crio.socket) -}}
{{ $_ = set .Values.collectors.containerEngine.engines.podman "enabled" false -}}
{{ $_ = set .Values.collectors.containerEngine.engines.lxc "enabled" false -}}
{{ $_ = set .Values.collectors.containerEngine.engines.libvirt_lxc "enabled" false -}}
{{ $_ = set .Values.collectors.containerEngine.engines.bpm "enabled" false -}}
{{ end -}}
{{ $hasConfig := false -}}
{{ range .Values.falco.plugins -}}
{{ if eq (get . "name") "container" -}}
{{ $hasConfig = true -}}
{{ end -}}
{{ end -}}
{{ if not $hasConfig -}}
{{ $pluginConfig := dict -}}
{{ with .Values.collectors.containerEngine -}}
{{ $pluginConfig = dict "name" "container" "library_path" "libcontainer.so" "init_config" (dict "label_max_len" .labelMaxLen "with_size" .withSize "hooks" .hooks "engines" .engines) -}}
{{ end -}}
{{ $newConfig := append .Values.falco.plugins $pluginConfig -}}
{{ $_ := set .Values.falco "plugins" ($newConfig | uniq) -}}
{{ $loadedPlugins := append .Values.falco.load_plugins "container" -}}
{{ $_ = set .Values.falco "load_plugins" ($loadedPlugins | uniq) -}}
{{ end -}}
{{ $_ := set .Values.falcoctl.config.artifact.install "refs" ((append .Values.falcoctl.config.artifact.install.refs .Values.collectors.containerEngine.pluginRef) | uniq) -}}
{{ $_ = set .Values.falcoctl.config.artifact "allowedTypes" ((append .Values.falcoctl.config.artifact.allowedTypes "plugin") | uniq) -}}
{{ end -}}
{{ end -}}
{{ end -}}
{{/*
This helper is used to add container plugin volumes to the falco pod.
*/}}
{{- define "falco.containerPluginVolumes" -}}
{{- if and .Values.driver.enabled .Values.collectors.enabled -}}
{{- if and (or .Values.collectors.docker.enabled .Values.collectors.crio.enabled .Values.collectors.containerd.enabled) .Values.collectors.containerEngine.enabled -}}
{{ fail "You can not enable any of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time. Please use the containerEngine configuration since the old configurations are deprecated." }}
{{- end -}}
{{ $volumes := list -}}
{{- if .Values.collectors.docker.enabled -}}
{{ $volumes = append $volumes (dict "name" "docker-socket" "hostPath" (dict "path" .Values.collectors.docker.socket)) -}}
{{- end -}}
{{- if .Values.collectors.crio.enabled -}}
{{ $volumes = append $volumes (dict "name" "crio-socket" "hostPath" (dict "path" .Values.collectors.crio.socket)) -}}
{{- end -}}
{{- if .Values.collectors.containerd.enabled -}}
{{ $volumes = append $volumes (dict "name" "containerd-socket" "hostPath" (dict "path" .Values.collectors.containerd.socket)) -}}
{{- end -}}
{{- if .Values.collectors.containerEngine.enabled -}}
{{- $seenPaths := dict -}}
{{- $idx := 0 -}}
{{- $engineOrder := list "docker" "podman" "containerd" "cri" "lxc" "libvirt_lxc" "bpm" -}}
{{- range $engineName := $engineOrder -}}
{{- $val := index $.Values.collectors.containerEngine.engines $engineName -}}
{{- if and $val $val.enabled -}}
{{- range $index, $socket := $val.sockets -}}
{{- $mountPath := print "/host" $socket -}}
{{- if not (hasKey $seenPaths $mountPath) -}}
{{ $volumes = append $volumes (dict "name" (printf "container-engine-socket-%d" $idx) "hostPath" (dict "path" $socket)) -}}
{{- $idx = add $idx 1 -}}
{{- $_ := set $seenPaths $mountPath true -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- if gt (len $volumes) 0 -}}
{{ toYaml $volumes -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
This helper is used to add container plugin volumeMounts to the falco pod.
*/}}
{{- define "falco.containerPluginVolumeMounts" -}}
{{- if and .Values.driver.enabled .Values.collectors.enabled -}}
{{- if and (or .Values.collectors.docker.enabled .Values.collectors.crio.enabled .Values.collectors.containerd.enabled) .Values.collectors.containerEngine.enabled -}}
{{ fail "You can not enable any of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time. Please use the containerEngine configuration since the old configurations are deprecated." }}
{{- end -}}
{{ $volumeMounts := list -}}
{{- if .Values.collectors.docker.enabled -}}
{{ $volumeMounts = append $volumeMounts (dict "name" "docker-socket" "mountPath" (print "/host" .Values.collectors.docker.socket)) -}}
{{- end -}}
{{- if .Values.collectors.crio.enabled -}}
{{ $volumeMounts = append $volumeMounts (dict "name" "crio-socket" "mountPath" (print "/host" .Values.collectors.crio.socket)) -}}
{{- end -}}
{{- if .Values.collectors.containerd.enabled -}}
{{ $volumeMounts = append $volumeMounts (dict "name" "containerd-socket" "mountPath" (print "/host" .Values.collectors.containerd.socket)) -}}
{{- end -}}
{{- if .Values.collectors.containerEngine.enabled -}}
{{- $seenPaths := dict -}}
{{- $idx := 0 -}}
{{- $engineOrder := list "docker" "podman" "containerd" "cri" "lxc" "libvirt_lxc" "bpm" -}}
{{- range $engineName := $engineOrder -}}
{{- $val := index $.Values.collectors.containerEngine.engines $engineName -}}
{{- if and $val $val.enabled -}}
{{- range $index, $socket := $val.sockets -}}
{{- $mountPath := print "/host" $socket -}}
{{- if not (hasKey $seenPaths $mountPath) -}}
{{ $volumeMounts = append $volumeMounts (dict "name" (printf "container-engine-socket-%d" $idx) "mountPath" $mountPath) -}}
{{- $idx = add $idx 1 -}}
{{- $_ := set $seenPaths $mountPath true -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- if gt (len $volumeMounts) 0 -}}
{{ toYaml ($volumeMounts) }}
{{- end -}}
{{- end -}}
{{- end -}}

View File

@ -1,18 +0,0 @@
{{- if and .Values.certs.client.key .Values.certs.client.crt .Values.certs.ca.crt }}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "falco.fullname" . }}-client-certs
namespace: {{ .Release.Namespace }}
labels:
{{- include "falco.labels" $ | nindent 4 }}
type: Opaque
data:
{{ $key := .Values.certs.client.key }}
client.key: {{ $key | b64enc | quote }}
{{ $crt := .Values.certs.client.crt }}
client.crt: {{ $crt | b64enc | quote }}
falcoclient.pem: {{ print $key $crt | b64enc | quote }}
ca.crt: {{ .Values.certs.ca.crt | b64enc | quote }}
ca.pem: {{ .Values.certs.ca.crt | b64enc | quote }}
{{- end }}

View File

@ -1,22 +0,0 @@
{{- if .Values.grafana.dashboards.enabled -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Values.grafana.dashboards.configMaps.falco.name }}
{{ if .Values.grafana.dashboards.configMaps.falco.namespace }}
namespace: {{ .Values.grafana.dashboards.configMaps.falco.namespace }}
{{- else -}}
namespace: {{ include "falco.namespace" . }}
{{- end }}
labels:
{{- include "falco.labels" . | nindent 4 }}
grafana_dashboard: "1"
{{- if .Values.grafana.dashboards.configMaps.falco.folder }}
annotations:
k8s-sidecar-target-directory: /tmp/dashboards/{{ .Values.grafana.dashboards.configMaps.falco.folder}}
grafana_dashboard_folder: {{ .Values.grafana.dashboards.configMaps.falco.folder }}
{{- end }}
data:
falco-dashboard.json: |-
{{- .Files.Get "dashboards/falco-dashboard.json" | nindent 4 }}
{{- end -}}

View File

@ -1,14 +0,0 @@
{{- if or .Values.falcoctl.artifact.install.enabled .Values.falcoctl.artifact.follow.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "falco.fullname" . }}-falcoctl
namespace: {{ include "falco.namespace" . }}
labels:
{{- include "falco.labels" . | nindent 4 }}
data:
falcoctl.yaml: |-
{{- include "k8smeta.configuration" . -}}
{{- include "falco.containerPlugin" . -}}
{{- toYaml .Values.falcoctl.config | nindent 4 }}
{{- end }}

View File

@ -1,17 +0,0 @@
{{- if and .Values.rbac.create (eq .Values.driver.kind "auto")}}
kind: Role
apiVersion: {{ include "rbac.apiVersion" . }}
metadata:
name: {{ include "falco.fullname" . }}
labels:
{{- include "falco.labels" . | nindent 4 }}
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- update
{{- end }}

View File

@ -1,26 +0,0 @@
{{- if and .Values.metrics.enabled .Values.metrics.service.create }}
apiVersion: v1
kind: Service
metadata:
name: {{ include "falco.fullname" . }}-metrics
namespace: {{ include "falco.namespace" . }}
labels:
{{- include "falco.labels" . | nindent 4 }}
{{- with .Values.metrics.service.labels }}
{{ toYaml . | nindent 4 }}
{{- end }}
type: "falco-metrics"
{{- with .Values.metrics.service.annotations }}
annotations:
{{ toYaml . | nindent 4 }}
{{- end }}
spec:
type: {{ .Values.metrics.service.type }}
ports:
- port: {{ .Values.metrics.service.ports.metrics.port }}
targetPort: {{ .Values.metrics.service.ports.metrics.targetPort }}
protocol: {{ .Values.metrics.service.ports.metrics.protocol }}
name: "metrics"
selector:
{{- include "falco.selectorLabels" . | nindent 4 }}
{{- end }}

View File

@ -1,51 +0,0 @@
{{- if .Values.serviceMonitor.create }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ include "falco.fullname" . }}
{{- if .Values.serviceMonitor.namespace }}
namespace: {{ tpl .Values.serviceMonitor.namespace . }}
{{- else }}
namespace: {{ include "falco.namespace" . }}
{{- end }}
labels:
{{- include "falco.labels" . | nindent 4 }}
{{- with .Values.serviceMonitor.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
endpoints:
- port: "{{ .Values.serviceMonitor.endpointPort }}"
{{- with .Values.serviceMonitor.interval }}
interval: {{ . }}
{{- end }}
{{- with .Values.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ . }}
{{- end }}
honorLabels: true
path: {{ .Values.serviceMonitor.path }}
scheme: {{ .Values.serviceMonitor.scheme }}
{{- with .Values.serviceMonitor.tlsConfig }}
tlsConfig:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.serviceMonitor.relabelings }}
relabelings:
{{- toYaml . | nindent 8 }}
{{- end }}
jobLabel: "{{ .Release.Name }}"
selector:
matchLabels:
{{- include "falco.selectorLabels" . | nindent 6 }}
{{- with .Values.serviceMonitor.selector }}
{{- toYaml . | nindent 6 }}
{{- end }}
type: "falco-metrics"
namespaceSelector:
matchNames:
- {{ include "falco.namespace" . }}
{{- with .Values.serviceMonitor.targetLabels }}
targetLabels:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

View File

@ -1,35 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright 2024 The Falco Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package unit
import (
"testing"
"github.com/gruntwork-io/terratest/modules/helm"
"gopkg.in/yaml.v3"
)
// ChartInfo returns the chart's information.
func ChartInfo(t *testing.T, chartPath string) (map[string]interface{}, error) {
// Get chart info.
output, err := helm.RunHelmCommandAndGetOutputE(t, &helm.Options{}, "show", "chart", chartPath)
if err != nil {
return nil, err
}
chartInfo := map[string]interface{}{}
err = yaml.Unmarshal([]byte(output), &chartInfo)
return chartInfo, err
}
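
A minimal usage sketch of the helper above (illustrative only, not part of the chart's test suite); it assumes it sits alongside this file in package unit and that testify's require package is available:

package unit

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestChartInfoExposesNameAndVersion(t *testing.T) {
	t.Parallel()
	// "../../.." mirrors the ChartPath constant defined in this package's constants file.
	info, err := ChartInfo(t, "../../..")
	require.NoError(t, err)
	// name and version are mandatory Chart.yaml fields, so the parsed map should expose both.
	require.NotEmpty(t, info["name"])
	require.NotEmpty(t, info["version"])
}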

View File

@ -1,29 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright 2024 The Falco Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package unit
const (
// ReleaseName is the name of the release we expect in the rendered resources.
ReleaseName = "rendered-resources"
// PatternK8sMetacollectorFiles is the regex pattern we expect to find in the rendered resources.
PatternK8sMetacollectorFiles = `# Source: falco/charts/k8s-metacollector/templates/([^\n]+)`
// K8sMetaPluginName is the name of the k8smeta plugin we expect in the falco configuration.
K8sMetaPluginName = "k8smeta"
// ContainerPluginName is the name of the container plugin we expect in the falco configuration.
ContainerPluginName = "container"
// ChartPath is the path to the chart.
ChartPath = "../../.."
)
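
As an aside, PatternK8sMetacollectorFiles above is a capture-group regex over Helm's "# Source:" comments; a standalone sketch (with a fabricated rendered fragment) of how it extracts the template file name:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	pattern := regexp.MustCompile(`# Source: falco/charts/k8s-metacollector/templates/([^\n]+)`)
	// Fabricated fragment of rendered output, used only to exercise the pattern.
	rendered := "---\n# Source: falco/charts/k8s-metacollector/templates/deployment.yaml\napiVersion: apps/v1"
	if m := pattern.FindStringSubmatch(rendered); m != nil {
		fmt.Println(m[1]) // prints: deployment.yaml
	}
}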

View File

@ -1,13 +0,0 @@
package containerPlugin
var volumeNames = []string{
"docker-socket",
"containerd-socket",
"crio-socket",
"container-engine-socket-0",
"container-engine-socket-1",
"container-engine-socket-2",
"container-engine-socket-3",
"container-engine-socket-4",
"container-engine-socket-5",
}
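
These fixed names match the volumes the chart generates: one hostPath volume per configured container-engine socket, indexed in order and mounted under /host, as the daemonset tests below assert. A tiny standalone sketch of that naming scheme (socket list shortened for illustration):

package main

import "fmt"

func main() {
	// A shortened list of the chart's default sockets, purely for illustration.
	sockets := []string{
		"/var/run/docker.sock",
		"/run/podman/podman.sock",
		"/run/host-containerd/containerd.sock",
	}
	for i, s := range sockets {
		fmt.Printf("container-engine-socket-%d -> /host%s\n", i, s)
	}
}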

View File

@ -1,767 +0,0 @@
package containerPlugin
import (
"path/filepath"
"slices"
"testing"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
corev1 "k8s.io/api/core/v1"
"github.com/falcosecurity/charts/charts/falco/tests/unit"
"github.com/gruntwork-io/terratest/modules/helm"
)
func TestContainerPluginConfiguration(t *testing.T) {
t.Parallel()
helmChartPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
testCases := []struct {
name string
values map[string]string
expected func(t *testing.T, config any)
}{
{
"defaultValues",
nil,
func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
initConfig, ok := plugin["init_config"]
require.True(t, ok)
initConfigMap := initConfig.(map[string]interface{})
// Check engines configurations.
engines, ok := initConfigMap["engines"].(map[string]interface{})
require.True(t, ok, "checking if engines section exists")
require.Len(t, engines, 7, "checking number of engines")
var engineConfig ContainerEngineConfig
// Unmarshal the engines configuration.
data, err := yaml.Marshal(engines)
require.NoError(t, err)
err = yaml.Unmarshal(data, &engineConfig)
require.NoError(t, err)
// Check the default values for each engine.
require.True(t, engineConfig.Docker.Enabled)
require.Equal(t, []string{"/var/run/docker.sock"}, engineConfig.Docker.Sockets)
require.True(t, engineConfig.Podman.Enabled)
require.Equal(t, []string{"/run/podman/podman.sock"}, engineConfig.Podman.Sockets)
require.True(t, engineConfig.Containerd.Enabled)
require.Equal(t, []string{"/run/host-containerd/containerd.sock"}, engineConfig.Containerd.Sockets)
require.True(t, engineConfig.CRI.Enabled)
require.Equal(t, []string{"/run/containerd/containerd.sock", "/run/crio/crio.sock", "/run/k3s/containerd/containerd.sock", "/run/host-containerd/containerd.sock"}, engineConfig.CRI.Sockets)
require.True(t, engineConfig.LXC.Enabled)
require.True(t, engineConfig.LibvirtLXC.Enabled)
require.True(t, engineConfig.BPM.Enabled)
},
},
{
name: "changeDockerSocket",
values: map[string]string{
"collectors.containerEngine.engines.docker.enabled": "true",
"collectors.containerEngine.engines.docker.sockets[0]": "/custom/docker.sock",
},
expected: func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
initConfig, ok := plugin["init_config"]
require.True(t, ok)
initConfigMap := initConfig.(map[string]interface{})
engines, ok := initConfigMap["engines"].(map[string]interface{})
require.True(t, ok)
var engineConfig ContainerEngineConfig
data, err := yaml.Marshal(engines)
require.NoError(t, err)
err = yaml.Unmarshal(data, &engineConfig)
require.NoError(t, err)
require.True(t, engineConfig.Docker.Enabled)
require.Equal(t, []string{"/custom/docker.sock"}, engineConfig.Docker.Sockets)
},
},
{
name: "changeCriSocket",
values: map[string]string{
"collectors.containerEngine.engines.cri.enabled": "true",
"collectors.containerEngine.engines.cri.sockets[0]": "/custom/cri.sock",
},
expected: func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
initConfig, ok := plugin["init_config"]
require.True(t, ok)
initConfigMap := initConfig.(map[string]interface{})
engines, ok := initConfigMap["engines"].(map[string]interface{})
require.True(t, ok)
var engineConfig ContainerEngineConfig
data, err := yaml.Marshal(engines)
require.NoError(t, err)
err = yaml.Unmarshal(data, &engineConfig)
require.NoError(t, err)
require.True(t, engineConfig.CRI.Enabled)
require.Equal(t, []string{"/custom/cri.sock"}, engineConfig.CRI.Sockets)
},
},
{
name: "disableDockerSocket",
values: map[string]string{
"collectors.containerEngine.engines.docker.enabled": "false",
},
expected: func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
initConfig, ok := plugin["init_config"]
require.True(t, ok)
initConfigMap := initConfig.(map[string]interface{})
engines, ok := initConfigMap["engines"].(map[string]interface{})
require.True(t, ok)
var engineConfig ContainerEngineConfig
data, err := yaml.Marshal(engines)
require.NoError(t, err)
err = yaml.Unmarshal(data, &engineConfig)
require.NoError(t, err)
require.False(t, engineConfig.Docker.Enabled)
},
},
{
name: "disableCriSocket",
values: map[string]string{
"collectors.containerEngine.engines.cri.enabled": "false",
},
expected: func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
initConfig, ok := plugin["init_config"]
require.True(t, ok)
initConfigMap := initConfig.(map[string]interface{})
engines, ok := initConfigMap["engines"].(map[string]interface{})
require.True(t, ok)
var engineConfig ContainerEngineConfig
data, err := yaml.Marshal(engines)
require.NoError(t, err)
err = yaml.Unmarshal(data, &engineConfig)
require.NoError(t, err)
require.False(t, engineConfig.CRI.Enabled)
},
},
{
name: "changeContainerdSocket",
values: map[string]string{
"collectors.containerEngine.engines.containerd.enabled": "true",
"collectors.containerEngine.engines.containerd.sockets[0]": "/custom/containerd.sock",
},
expected: func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
initConfig, ok := plugin["init_config"]
require.True(t, ok)
initConfigMap := initConfig.(map[string]interface{})
engines, ok := initConfigMap["engines"].(map[string]interface{})
require.True(t, ok)
var engineConfig ContainerEngineConfig
data, err := yaml.Marshal(engines)
require.NoError(t, err)
err = yaml.Unmarshal(data, &engineConfig)
require.NoError(t, err)
require.True(t, engineConfig.Containerd.Enabled)
require.Equal(t, []string{"/custom/containerd.sock"}, engineConfig.Containerd.Sockets)
},
},
{
name: "disableContainerdSocket",
values: map[string]string{
"collectors.containerEngine.engines.containerd.enabled": "false",
},
expected: func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
initConfig, ok := plugin["init_config"]
require.True(t, ok)
initConfigMap := initConfig.(map[string]interface{})
engines, ok := initConfigMap["engines"].(map[string]interface{})
require.True(t, ok)
var engineConfig ContainerEngineConfig
data, err := yaml.Marshal(engines)
require.NoError(t, err)
err = yaml.Unmarshal(data, &engineConfig)
require.NoError(t, err)
require.False(t, engineConfig.Containerd.Enabled)
},
},
{
name: "defaultContainerEngineConfig",
values: map[string]string{},
expected: func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
initConfig, ok := plugin["init_config"]
require.True(t, ok)
initConfigMap := initConfig.(map[string]interface{})
require.Equal(t, float64(100), initConfigMap["label_max_len"])
require.False(t, initConfigMap["with_size"].(bool))
hooks := initConfigMap["hooks"].([]interface{})
require.Len(t, hooks, 1)
require.Contains(t, hooks, "create")
engines := initConfigMap["engines"].(map[string]interface{})
var engineConfig ContainerEngineConfig
data, err := yaml.Marshal(engines)
require.NoError(t, err)
err = yaml.Unmarshal(data, &engineConfig)
require.NoError(t, err)
// Check default engine configurations
require.True(t, engineConfig.Docker.Enabled)
require.Equal(t, []string{"/var/run/docker.sock"}, engineConfig.Docker.Sockets)
require.True(t, engineConfig.Podman.Enabled)
require.Equal(t, []string{"/run/podman/podman.sock"}, engineConfig.Podman.Sockets)
require.True(t, engineConfig.Containerd.Enabled)
require.Equal(t, []string{"/run/host-containerd/containerd.sock"}, engineConfig.Containerd.Sockets)
require.True(t, engineConfig.CRI.Enabled)
require.Equal(t, []string{"/run/containerd/containerd.sock", "/run/crio/crio.sock", "/run/k3s/containerd/containerd.sock", "/run/host-containerd/containerd.sock"}, engineConfig.CRI.Sockets)
require.True(t, engineConfig.LXC.Enabled)
require.True(t, engineConfig.LibvirtLXC.Enabled)
require.True(t, engineConfig.BPM.Enabled)
},
},
{
name: "customContainerEngineConfig",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "false",
"collectors.containerEngine.enabled": "true",
"collectors.containerEngine.labelMaxLen": "200",
"collectors.containerEngine.withSize": "true",
"collectors.containerEngine.hooks[0]": "create",
"collectors.containerEngine.hooks[1]": "start",
"collectors.containerEngine.engines.docker.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "false",
"collectors.containerEngine.engines.containerd.sockets[0]": "/custom/containerd.sock",
"collectors.containerEngine.engines.cri.sockets[0]": "/custom/crio.sock",
"collectors.containerEngine.engines.lxc.enabled": "false",
"collectors.containerEngine.engines.libvirt_lxc.enabled": "false",
"collectors.containerEngine.engines.bpm.enabled": "false",
},
expected: func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
initConfig, ok := plugin["init_config"]
require.True(t, ok)
initConfigMap := initConfig.(map[string]interface{})
require.Equal(t, float64(200), initConfigMap["label_max_len"])
require.True(t, initConfigMap["with_size"].(bool))
hooks := initConfigMap["hooks"].([]interface{})
require.Len(t, hooks, 2)
require.Contains(t, hooks, "create")
require.Contains(t, hooks, "start")
engines := initConfigMap["engines"].(map[string]interface{})
var engineConfig ContainerEngineConfig
data, err := yaml.Marshal(engines)
require.NoError(t, err)
err = yaml.Unmarshal(data, &engineConfig)
require.NoError(t, err)
// Check custom engine configurations
require.False(t, engineConfig.Docker.Enabled)
require.False(t, engineConfig.Podman.Enabled)
require.True(t, engineConfig.Containerd.Enabled)
require.Equal(t, []string{"/custom/containerd.sock"}, engineConfig.Containerd.Sockets)
require.True(t, engineConfig.CRI.Enabled)
require.Equal(t, []string{"/custom/crio.sock"}, engineConfig.CRI.Sockets)
require.False(t, engineConfig.LXC.Enabled)
require.False(t, engineConfig.LibvirtLXC.Enabled)
require.False(t, engineConfig.BPM.Enabled)
},
},
{
name: "customDockerEngineConfigInContainerEngine",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "false",
"collectors.containerEngine.enabled": "true",
"collectors.containerEngine.engines.docker.enabled": "false",
"collectors.containerEngine.engines.docker.sockets[0]": "/custom/docker.sock",
"collectors.containerEngine.engines.docker.sockets[1]": "/custom/docker.sock2",
},
expected: func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
initConfig, ok := plugin["init_config"]
require.True(t, ok)
initConfigMap := initConfig.(map[string]interface{})
engines, ok := initConfigMap["engines"].(map[string]interface{})
require.True(t, ok)
var engineConfig ContainerEngineConfig
data, err := yaml.Marshal(engines)
require.NoError(t, err)
err = yaml.Unmarshal(data, &engineConfig)
require.NoError(t, err)
// Check Docker engine configuration
require.False(t, engineConfig.Docker.Enabled)
require.Equal(t, []string{"/custom/docker.sock", "/custom/docker.sock2"}, engineConfig.Docker.Sockets)
},
},
{
name: "customContainerdEngineConfigInContainerEngine",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "false",
"collectors.containerEngine.enabled": "true",
"collectors.containerEngine.engines.containerd.enabled": "false",
"collectors.containerEngine.engines.containerd.sockets[0]": "/custom/containerd.sock",
"collectors.containerEngine.engines.containerd.sockets[1]": "/custom/containerd.sock2",
},
expected: func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
initConfig, ok := plugin["init_config"]
require.True(t, ok)
initConfigMap := initConfig.(map[string]interface{})
engines, ok := initConfigMap["engines"].(map[string]interface{})
require.True(t, ok)
var engineConfig ContainerEngineConfig
data, err := yaml.Marshal(engines)
require.NoError(t, err)
err = yaml.Unmarshal(data, &engineConfig)
require.NoError(t, err)
// Check Containerd engine configuration
require.False(t, engineConfig.Containerd.Enabled)
require.Equal(t, []string{"/custom/containerd.sock", "/custom/containerd.sock2"}, engineConfig.Containerd.Sockets)
},
},
{
name: "customPodmanEngineConfigInContainerEngine",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "false",
"collectors.containerEngine.enabled": "true",
"collectors.containerEngine.engines.podman.enabled": "true",
"collectors.containerEngine.engines.podman.sockets[0]": "/custom/podman.sock",
"collectors.containerEngine.engines.podman.sockets[1]": "/custom/podman.sock2",
},
expected: func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
initConfig, ok := plugin["init_config"]
require.True(t, ok)
initConfigMap := initConfig.(map[string]interface{})
engines, ok := initConfigMap["engines"].(map[string]interface{})
require.True(t, ok)
var engineConfig ContainerEngineConfig
data, err := yaml.Marshal(engines)
require.NoError(t, err)
err = yaml.Unmarshal(data, &engineConfig)
require.NoError(t, err)
// Check Podman engine configuration
require.True(t, engineConfig.Podman.Enabled)
require.Equal(t, []string{"/custom/podman.sock", "/custom/podman.sock2"}, engineConfig.Podman.Sockets)
},
},
{
name: "customCRIEngineConfigInContainerEngine",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "false",
"collectors.containerEngine.enabled": "true",
"collectors.containerEngine.engines.cri.enabled": "true",
"collectors.containerEngine.engines.cri.sockets[0]": "/custom/cri.sock",
"collectors.containerEngine.engines.cri.sockets[1]": "/custom/cri.sock2",
},
expected: func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
initConfig, ok := plugin["init_config"]
require.True(t, ok)
initConfigMap := initConfig.(map[string]interface{})
engines, ok := initConfigMap["engines"].(map[string]interface{})
require.True(t, ok)
var engineConfig ContainerEngineConfig
data, err := yaml.Marshal(engines)
require.NoError(t, err)
err = yaml.Unmarshal(data, &engineConfig)
require.NoError(t, err)
// Check CRI engine configuration
require.True(t, engineConfig.CRI.Enabled)
require.Equal(t, []string{"/custom/cri.sock", "/custom/cri.sock2"}, engineConfig.CRI.Sockets)
},
},
{
name: "customLXCEngineConfigInContainerEngine",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "false",
"collectors.containerEngine.enabled": "true",
"collectors.containerEngine.engines.lxc.enabled": "true",
},
expected: func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
initConfig, ok := plugin["init_config"]
require.True(t, ok)
initConfigMap := initConfig.(map[string]interface{})
engines, ok := initConfigMap["engines"].(map[string]interface{})
require.True(t, ok)
var engineConfig ContainerEngineConfig
data, err := yaml.Marshal(engines)
require.NoError(t, err)
err = yaml.Unmarshal(data, &engineConfig)
require.NoError(t, err)
// Check LXC engine configuration
require.True(t, engineConfig.LXC.Enabled)
},
},
{
name: "customLibvirtLXCEngineConfigInContainerEngine",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "false",
"collectors.containerEngine.enabled": "true",
"collectors.containerEngine.engines.libvirt_lxc.enabled": "true",
},
expected: func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
initConfig, ok := plugin["init_config"]
require.True(t, ok)
initConfigMap := initConfig.(map[string]interface{})
engines, ok := initConfigMap["engines"].(map[string]interface{})
require.True(t, ok)
var engineConfig ContainerEngineConfig
data, err := yaml.Marshal(engines)
require.NoError(t, err)
err = yaml.Unmarshal(data, &engineConfig)
require.NoError(t, err)
// Check LibvirtLXC engine configuration
require.True(t, engineConfig.LibvirtLXC.Enabled)
},
},
{
name: "customBPMEngineConfigInContainerEngine",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "false",
"collectors.containerEngine.enabled": "true",
"collectors.containerEngine.engines.bpm.enabled": "true",
},
expected: func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
initConfig, ok := plugin["init_config"]
require.True(t, ok)
initConfigMap := initConfig.(map[string]interface{})
engines, ok := initConfigMap["engines"].(map[string]interface{})
require.True(t, ok)
var engineConfig ContainerEngineConfig
data, err := yaml.Marshal(engines)
require.NoError(t, err)
err = yaml.Unmarshal(data, &engineConfig)
require.NoError(t, err)
// Check BPM engine configuration
require.True(t, engineConfig.BPM.Enabled)
},
},
{
name: "allCollectorsDisabled",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "false",
"collectors.containerEngine.enabled": "false",
},
expected: func(t *testing.T, config any) {
// When config is nil, it means the plugin wasn't found in the configuration
require.Nil(t, config, "container plugin should not be present in configuration when all collectors are disabled")
// If somehow the config exists (which it shouldn't), verify there are no engine configurations
if config != nil {
plugin := config.(map[string]interface{})
initConfig, ok := plugin["init_config"]
if ok {
initConfigMap := initConfig.(map[string]interface{})
engines, ok := initConfigMap["engines"]
if ok {
engineMap := engines.(map[string]interface{})
require.Empty(t, engineMap, "engines configuration should be empty when all collectors are disabled")
}
}
}
},
},
{
name: "allCollectorsDisabledTopLevel",
values: map[string]string{
"collectors.enabled": "false",
},
expected: func(t *testing.T, config any) {
// When config is nil, it means the plugin wasn't found in the configuration
require.Nil(t, config, "container plugin should not be present in configuration when all collectors are disabled")
// If somehow the config exists (which it shouldn't), verify there are no engine configurations
if config != nil {
plugin := config.(map[string]interface{})
initConfig, ok := plugin["init_config"]
if ok {
initConfigMap := initConfig.(map[string]interface{})
engines, ok := initConfigMap["engines"]
if ok {
engineMap := engines.(map[string]interface{})
require.Empty(t, engineMap, "engines configuration should be empty when all collectors are disabled")
}
}
}
},
},
}
for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.name, func(t *testing.T) {
t.Parallel()
options := &helm.Options{SetValues: testCase.values}
// Render the chart with the given options.
output := helm.RenderTemplate(t, options, helmChartPath, unit.ReleaseName, []string{"templates/configmap.yaml"})
var cm corev1.ConfigMap
// Unmarshal the output into a ConfigMap object.
helm.UnmarshalK8SYaml(t, output, &cm)
// Unmarshal the data field of the ConfigMap into a map.
var config map[string]interface{}
helm.UnmarshalK8SYaml(t, cm.Data["falco.yaml"], &config)
// Extract the container plugin configuration.
plugins, ok := config["plugins"]
require.True(t, ok, "checking if plugins section exists")
pluginsList := plugins.([]interface{})
found := false
// Get the container plugin configuration.
for _, plugin := range pluginsList {
if name, ok := plugin.(map[string]interface{})["name"]; ok && name == unit.ContainerPluginName {
testCase.expected(t, plugin)
found = true
}
}
if found {
// Check that the plugin has been added to the ones that are enabled.
loadPlugins := config["load_plugins"]
require.True(t, slices.Contains(loadPlugins.([]interface{}), unit.ContainerPluginName))
} else {
testCase.expected(t, nil)
loadPlugins := config["load_plugins"]
require.False(t, slices.Contains(loadPlugins.([]interface{}), unit.ContainerPluginName))
}
})
}
}
func TestInvalidCollectorConfiguration(t *testing.T) {
t.Parallel()
helmChartPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
testCases := []struct {
name string
values map[string]string
expectedErr string
}{
{
name: "dockerAndContainerEngine",
values: map[string]string{
"collectors.docker.enabled": "true",
"collectoars.containerd.enabled": "false",
"collectors.crio.enabled": "false",
"collectors.containerEngine.enabled": "true",
},
expectedErr: "You can not enable any of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time. Please use the containerEngine configuration since the old configurations are deprecated.",
},
{
name: "containerdAndContainerEngine",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "true",
"collectors.crio.enabled": "false",
"collectors.containerEngine.enabled": "true",
},
expectedErr: "You can not enable any of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time. Please use the containerEngine configuration since the old configurations are deprecated.",
},
{
name: "crioAndContainerEngine",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectoars.containerd.enabled": "false",
"collectors.crio.enabled": "true",
"collectors.containerEngine.enabled": "true",
},
expectedErr: "You can not enable any of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time. Please use the containerEngine configuration since the old configurations are deprecated.",
},
}
for _, tc := range testCases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
options := &helm.Options{
SetValues: tc.values,
}
// Attempt to render the template, expect an error
_, err := helm.RenderTemplateE(t, options, helmChartPath, unit.ReleaseName, []string{"templates/configmap.yaml"})
require.Error(t, err)
require.Contains(t, err.Error(), tc.expectedErr)
})
}
}
// Test that the helper does not overwrite the user's configuration
// and that the container reference is added to the configmap.
func TestFalcoctlRefs(t *testing.T) {
t.Parallel()
refShouldBeSet := func(t *testing.T, config any) {
// Get artifact configuration map.
configMap := config.(map[string]interface{})
artifactConfig := (configMap["artifact"]).(map[string]interface{})
// Test allowed types.
allowedTypes := artifactConfig["allowedTypes"]
require.Len(t, allowedTypes, 2)
require.True(t, slices.Contains(allowedTypes.([]interface{}), "plugin"))
require.True(t, slices.Contains(allowedTypes.([]interface{}), "rulesfile"))
// Test plugin reference.
refs := artifactConfig["install"].(map[string]interface{})["refs"].([]interface{})
require.Len(t, refs, 2)
require.True(t, slices.Contains(refs, "falco-rules:4"))
require.True(t, slices.Contains(refs, "ghcr.io/falcosecurity/plugins/plugin/container:0.3.5"))
}
refShouldNotBeSet := func(t *testing.T, config any) {
// Get artifact configuration map.
configMap := config.(map[string]interface{})
artifactConfig := (configMap["artifact"]).(map[string]interface{})
// Test allowed types.
allowedTypes := artifactConfig["allowedTypes"]
require.Len(t, allowedTypes, 2)
require.True(t, slices.Contains(allowedTypes.([]interface{}), "plugin"))
require.True(t, slices.Contains(allowedTypes.([]interface{}), "rulesfile"))
// Test plugin reference.
refs := artifactConfig["install"].(map[string]interface{})["refs"].([]interface{})
require.Len(t, refs, 1)
require.True(t, slices.Contains(refs, "falco-rules:4"))
require.False(t, slices.Contains(refs, "ghcr.io/falcosecurity/plugins/plugin/container:0.3.5"))
}
testCases := []struct {
name string
values map[string]string
expected func(t *testing.T, config any)
}{
{
"defaultValues",
nil,
refShouldBeSet,
},
{
"setPluginConfiguration",
map[string]string{
"collectors.enabled": "false",
},
refShouldNotBeSet,
},
{
"driver disabled",
map[string]string{
"driver.enabled": "false",
},
refShouldNotBeSet,
},
}
helmChartPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.name, func(t *testing.T) {
t.Parallel()
options := &helm.Options{SetValues: testCase.values}
output := helm.RenderTemplate(t, options, helmChartPath, unit.ReleaseName, []string{"templates/falcoctl-configmap.yaml"})
var cm corev1.ConfigMap
helm.UnmarshalK8SYaml(t, output, &cm)
var config map[string]interface{}
helm.UnmarshalK8SYaml(t, cm.Data["falcoctl.yaml"], &config)
testCase.expected(t, config)
})
}
}
type ContainerEngineSocket struct {
Enabled bool `yaml:"enabled"`
Sockets []string `yaml:"sockets,omitempty"`
}
type ContainerEngineConfig struct {
Docker ContainerEngineSocket `yaml:"docker"`
Podman ContainerEngineSocket `yaml:"podman"`
Containerd ContainerEngineSocket `yaml:"containerd"`
CRI ContainerEngineSocket `yaml:"cri"`
LXC ContainerEngineSocket `yaml:"lxc"`
LibvirtLXC ContainerEngineSocket `yaml:"libvirt_lxc"`
BPM ContainerEngineSocket `yaml:"bpm"`
}
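
For completeness, a short sketch of how these types decode an engines block with gopkg.in/yaml.v3, the same round-trip the assertions above rely on; it assumes it lives in the same containerPlugin package, and the YAML snippet is abbreviated from the chart defaults:

package containerPlugin

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// decodeEnginesExample is illustrative only; it reuses the types defined above.
func decodeEnginesExample() {
	engines := `
docker:
  enabled: true
  sockets:
    - /var/run/docker.sock
lxc:
  enabled: true
`
	var cfg ContainerEngineConfig
	if err := yaml.Unmarshal([]byte(engines), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Docker.Sockets[0], cfg.LXC.Enabled) // /var/run/docker.sock true
}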

View File

@ -1,310 +0,0 @@
package containerPlugin
import (
"path/filepath"
"slices"
"testing"
"github.com/falcosecurity/charts/charts/falco/tests/unit"
"github.com/gruntwork-io/terratest/modules/helm"
"github.com/stretchr/testify/require"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
)
func TestContainerPluginVolumeMounts(t *testing.T) {
t.Parallel()
helmChartPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
testCases := []struct {
name string
values map[string]string
expected func(t *testing.T, volumeMounts []corev1.VolumeMount)
}{
{
name: "defaultValues",
values: nil,
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
require.Len(t, volumeMounts, 6)
require.Equal(t, "container-engine-socket-0", volumeMounts[0].Name)
require.Equal(t, "/host/var/run/docker.sock", volumeMounts[0].MountPath)
require.Equal(t, "container-engine-socket-1", volumeMounts[1].Name)
require.Equal(t, "/host/run/podman/podman.sock", volumeMounts[1].MountPath)
require.Equal(t, "container-engine-socket-2", volumeMounts[2].Name)
require.Equal(t, "/host/run/host-containerd/containerd.sock", volumeMounts[2].MountPath)
require.Equal(t, "container-engine-socket-3", volumeMounts[3].Name)
require.Equal(t, "/host/run/containerd/containerd.sock", volumeMounts[3].MountPath)
require.Equal(t, "container-engine-socket-4", volumeMounts[4].Name)
require.Equal(t, "/host/run/crio/crio.sock", volumeMounts[4].MountPath)
require.Equal(t, "container-engine-socket-5", volumeMounts[5].Name)
require.Equal(t, "/host/run/k3s/containerd/containerd.sock", volumeMounts[5].MountPath)
},
},
{
name: "defaultDockerVolumeMount",
values: map[string]string{
"collectors.containerEngine.engines.docker.enabled": "true",
"collectors.containerEngine.engines.containerd.enabled": "false",
"collectors.containerEngine.engines.cri.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
require.Len(t, volumeMounts, 1)
require.Equal(t, "container-engine-socket-0", volumeMounts[0].Name)
require.Equal(t, "/host/var/run/docker.sock", volumeMounts[0].MountPath)
},
},
{
name: "customDockerSocket",
values: map[string]string{
"collectors.containerEngine.engines.docker.enabled": "true",
"collectors.containerEngine.engines.docker.sockets[0]": "/custom/docker.sock",
"collectors.containerEngine.engines.containerd.enabled": "false",
"collectors.containerEngine.engines.cri.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
require.Len(t, volumeMounts, 1)
require.Equal(t, "container-engine-socket-0", volumeMounts[0].Name)
require.Equal(t, "/host/custom/docker.sock", volumeMounts[0].MountPath)
},
},
{
name: "defaultCriVolumeMount",
values: map[string]string{
"collectors.containerEngine.engines.docker.enabled": "false",
"collectors.containerEngine.engines.containerd.enabled": "false",
"collectors.containerEngine.engines.cri.enabled": "true",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
require.Len(t, volumeMounts, 4)
require.Equal(t, "container-engine-socket-0", volumeMounts[0].Name)
require.Equal(t, "/host/run/containerd/containerd.sock", volumeMounts[0].MountPath)
require.Equal(t, "container-engine-socket-1", volumeMounts[1].Name)
require.Equal(t, "/host/run/crio/crio.sock", volumeMounts[1].MountPath)
require.Equal(t, "container-engine-socket-2", volumeMounts[2].Name)
require.Equal(t, "/host/run/k3s/containerd/containerd.sock", volumeMounts[2].MountPath)
require.Equal(t, "container-engine-socket-3", volumeMounts[3].Name)
require.Equal(t, "/host/run/host-containerd/containerd.sock", volumeMounts[3].MountPath)
},
},
{
name: "customCriSocket",
values: map[string]string{
"collectors.containerEngine.engines.cri.enabled": "true",
"collectors.containerEngine.engines.cri.sockets[0]": "/custom/crio.sock",
"collectors.containerEngine.engines.containerd.enabled": "false",
"collectors.containerEngine.engines.docker.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
require.Len(t, volumeMounts, 1)
require.Equal(t, "container-engine-socket-0", volumeMounts[0].Name)
require.Equal(t, "/host/custom/crio.sock", volumeMounts[0].MountPath)
},
},
{
name: "defaultContainerdVolumeMount",
values: map[string]string{
"collectors.containerEngine.engines.docker.enabled": "false",
"collectors.containerEngine.engines.containerd.enabled": "true",
"collectors.containerEngine.engines.cri.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
require.Len(t, volumeMounts, 1)
require.Equal(t, "container-engine-socket-0", volumeMounts[0].Name)
require.Equal(t, "/host/run/host-containerd/containerd.sock", volumeMounts[0].MountPath)
},
},
{
name: "customContainerdSocket",
values: map[string]string{
"collectors.containerEngine.engines.containerd.enabled": "true",
"collectors.containerEngine.engines.containerd.sockets[0]": "/custom/containerd.sock",
"collectors.containerEngine.engines.cri.enabled": "false",
"collectors.containerEngine.engines.docker.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
require.Len(t, volumeMounts, 1)
require.Equal(t, "container-engine-socket-0", volumeMounts[0].Name)
require.Equal(t, "/host/custom/containerd.sock", volumeMounts[0].MountPath)
},
},
{
name: "ContainerEnginesDefaultValues",
values: map[string]string{},
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
require.Len(t, volumeMounts, 6)
// dockerV := findVolumeMount("docker-socket-0", volumeMounts)
// require.NotNil(t, dockerV)
// require.Equal(t, "/host/var/run/docker.sock", dockerV.MountPath)
// podmanV := findVolumeMount("podman-socket-0", volumeMounts)
// require.NotNil(t, podmanV)
// require.Equal(t, "/host/run/podman/podman.sock", podmanV.MountPath)
// containerdV := findVolumeMount("containerd-socket-0", volumeMounts)
// require.NotNil(t, containerdV)
// require.Equal(t, "/host/run/host-containerd/containerd.sock", containerdV.MountPath)
// crioV0 := findVolumeMount("cri-socket-0", volumeMounts)
// require.NotNil(t, crioV0)
// require.Equal(t, "/host/run/containerd/containerd.sock", crioV0.MountPath)
// crioV1 := findVolumeMount("cri-socket-1", volumeMounts)
// require.NotNil(t, crioV1)
// require.Equal(t, "/host/run/crio/crio.sock", crioV1.MountPath)
// crioV2 := findVolumeMount("cri-socket-2", volumeMounts)
// require.NotNil(t, crioV2)
// require.Equal(t, "/host/run/k3s/containerd/containerd.sock", crioV2.MountPath)
},
},
{
name: "ContainerEnginesDockerWithMultipleSockets",
values: map[string]string{
"collectors.containerEngine.engines.docker.enabled": "true",
"collectors.containerEngine.engines.docker.sockets[0]": "/var/run/docker.sock",
"collectors.containerEngine.engines.docker.sockets[1]": "/custom/docker.sock",
"collectors.containerEngine.engines.containerd.enabled": "false",
"collectors.containerEngine.engines.cri.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
require.Len(t, volumeMounts, 2)
dockerV0 := findVolumeMount("container-engine-socket-0", volumeMounts)
require.NotNil(t, dockerV0)
require.Equal(t, "/host/var/run/docker.sock", dockerV0.MountPath)
dockerV1 := findVolumeMount("container-engine-socket-1", volumeMounts)
require.NotNil(t, dockerV1)
require.Equal(t, "/host/custom/docker.sock", dockerV1.MountPath)
},
},
{
name: "ContainerEnginesCrioWithMultipleSockets",
values: map[string]string{
"collectors.containerEngine.engines.docker.enabled": "false",
"collectors.containerEngine.engines.containerd.enabled": "false",
"collectors.containerEngine.engines.cri.enabled": "true",
"collectors.containerEngine.engines.cri.sockets[0]": "/run/crio/crio.sock",
"collectors.containerEngine.engines.cri.sockets[1]": "/custom/crio.sock",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
require.Len(t, volumeMounts, 2)
crioV0 := findVolumeMount("container-engine-socket-0", volumeMounts)
require.NotNil(t, crioV0)
require.Equal(t, "/host/run/crio/crio.sock", crioV0.MountPath)
crioV1 := findVolumeMount("container-engine-socket-1", volumeMounts)
require.NotNil(t, crioV1)
require.Equal(t, "/host/custom/crio.sock", crioV1.MountPath)
},
},
{
name: "noVolumeMountsWhenCollectorsDisabled",
values: map[string]string{
"collectors.enabled": "false",
},
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
require.Len(t, volumeMounts, 0)
},
},
{
name: "noVolumeMountsWhenDriverDisabled",
values: map[string]string{
"driver.enabled": "false",
},
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
require.Len(t, volumeMounts, 0)
},
},
}
for _, tc := range testCases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
options := &helm.Options{
SetValues: tc.values,
}
// Render the template
output := helm.RenderTemplate(t, options, helmChartPath, unit.ReleaseName, []string{"templates/daemonset.yaml"})
// Parse the YAML output
var daemonset appsv1.DaemonSet
helm.UnmarshalK8SYaml(t, output, &daemonset)
// Find volumeMounts in the falco container
var pluginVolumeMounts []corev1.VolumeMount
for _, container := range daemonset.Spec.Template.Spec.Containers {
if container.Name == "falco" {
for _, volumeMount := range container.VolumeMounts {
if slices.Contains(volumeNames, volumeMount.Name) {
pluginVolumeMounts = append(pluginVolumeMounts, volumeMount)
}
}
}
}
// Run the test case's assertions
tc.expected(t, pluginVolumeMounts)
})
}
}
func TestInvalidVolumeMountConfiguration(t *testing.T) {
t.Parallel()
helmChartPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
testCases := []struct {
name string
values map[string]string
expectedErr string
}{
{
name: "bothOldAndNewConfigEnabled",
values: map[string]string{
"collectors.docker.enabled": "true",
"collectors.containerEngine.enabled": "true",
},
expectedErr: "You can not enable any of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time",
},
}
for _, tc := range testCases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
options := &helm.Options{
SetValues: tc.values,
}
// Attempt to render the template, expect an error
_, err := helm.RenderTemplateE(t, options, helmChartPath, unit.ReleaseName, []string{"templates/daemonset.yaml"})
require.Error(t, err)
require.Contains(t, err.Error(), tc.expectedErr)
})
}
}
func findVolumeMount(name string, volumeMounts []corev1.VolumeMount) *corev1.VolumeMount {
for _, v := range volumeMounts {
if v.Name == name {
return &v
}
}
return nil
}

View File

@ -1,373 +0,0 @@
package containerPlugin
import (
"path/filepath"
"slices"
"testing"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"github.com/falcosecurity/charts/charts/falco/tests/unit"
"github.com/gruntwork-io/terratest/modules/helm"
"github.com/stretchr/testify/require"
)
func TestContainerPluginVolumes(t *testing.T) {
t.Parallel()
helmChartPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
testCases := []struct {
name string
values map[string]string
expected func(t *testing.T, volumes []corev1.Volume)
}{
{
name: "defaultValues",
values: nil,
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 6)
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
require.Equal(t, "/var/run/docker.sock", volumes[0].HostPath.Path)
require.Equal(t, "container-engine-socket-1", volumes[1].Name)
require.Equal(t, "/run/podman/podman.sock", volumes[1].HostPath.Path)
require.Equal(t, "container-engine-socket-2", volumes[2].Name)
require.Equal(t, "/run/host-containerd/containerd.sock", volumes[2].HostPath.Path)
require.Equal(t, "container-engine-socket-3", volumes[3].Name)
require.Equal(t, "/run/containerd/containerd.sock", volumes[3].HostPath.Path)
require.Equal(t, "container-engine-socket-4", volumes[4].Name)
require.Equal(t, "/run/crio/crio.sock", volumes[4].HostPath.Path)
require.Equal(t, "container-engine-socket-5", volumes[5].Name)
require.Equal(t, "/run/k3s/containerd/containerd.sock", volumes[5].HostPath.Path)
},
},
{
name: "defaultDockerVolume",
values: map[string]string{
"collectors.containerEngine.engines.docker.enabled": "true",
"collectors.containerEngine.engines.containerd.enabled": "false",
"collectors.containerEngine.engines.cri.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 1)
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
require.Equal(t, "/var/run/docker.sock", volumes[0].HostPath.Path)
},
},
{
name: "customDockerSocket",
values: map[string]string{
"collectors.containerEngine.engines.docker.enabled": "true",
"collectors.containerEngine.engines.docker.sockets[0]": "/custom/docker.sock",
"collectors.containerEngine.engines.containerd.enabled": "false",
"collectors.containerEngine.engines.cri.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 1)
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
require.Equal(t, "/custom/docker.sock", volumes[0].HostPath.Path)
},
},
{
name: "defaultCriVolume",
values: map[string]string{
"collectors.containerEngine.engines.docker.enabled": "false",
"collectors.containerEngine.engines.containerd.enabled": "false",
"collectors.containerEngine.engines.cri.enabled": "true",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 4)
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
require.Equal(t, "/run/containerd/containerd.sock", volumes[0].HostPath.Path)
require.Equal(t, "container-engine-socket-1", volumes[1].Name)
require.Equal(t, "/run/crio/crio.sock", volumes[1].HostPath.Path)
require.Equal(t, "container-engine-socket-2", volumes[2].Name)
require.Equal(t, "/run/k3s/containerd/containerd.sock", volumes[2].HostPath.Path)
require.Equal(t, "container-engine-socket-3", volumes[3].Name)
require.Equal(t, "/run/host-containerd/containerd.sock", volumes[3].HostPath.Path)
},
},
{
name: "customCrioSocket",
values: map[string]string{
"collectors.containerEngine.engines.docker.enabled": "false",
"collectors.containerEngine.engines.cri.enabled": "true",
"collectors.containerEngine.engines.cri.sockets[0]": "/custom/crio.sock",
"collectors.containerEngine.engines.containerd.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 1)
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
require.Equal(t, "/custom/crio.sock", volumes[0].HostPath.Path)
},
},
{
name: "defaultContainerdVolume",
values: map[string]string{
"collectors.containerEngine.engines.docker.enabled": "false",
"collectors.containerEngine.engines.containerd.enabled": "true",
"collectors.containerEngine.engines.cri.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 1)
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
require.Equal(t, "/run/host-containerd/containerd.sock", volumes[0].HostPath.Path)
},
},
{
name: "customContainerdSocket",
values: map[string]string{
"collectors.containerEngine.engines.docker.enabled": "false",
"collectors.containerEngine.engines.containerd.enabled": "true",
"collectors.containerEngine.engines.containerd.sockets[0]": "/custom/containerd.sock",
"collectors.containerEngine.engines.cri.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 1)
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
require.Equal(t, "/custom/containerd.sock", volumes[0].HostPath.Path)
},
},
{
name: "ContainerEnginesDefaultValues",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "false",
"collectors.containerEngine.enabled": "true",
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 6)
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
require.Equal(t, "/var/run/docker.sock", volumes[0].HostPath.Path)
require.Equal(t, "container-engine-socket-1", volumes[1].Name)
require.Equal(t, "/run/podman/podman.sock", volumes[1].HostPath.Path)
require.Equal(t, "container-engine-socket-2", volumes[2].Name)
require.Equal(t, "/run/host-containerd/containerd.sock", volumes[2].HostPath.Path)
require.Equal(t, "container-engine-socket-3", volumes[3].Name)
require.Equal(t, "/run/containerd/containerd.sock", volumes[3].HostPath.Path)
require.Equal(t, "container-engine-socket-4", volumes[4].Name)
require.Equal(t, "/run/crio/crio.sock", volumes[4].HostPath.Path)
require.Equal(t, "container-engine-socket-5", volumes[5].Name)
require.Equal(t, "/run/k3s/containerd/containerd.sock", volumes[5].HostPath.Path)
},
},
{
name: "ContainerEnginesDockerWithMultipleSockets",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "false",
"collectors.containerEngine.enabled": "true",
"collectors.containerEngine.engines.docker.enabled": "true",
"collectors.containerEngine.engines.docker.sockets[0]": "/var/run/docker.sock",
"collectors.containerEngine.engines.docker.sockets[1]": "/custom/docker.sock",
"collectors.containerEngine.engines.containerd.enabled": "false",
"collectors.containerEngine.engines.cri.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 2)
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
require.Equal(t, "/var/run/docker.sock", volumes[0].HostPath.Path)
require.Equal(t, "container-engine-socket-1", volumes[1].Name)
require.Equal(t, "/custom/docker.sock", volumes[1].HostPath.Path)
},
},
{
name: "ContainerEnginesCrioWithMultipleSockets",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "false",
"collectors.containerEngine.enabled": "true",
"collectors.containerEngine.engines.docker.enabled": "false",
"collectors.containerEngine.engines.containerd.enabled": "false",
"collectors.containerEngine.engines.cri.enabled": "true",
"collectors.containerEngine.engines.cri.sockets[0]": "/run/crio/crio.sock",
"collectors.containerEngine.engines.cri.sockets[1]": "/custom/crio.sock",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 2)
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
require.Equal(t, "/run/crio/crio.sock", volumes[0].HostPath.Path)
require.Equal(t, "container-engine-socket-1", volumes[1].Name)
require.Equal(t, "/custom/crio.sock", volumes[1].HostPath.Path)
},
},
{
name: "ContainerEnginesPodmanWithMultipleSockets",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "false",
"collectors.containerEngine.enabled": "true",
"collectors.containerEngine.engines.docker.enabled": "false",
"collectors.containerEngine.engines.containerd.enabled": "false",
"collectors.containerEngine.engines.cri.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "true",
"collectors.containerEngine.engines.podman.sockets[0]": "/run/podman/podman.sock",
"collectors.containerEngine.engines.podman.sockets[1]": "/custom/podman.sock",
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 2)
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
require.Equal(t, "/run/podman/podman.sock", volumes[0].HostPath.Path)
require.Equal(t, "container-engine-socket-1", volumes[1].Name)
require.Equal(t, "/custom/podman.sock", volumes[1].HostPath.Path)
},
},
{
name: "ContainerEnginesContainerdWithMultipleSockets",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "false",
"collectors.containerEngine.enabled": "true",
"collectors.containerEngine.engines.docker.enabled": "false",
"collectors.containerEngine.engines.containerd.enabled": "true",
"collectors.containerEngine.engines.containerd.sockets[0]": "/run/containerd/containerd.sock",
"collectors.containerEngine.engines.containerd.sockets[1]": "/custom/containerd.sock",
"collectors.containerEngine.engines.cri.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 2)
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
require.Equal(t, "/run/containerd/containerd.sock", volumes[0].HostPath.Path)
require.Equal(t, "container-engine-socket-1", volumes[1].Name)
require.Equal(t, "/custom/containerd.sock", volumes[1].HostPath.Path)
},
},
{
name: "ContainerEnginesMultipleWithCustomSockets",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "false",
"collectors.containerEngine.enabled": "true",
"collectors.containerEngine.engines.docker.enabled": "true",
"collectors.containerEngine.engines.docker.sockets[0]": "/custom/docker/socket.sock",
"collectors.containerEngine.engines.containerd.enabled": "true",
"collectors.containerEngine.engines.cri.enabled": "true",
"collectors.containerEngine.engines.cri.sockets[0]": "/var/custom/crio.sock",
"collectors.containerEngine.engines.podman.enabled": "true",
"collectors.containerEngine.engines.podman.sockets[0]": "/run/podman/podman.sock",
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 4)
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
require.Equal(t, "/custom/docker/socket.sock", volumes[0].HostPath.Path)
require.Equal(t, "container-engine-socket-1", volumes[1].Name)
require.Equal(t, "/run/podman/podman.sock", volumes[1].HostPath.Path)
require.Equal(t, "container-engine-socket-2", volumes[2].Name)
require.Equal(t, "/run/host-containerd/containerd.sock", volumes[2].HostPath.Path)
require.Equal(t, "container-engine-socket-3", volumes[3].Name)
require.Equal(t, "/var/custom/crio.sock", volumes[3].HostPath.Path)
},
},
{
name: "noVolumesWhenCollectorsDisabled",
values: map[string]string{
"collectors.enabled": "false",
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 0)
},
},
{
name: "noVolumesWhenDriverDisabled",
values: map[string]string{
"driver.enabled": "false",
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 0)
},
},
}
for _, tc := range testCases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
options := &helm.Options{
SetValues: tc.values,
}
// Render the template
output := helm.RenderTemplate(t, options, helmChartPath, unit.ReleaseName, []string{"templates/daemonset.yaml"})
// Parse the YAML output
var daemonset appsv1.DaemonSet
helm.UnmarshalK8SYaml(t, output, &daemonset)
// Find volumes that match our container plugin pattern
var pluginVolumes []corev1.Volume
for _, volume := range daemonset.Spec.Template.Spec.Volumes {
// Check if the volume is for container sockets
if volume.HostPath != nil && slices.Contains(volumeNames, volume.Name) {
pluginVolumes = append(pluginVolumes, volume)
}
}
// Run the test case's assertions
tc.expected(t, pluginVolumes)
})
}
}
func TestInvalidVolumeConfiguration(t *testing.T) {
t.Parallel()
helmChartPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
testCases := []struct {
name string
values map[string]string
expectedErr string
}{
{
name: "bothOldAndNewConfigEnabled",
values: map[string]string{
"collectors.docker.enabled": "true",
"collectors.containerEngine.enabled": "true",
},
expectedErr: "You can not enable any of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time",
},
}
for _, tc := range testCases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
options := &helm.Options{
SetValues: tc.values,
}
// Attempt to render the template, expect an error
_, err := helm.RenderTemplateE(t, options, helmChartPath, unit.ReleaseName, []string{"templates/daemonset.yaml"})
require.Error(t, err)
require.Contains(t, err.Error(), tc.expectedErr)
})
}
}
func findVolume(name string, volumes []corev1.Volume) *corev1.Volume {
for _, v := range volumes {
if v.Name == name {
return &v
}
}
return nil
}

View File

@ -1,17 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright 2024 The Falco Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package unit contains the unit tests for the Falco chart.
package unit

View File

@ -1,334 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright 2024 The Falco Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package falcoTemplates
import (
"fmt"
"github.com/falcosecurity/charts/charts/falco/tests/unit"
"path/filepath"
"strings"
"testing"
"github.com/gruntwork-io/terratest/modules/helm"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
)
func TestDriverConfigInFalcoConfig(t *testing.T) {
t.Parallel()
helmChartPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
testCases := []struct {
name string
values map[string]string
expected func(t *testing.T, config any)
}{
{
"defaultValues",
nil,
func(t *testing.T, config any) {
require.Len(t, config, 4, "should have four items")
kind, bufSizePreset, dropFailedExit, err := getKmodConfig(config)
require.NoError(t, err)
require.Equal(t, "modern_ebpf", kind)
require.Equal(t, float64(4), bufSizePreset)
require.False(t, dropFailedExit)
},
},
{
"kind=kmod",
map[string]string{
"driver.kind": "kmod",
},
func(t *testing.T, config any) {
require.Len(t, config, 2, "should have only two items")
kind, bufSizePreset, dropFailedExit, err := getKmodConfig(config)
require.NoError(t, err)
require.Equal(t, "kmod", kind)
require.Equal(t, float64(4), bufSizePreset)
require.False(t, dropFailedExit)
},
},
{
"kind=module(alias)",
map[string]string{
"driver.kind": "module",
},
func(t *testing.T, config any) {
require.Len(t, config, 2, "should have only two items")
kind, bufSizePreset, dropFailedExit, err := getKmodConfig(config)
require.NoError(t, err)
require.Equal(t, "kmod", kind)
require.Equal(t, float64(4), bufSizePreset)
require.False(t, dropFailedExit)
},
},
{
"kmod=config",
map[string]string{
"driver.kmod.bufSizePreset": "6",
"driver.kmod.dropFailedExit": "true",
"driver.kind": "module",
},
func(t *testing.T, config any) {
require.Len(t, config, 2, "should have only two items")
kind, bufSizePreset, dropFailedExit, err := getKmodConfig(config)
require.NoError(t, err)
require.Equal(t, "kmod", kind)
require.Equal(t, float64(6), bufSizePreset)
require.True(t, dropFailedExit)
},
},
{
"ebpf=config",
map[string]string{
"driver.kind": "ebpf",
"driver.ebpf.bufSizePreset": "6",
"driver.ebpf.dropFailedExit": "true",
"driver.ebpf.path": "testing/Path/ebpf",
},
func(t *testing.T, config any) {
require.Len(t, config, 2, "should have only two items")
kind, path, bufSizePreset, dropFailedExit, err := getEbpfConfig(config)
require.NoError(t, err)
require.Equal(t, "ebpf", kind)
require.Equal(t, "testing/Path/ebpf", path)
require.Equal(t, float64(6), bufSizePreset)
require.True(t, dropFailedExit)
},
},
{
"kind=ebpf",
map[string]string{
"driver.kind": "ebpf",
},
func(t *testing.T, config any) {
require.Len(t, config, 2, "should have only two items")
kind, path, bufSizePreset, dropFailedExit, err := getEbpfConfig(config)
require.NoError(t, err)
require.Equal(t, "ebpf", kind)
require.Equal(t, "${HOME}/.falco/falco-bpf.o", path)
require.Equal(t, float64(4), bufSizePreset)
require.False(t, dropFailedExit)
},
},
{
"kind=modern_ebpf",
map[string]string{
"driver.kind": "modern_ebpf",
},
func(t *testing.T, config any) {
require.Len(t, config, 2, "should have only two items")
kind, bufSizePreset, cpusForEachBuffer, dropFailedExit, err := getModernEbpfConfig(config)
require.NoError(t, err)
require.Equal(t, "modern_ebpf", kind)
require.Equal(t, float64(4), bufSizePreset)
require.Equal(t, float64(2), cpusForEachBuffer)
require.False(t, dropFailedExit)
},
},
{
"kind=modern-bpf(alias)",
map[string]string{
"driver.kind": "modern-bpf",
},
func(t *testing.T, config any) {
require.Len(t, config, 2, "should have only two items")
kind, bufSizePreset, cpusForEachBuffer, dropFailedExit, err := getModernEbpfConfig(config)
require.NoError(t, err)
require.Equal(t, "modern_ebpf", kind)
require.Equal(t, float64(4), bufSizePreset)
require.Equal(t, float64(2), cpusForEachBuffer)
require.False(t, dropFailedExit)
},
},
{
"modernEbpf=config",
map[string]string{
"driver.kind": "modern-bpf",
"driver.modernEbpf.bufSizePreset": "6",
"driver.modernEbpf.dropFailedExit": "true",
"driver.modernEbpf.cpusForEachBuffer": "8",
},
func(t *testing.T, config any) {
require.Len(t, config, 2, "should have only two items")
kind, bufSizePreset, cpusForEachBuffer, dropFailedExit, err := getModernEbpfConfig(config)
require.NoError(t, err)
require.Equal(t, "modern_ebpf", kind)
require.Equal(t, float64(6), bufSizePreset)
require.Equal(t, float64(8), cpusForEachBuffer)
require.True(t, dropFailedExit)
},
},
{
"kind=gvisor",
map[string]string{
"driver.kind": "gvisor",
},
func(t *testing.T, config any) {
require.Len(t, config, 2, "should have only two items")
kind, config, root, err := getGvisorConfig(config)
require.NoError(t, err)
require.Equal(t, "gvisor", kind)
require.Equal(t, "/gvisor-config/pod-init.json", config)
require.Equal(t, "/host/run/containerd/runsc/k8s.io", root)
},
},
{
"gvisor=config",
map[string]string{
"driver.kind": "gvisor",
"driver.gvisor.runsc.root": "/my/root/test",
},
func(t *testing.T, config any) {
require.Len(t, config, 2, "should have only two items")
kind, config, root, err := getGvisorConfig(config)
require.NoError(t, err)
require.Equal(t, "gvisor", kind)
require.Equal(t, "/gvisor-config/pod-init.json", config)
require.Equal(t, "/host/my/root/test/k8s.io", root)
},
},
{
"kind=auto",
map[string]string{
"driver.kind": "auto",
},
func(t *testing.T, config any) {
require.Len(t, config, 4, "should have four items")
// Check that configuration for kmod has been set.
kind, bufSizePreset, dropFailedExit, err := getKmodConfig(config)
require.NoError(t, err)
require.Equal(t, "modern_ebpf", kind)
require.Equal(t, float64(4), bufSizePreset)
require.False(t, dropFailedExit)
// Check that configuration for ebpf has been set.
kind, path, bufSizePreset, dropFailedExit, err := getEbpfConfig(config)
require.NoError(t, err)
require.Equal(t, "modern_ebpf", kind)
require.Equal(t, "${HOME}/.falco/falco-bpf.o", path)
require.Equal(t, float64(4), bufSizePreset)
require.False(t, dropFailedExit)
// Check that configuration for modern_ebpf has been set.
kind, bufSizePreset, cpusForEachBuffer, dropFailedExit, err := getModernEbpfConfig(config)
require.NoError(t, err)
require.Equal(t, "modern_ebpf", kind)
require.Equal(t, float64(4), bufSizePreset)
require.Equal(t, float64(2), cpusForEachBuffer)
require.False(t, dropFailedExit)
},
},
}
for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.name, func(t *testing.T) {
t.Parallel()
options := &helm.Options{SetValues: testCase.values}
output := helm.RenderTemplate(t, options, helmChartPath, unit.ReleaseName, []string{"templates/configmap.yaml"})
var cm corev1.ConfigMap
helm.UnmarshalK8SYaml(t, output, &cm)
var config map[string]interface{}
helm.UnmarshalK8SYaml(t, cm.Data["falco.yaml"], &config)
engine := config["engine"]
testCase.expected(t, engine)
})
}
}
func TestDriverConfigWithUnsupportedDriver(t *testing.T) {
t.Parallel()
helmChartPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
values := map[string]string{
"driver.kind": "notExisting",
}
options := &helm.Options{SetValues: values}
_, err = helm.RenderTemplateE(t, options, helmChartPath, unit.ReleaseName, []string{"templates/configmap.yaml"})
require.Error(t, err)
require.True(t, strings.Contains(err.Error(),
"unsupported driver kind: \"notExisting\". Supported drivers [kmod ebpf modern_ebpf gvisor auto], alias [module modern-bpf]"))
}
func getKmodConfig(config interface{}) (kind string, bufSizePreset float64, dropFailedExit bool, err error) {
configMap, ok := config.(map[string]interface{})
if !ok {
err = fmt.Errorf("can't assert type of config")
return
}
kind = configMap["kind"].(string)
kmod := configMap["kmod"].(map[string]interface{})
bufSizePreset = kmod["buf_size_preset"].(float64)
dropFailedExit = kmod["drop_failed_exit"].(bool)
return
}
func getEbpfConfig(config interface{}) (kind, path string, bufSizePreset float64, dropFailedExit bool, err error) {
configMap, ok := config.(map[string]interface{})
if !ok {
err = fmt.Errorf("can't assert type of config")
return
}
kind = configMap["kind"].(string)
ebpf := configMap["ebpf"].(map[string]interface{})
bufSizePreset = ebpf["buf_size_preset"].(float64)
dropFailedExit = ebpf["drop_failed_exit"].(bool)
path = ebpf["probe"].(string)
return
}
func getModernEbpfConfig(config interface{}) (kind string, bufSizePreset, cpusForEachBuffer float64, dropFailedExit bool, err error) {
configMap, ok := config.(map[string]interface{})
if !ok {
err = fmt.Errorf("can't assert type of config")
return
}
kind = configMap["kind"].(string)
modernEbpf := configMap["modern_ebpf"].(map[string]interface{})
bufSizePreset = modernEbpf["buf_size_preset"].(float64)
dropFailedExit = modernEbpf["drop_failed_exit"].(bool)
cpusForEachBuffer = modernEbpf["cpus_for_each_buffer"].(float64)
return
}
func getGvisorConfig(cfg interface{}) (kind, config, root string, err error) {
configMap, ok := cfg.(map[string]interface{})
if !ok {
err = fmt.Errorf("can't assert type of config")
return
}
kind = configMap["kind"].(string)
gvisor := configMap["gvisor"].(map[string]interface{})
config = gvisor["config"].(string)
root = gvisor["root"].(string)
return
}
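
For reference, the engine block that these helpers decode comes out roughly as follows when the chart is rendered with its default values. This is a sketch reconstructed from the assertions above (notably the kind=auto case); field order and any extra keys in the real template may differ.

# engine section of the rendered falco.yaml, default values (reconstructed from the tests above)
engine:
  kind: modern_ebpf
  kmod:
    buf_size_preset: 4
    drop_failed_exit: false
  ebpf:
    probe: ${HOME}/.falco/falco-bpf.o
    buf_size_preset: 4
    drop_failed_exit: false
  modern_ebpf:
    buf_size_preset: 4
    cpus_for_each_buffer: 2
    drop_failed_exit: false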

View File

@ -1,266 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright 2024 The Falco Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package falcoTemplates
import (
"github.com/falcosecurity/charts/charts/falco/tests/unit"
"path/filepath"
"testing"
v1 "k8s.io/api/core/v1"
"github.com/gruntwork-io/terratest/modules/helm"
"github.com/stretchr/testify/require"
appsv1 "k8s.io/api/apps/v1"
)
var (
namespaceEnvVar = v1.EnvVar{
Name: "FALCOCTL_DRIVER_CONFIG_NAMESPACE",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "",
FieldPath: "metadata.namespace",
},
}}
configmapEnvVar = v1.EnvVar{
Name: "FALCOCTL_DRIVER_CONFIG_CONFIGMAP",
Value: unit.ReleaseName + "-falco",
}
updateConfigMapEnvVar = v1.EnvVar{
Name: "FALCOCTL_DRIVER_CONFIG_UPDATE_FALCO",
Value: "false",
}
)
// TestDriverLoaderEnabled tests the helper that enables the driver loader based on the configuration.
func TestDriverLoaderEnabled(t *testing.T) {
t.Parallel()
helmChartPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
testCases := []struct {
name string
values map[string]string
expected func(t *testing.T, initContainer any)
}{
{
"defaultValues",
nil,
func(t *testing.T, initContainer any) {
container, ok := initContainer.(v1.Container)
require.True(t, ok)
require.Contains(t, container.Args, "auto")
require.True(t, *container.SecurityContext.Privileged)
require.Contains(t, container.Env, namespaceEnvVar)
require.Contains(t, container.Env, configmapEnvVar)
require.NotContains(t, container.Env, updateConfigMapEnvVar)
// Check that the expected volumes are there.
volumeMounts(t, container.VolumeMounts)
},
},
{
"driver.kind=modern-bpf",
map[string]string{
"driver.kind": "modern-bpf",
},
func(t *testing.T, initContainer any) {
require.Equal(t, initContainer, nil)
},
},
{
"driver.kind=modern_ebpf",
map[string]string{
"driver.kind": "modern_ebpf",
},
func(t *testing.T, initContainer any) {
require.Equal(t, initContainer, nil)
},
},
{
"driver.kind=gvisor",
map[string]string{
"driver.kind": "gvisor",
},
func(t *testing.T, initContainer any) {
require.Equal(t, initContainer, nil)
},
},
{
"driver.disabled",
map[string]string{
"driver.enabled": "false",
},
func(t *testing.T, initContainer any) {
require.Equal(t, initContainer, nil)
},
},
{
"driver.loader.disabled",
map[string]string{
"driver.loader.enabled": "false",
},
func(t *testing.T, initContainer any) {
require.Equal(t, initContainer, nil)
},
},
{
"driver.kind=kmod",
map[string]string{
"driver.kind": "kmod",
},
func(t *testing.T, initContainer any) {
container, ok := initContainer.(v1.Container)
require.True(t, ok)
require.Contains(t, container.Args, "kmod")
require.True(t, *container.SecurityContext.Privileged)
require.NotContains(t, container.Env, namespaceEnvVar)
require.NotContains(t, container.Env, configmapEnvVar)
require.Contains(t, container.Env, updateConfigMapEnvVar)
// Check that the expected volumes are there.
volumeMounts(t, container.VolumeMounts)
},
},
{
"driver.kind=module",
map[string]string{
"driver.kind": "module",
},
func(t *testing.T, initContainer any) {
container, ok := initContainer.(v1.Container)
require.True(t, ok)
require.Contains(t, container.Args, "kmod")
require.True(t, *container.SecurityContext.Privileged)
require.NotContains(t, container.Env, namespaceEnvVar)
require.NotContains(t, container.Env, configmapEnvVar)
require.Contains(t, container.Env, updateConfigMapEnvVar)
// Check that the expected volumes are there.
volumeMounts(t, container.VolumeMounts)
},
},
{
"driver.kind=ebpf",
map[string]string{
"driver.kind": "ebpf",
},
func(t *testing.T, initContainer any) {
container, ok := initContainer.(v1.Container)
require.True(t, ok)
require.Contains(t, container.Args, "ebpf")
require.Nil(t, container.SecurityContext)
require.NotContains(t, container.Env, namespaceEnvVar)
require.Contains(t, container.Env, updateConfigMapEnvVar)
require.NotContains(t, container.Env, configmapEnvVar)
// Check that the expected volumes are there.
volumeMounts(t, container.VolumeMounts)
},
},
{
"driver.kind=kmod&driver.loader.disabled",
map[string]string{
"driver.kind": "kmod",
"driver.loader.enabled": "false",
},
func(t *testing.T, initContainer any) {
require.Equal(t, initContainer, nil)
},
},
}
for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.name, func(t *testing.T) {
t.Parallel()
options := &helm.Options{SetValues: testCase.values}
output := helm.RenderTemplate(t, options, helmChartPath, unit.ReleaseName, []string{"templates/daemonset.yaml"})
var ds appsv1.DaemonSet
helm.UnmarshalK8SYaml(t, output, &ds)
for i := range ds.Spec.Template.Spec.InitContainers {
if ds.Spec.Template.Spec.InitContainers[i].Name == "falco-driver-loader" {
testCase.expected(t, ds.Spec.Template.Spec.InitContainers[i])
return
}
}
testCase.expected(t, nil)
})
}
}
// volumeMounts checks that the expected volume mounts have been configured.
func volumeMounts(t *testing.T, volumeMounts []v1.VolumeMount) {
rootFalcoFS := v1.VolumeMount{
Name: "root-falco-fs",
ReadOnly: false,
MountPath: "/root/.falco",
}
require.Contains(t, volumeMounts, rootFalcoFS)
procFS := v1.VolumeMount{
Name: "proc-fs",
ReadOnly: true,
MountPath: "/host/proc",
}
require.Contains(t, volumeMounts, procFS)
bootFS := v1.VolumeMount{
Name: "boot-fs",
ReadOnly: true,
MountPath: "/host/boot",
}
require.Contains(t, volumeMounts, bootFS)
libModulesFS := v1.VolumeMount{
Name: "lib-modules",
ReadOnly: false,
MountPath: "/host/lib/modules",
}
require.Contains(t, volumeMounts, libModulesFS)
usrFS := v1.VolumeMount{
Name: "usr-fs",
ReadOnly: true,
MountPath: "/host/usr",
}
require.Contains(t, volumeMounts, usrFS)
etcFS := v1.VolumeMount{
Name: "etc-fs",
ReadOnly: true,
MountPath: "/host/etc",
}
require.Contains(t, volumeMounts, etcFS)
specializedFalcoConfigs := v1.VolumeMount{
Name: "specialized-falco-configs",
ReadOnly: false,
MountPath: "/etc/falco/config.d",
}
require.Contains(t, volumeMounts, specializedFalcoConfigs)
}
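
For reference, the falco-driver-loader init container inspected by this test looks roughly like the snippet below when rendered with default values. It is a sketch reconstructed from the assertions above (args and volume mounts are shown only partially); rendered-resources is the release name used by these unit tests.

# excerpt of the daemonset's falco-driver-loader init container, default values
initContainers:
  - name: falco-driver-loader
    args: ["auto"]                       # only the driver kind argument is asserted
    securityContext:
      privileged: true
    env:
      - name: FALCOCTL_DRIVER_CONFIG_NAMESPACE
        valueFrom:
          fieldRef:
            fieldPath: metadata.namespace
      - name: FALCOCTL_DRIVER_CONFIG_CONFIGMAP
        value: rendered-resources-falco  # <release name>-falco
    volumeMounts:
      - name: root-falco-fs
        mountPath: /root/.falco
      - name: specialized-falco-configs
        mountPath: /etc/falco/config.d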

View File

@ -1,145 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright 2024 The Falco Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package falcoTemplates
import (
"fmt"
"github.com/falcosecurity/charts/charts/falco/tests/unit"
"io"
"os"
"path/filepath"
"strings"
"testing"
"github.com/gruntwork-io/terratest/modules/helm"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
corev1 "k8s.io/api/core/v1"
)
type grafanaDashboardsTemplateTest struct {
suite.Suite
chartPath string
releaseName string
namespace string
templates []string
}
func TestGrafanaDashboardsTemplate(t *testing.T) {
t.Parallel()
chartFullPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
suite.Run(t, &grafanaDashboardsTemplateTest{
Suite: suite.Suite{},
chartPath: chartFullPath,
releaseName: "falco-test-dashboard",
namespace: "falco-test-dashboard",
templates: []string{"templates/falco-dashboard-grafana.yaml"},
})
}
func (g *grafanaDashboardsTemplateTest) TestCreationDefaultValues() {
// Render the dashboard configmap and check that it has not been rendered.
_, err := helm.RenderTemplateE(g.T(), &helm.Options{}, g.chartPath, g.releaseName, g.templates, fmt.Sprintf("--namespace=%s", g.namespace))
g.Error(err, "should error")
g.Equal("error while running command: exit status 1; Error: could not find template templates/falco-dashboard-grafana.yaml in chart", err.Error())
}
func (g *grafanaDashboardsTemplateTest) TestConfig() {
testCases := []struct {
name string
values map[string]string
expected func(cm *corev1.ConfigMap)
}{
{"dashboard enabled",
map[string]string{
"grafana.dashboards.enabled": "true",
},
func(cm *corev1.ConfigMap) {
// Check that the name is the expected one.
g.Equal("falco-grafana-dashboard", cm.Name)
// Check the namespace.
g.Equal(g.namespace, cm.Namespace)
g.Nil(cm.Annotations)
},
},
{"namespace",
map[string]string{
"grafana.dashboards.enabled": "true",
"grafana.dashboards.configMaps.falco.namespace": "custom-namespace",
},
func(cm *corev1.ConfigMap) {
// Check that the name is the expected one.
g.Equal("falco-grafana-dashboard", cm.Name)
// Check the namespace.
g.Equal("custom-namespace", cm.Namespace)
g.Nil(cm.Annotations)
},
},
{"folder",
map[string]string{
"grafana.dashboards.enabled": "true",
"grafana.dashboards.configMaps.falco.folder": "custom-folder",
},
func(cm *corev1.ConfigMap) {
// Check that the name is the expected one.
g.Equal("falco-grafana-dashboard", cm.Name)
g.NotNil(cm.Annotations)
g.Len(cm.Annotations, 2)
// Check sidecar annotation.
val, ok := cm.Annotations["k8s-sidecar-target-directory"]
g.True(ok)
g.Equal("/tmp/dashboards/custom-folder", val)
// Check grafana annotation.
val, ok = cm.Annotations["grafana_dashboard_folder"]
g.True(ok)
g.Equal("custom-folder", val)
},
},
}
for _, testCase := range testCases {
testCase := testCase
g.Run(testCase.name, func() {
subT := g.T()
subT.Parallel()
options := &helm.Options{SetValues: testCase.values}
// Render the configmap unmarshal it.
output, err := helm.RenderTemplateE(subT, options, g.chartPath, g.releaseName, g.templates, "--namespace="+g.namespace)
g.NoError(err, "should succeed")
var cfgMap corev1.ConfigMap
helm.UnmarshalK8SYaml(subT, output, &cfgMap)
// Common checks
// Check that contains the right label.
g.Contains(cfgMap.Labels, "grafana_dashboard")
// Check that the dashboard is contained in the config map.
file, err := os.Open("../../../dashboards/falco-dashboard.json")
g.NoError(err)
content, err := io.ReadAll(file)
g.NoError(err)
cfgData, ok := cfgMap.Data["falco-dashboard.json"]
g.True(ok)
g.Equal(strings.TrimRight(string(content), "\n"), cfgData)
testCase.expected(&cfgMap)
})
}
}
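
For reference, the dashboard ConfigMap checked by the folder test case looks roughly like the snippet below. It is a sketch reconstructed from the assertions above; the grafana_dashboard label value is an assumption, since the test only checks that the key exists, and the data payload is the dashboard JSON shipped with the chart.

# ConfigMap rendered with grafana.dashboards.enabled=true and folder=custom-folder
apiVersion: v1
kind: ConfigMap
metadata:
  name: falco-grafana-dashboard
  labels:
    grafana_dashboard: "1"    # assumption: only the key is asserted
  annotations:
    k8s-sidecar-target-directory: /tmp/dashboards/custom-folder
    grafana_dashboard_folder: custom-folder
data:
  falco-dashboard.json: |
    <contents of dashboards/falco-dashboard.json>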

View File

@ -1,210 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright 2024 The Falco Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package falcoTemplates
import (
"github.com/falcosecurity/charts/charts/falco/tests/unit"
"path/filepath"
"testing"
"github.com/gruntwork-io/terratest/modules/helm"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
corev1 "k8s.io/api/core/v1"
)
type metricsConfig struct {
Enabled bool `yaml:"enabled"`
ConvertMemoryToMB bool `yaml:"convert_memory_to_mb"`
IncludeEmptyValues bool `yaml:"include_empty_values"`
KernelEventCountersEnabled bool `yaml:"kernel_event_counters_enabled"`
KernelEventCountersPerCPUEnabled bool `yaml:"kernel_event_counters_per_cpu_enabled"`
ResourceUtilizationEnabled bool `yaml:"resource_utilization_enabled"`
RulesCountersEnabled bool `yaml:"rules_counters_enabled"`
LibbpfStatsEnabled bool `yaml:"libbpf_stats_enabled"`
OutputRule bool `yaml:"output_rule"`
StateCountersEnabled bool `yaml:"state_counters_enabled"`
Interval string `yaml:"interval"`
}
type webServerConfig struct {
Enabled bool `yaml:"enabled"`
K8sHealthzEndpoint string `yaml:"k8s_healthz_endpoint"`
ListenPort string `yaml:"listen_port"`
PrometheusMetricsEnabled bool `yaml:"prometheus_metrics_enabled"`
SSLCertificate string `yaml:"ssl_certificate"`
SSLEnabled bool `yaml:"ssl_enabled"`
Threadiness int `yaml:"threadiness"`
}
func TestMetricsConfigInFalcoConfig(t *testing.T) {
t.Parallel()
helmChartPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
testCases := []struct {
name string
values map[string]string
expected func(t *testing.T, metricsConfig, webServerConfig any)
}{
{
"defaultValues",
nil,
func(t *testing.T, metricsConfig, webServerConfig any) {
require.Len(t, metricsConfig, 11, "should have eleven items")
metrics, err := getMetricsConfig(metricsConfig)
require.NoError(t, err)
require.NotNil(t, metrics)
require.True(t, metrics.ConvertMemoryToMB)
require.False(t, metrics.Enabled)
require.False(t, metrics.IncludeEmptyValues)
require.True(t, metrics.KernelEventCountersEnabled)
require.True(t, metrics.ResourceUtilizationEnabled)
require.True(t, metrics.RulesCountersEnabled)
require.Equal(t, "1h", metrics.Interval)
require.True(t, metrics.LibbpfStatsEnabled)
require.True(t, metrics.OutputRule)
require.True(t, metrics.StateCountersEnabled)
require.False(t, metrics.KernelEventCountersPerCPUEnabled)
webServer, err := getWebServerConfig(webServerConfig)
require.NoError(t, err)
require.NotNil(t, webServer)
require.True(t, webServer.Enabled)
require.False(t, webServer.PrometheusMetricsEnabled)
},
},
{
"metricsEnabled",
map[string]string{
"metrics.enabled": "true",
},
func(t *testing.T, metricsConfig, webServerConfig any) {
require.Len(t, metricsConfig, 11, "should have eleven items")
metrics, err := getMetricsConfig(metricsConfig)
require.NoError(t, err)
require.NotNil(t, metrics)
require.True(t, metrics.ConvertMemoryToMB)
require.True(t, metrics.Enabled)
require.False(t, metrics.IncludeEmptyValues)
require.True(t, metrics.KernelEventCountersEnabled)
require.True(t, metrics.ResourceUtilizationEnabled)
require.True(t, metrics.RulesCountersEnabled)
require.Equal(t, "1h", metrics.Interval)
require.True(t, metrics.LibbpfStatsEnabled)
require.False(t, metrics.OutputRule)
require.True(t, metrics.StateCountersEnabled)
require.False(t, metrics.KernelEventCountersPerCPUEnabled)
webServer, err := getWebServerConfig(webServerConfig)
require.NoError(t, err)
require.NotNil(t, webServer)
require.True(t, webServer.Enabled)
require.True(t, webServer.PrometheusMetricsEnabled)
},
},
{
"Flip/Change Values",
map[string]string{
"metrics.enabled": "true",
"metrics.convertMemoryToMB": "false",
"metrics.includeEmptyValues": "true",
"metrics.kernelEventCountersEnabled": "false",
"metrics.resourceUtilizationEnabled": "false",
"metrics.rulesCountersEnabled": "false",
"metrics.libbpfStatsEnabled": "false",
"metrics.outputRule": "false",
"metrics.stateCountersEnabled": "false",
"metrics.interval": "1s",
"metrics.kernelEventCountersPerCPUEnabled": "true",
},
func(t *testing.T, metricsConfig, webServerConfig any) {
require.Len(t, metricsConfig, 11, "should have eleven items")
metrics, err := getMetricsConfig(metricsConfig)
require.NoError(t, err)
require.NotNil(t, metrics)
require.False(t, metrics.ConvertMemoryToMB)
require.True(t, metrics.Enabled)
require.True(t, metrics.IncludeEmptyValues)
require.False(t, metrics.KernelEventCountersEnabled)
require.False(t, metrics.ResourceUtilizationEnabled)
require.False(t, metrics.RulesCountersEnabled)
require.Equal(t, "1s", metrics.Interval)
require.False(t, metrics.LibbpfStatsEnabled)
require.False(t, metrics.OutputRule)
require.False(t, metrics.StateCountersEnabled)
require.True(t, metrics.KernelEventCountersPerCPUEnabled)
webServer, err := getWebServerConfig(webServerConfig)
require.NoError(t, err)
require.NotNil(t, webServer)
require.True(t, webServer.Enabled)
require.True(t, webServer.PrometheusMetricsEnabled)
},
},
}
for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.name, func(t *testing.T) {
t.Parallel()
options := &helm.Options{SetValues: testCase.values}
output := helm.RenderTemplate(t, options, helmChartPath, unit.ReleaseName, []string{"templates/configmap.yaml"})
var cm corev1.ConfigMap
helm.UnmarshalK8SYaml(t, output, &cm)
var config map[string]interface{}
helm.UnmarshalK8SYaml(t, cm.Data["falco.yaml"], &config)
metrics := config["metrics"]
webServer := config["webserver"]
testCase.expected(t, metrics, webServer)
})
}
}
func getMetricsConfig(config any) (*metricsConfig, error) {
var metrics metricsConfig
metricsByte, err := yaml.Marshal(config)
if err != nil {
return nil, err
}
if err := yaml.Unmarshal(metricsByte, &metrics); err != nil {
return nil, err
}
return &metrics, nil
}
func getWebServerConfig(config any) (*webServerConfig, error) {
var webServer webServerConfig
webServerByte, err := yaml.Marshal(config)
if err != nil {
return nil, err
}
if err := yaml.Unmarshal(webServerByte, &webServer); err != nil {
return nil, err
}
return &webServer, nil
}
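
For reference, the metrics and webserver sections of falco.yaml that these tests decode look roughly like the snippet below when the chart is rendered with default values; the layout is reconstructed from the assertions in the defaultValues case above.

# metrics and webserver sections of the rendered falco.yaml, default values
metrics:
  enabled: false
  interval: 1h
  convert_memory_to_mb: true
  include_empty_values: false
  kernel_event_counters_enabled: true
  kernel_event_counters_per_cpu_enabled: false
  resource_utilization_enabled: true
  rules_counters_enabled: true
  libbpf_stats_enabled: true
  output_rule: true
  state_counters_enabled: true
webserver:
  enabled: true
  prometheus_metrics_enabled: false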

View File

@ -1,60 +0,0 @@
package falcoTemplates
import (
"github.com/falcosecurity/charts/charts/falco/tests/unit"
"github.com/gruntwork-io/terratest/modules/helm"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
"path/filepath"
"strings"
"testing"
)
func TestServiceAccount(t *testing.T) {
t.Parallel()
helmChartPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
testCases := []struct {
name string
values map[string]string
expected func(t *testing.T, sa *corev1.ServiceAccount)
}{
{
"defaultValues",
nil,
func(t *testing.T, sa *corev1.ServiceAccount) {
require.Equal(t, sa.Name, "rendered-resources-falco")
},
},
{
"kind=auto",
map[string]string{
"serviceAccount.create": "false",
},
func(t *testing.T, sa *corev1.ServiceAccount) {
require.Equal(t, sa.Name, "")
},
},
}
for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.name, func(t *testing.T) {
t.Parallel()
options := &helm.Options{SetValues: testCase.values}
output, err := helm.RenderTemplateE(t, options, helmChartPath, unit.ReleaseName, []string{"templates/serviceaccount.yaml"})
if err != nil {
require.True(t, strings.Contains(err.Error(), "Error: could not find template templates/serviceaccount.yaml in chart"))
}
var sa corev1.ServiceAccount
helm.UnmarshalK8SYaml(t, output, &sa)
testCase.expected(t, &sa)
})
}
}

View File

@ -1,160 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright 2024 The Falco Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package falcoTemplates
import (
"encoding/json"
"github.com/falcosecurity/charts/charts/falco/tests/unit"
"path/filepath"
"reflect"
"testing"
"github.com/gruntwork-io/terratest/modules/helm"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
)
type serviceMonitorTemplateTest struct {
suite.Suite
chartPath string
releaseName string
namespace string
templates []string
}
func TestServiceMonitorTemplate(t *testing.T) {
t.Parallel()
chartFullPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
suite.Run(t, &serviceMonitorTemplateTest{
Suite: suite.Suite{},
chartPath: chartFullPath,
releaseName: "falco-test",
namespace: "falco-namespace-test",
templates: []string{"templates/serviceMonitor.yaml"},
})
}
func (s *serviceMonitorTemplateTest) TestCreationDefaultValues() {
// Render the servicemonitor and check that it has not been rendered.
_, err := helm.RenderTemplateE(s.T(), &helm.Options{}, s.chartPath, s.releaseName, s.templates)
s.Error(err, "should error")
s.Equal("error while running command: exit status 1; Error: could not find template templates/serviceMonitor.yaml in chart", err.Error())
}
func (s *serviceMonitorTemplateTest) TestEndpoint() {
defaultEndpointsJSON := `[
{
"port": "metrics",
"interval": "15s",
"scrapeTimeout": "10s",
"honorLabels": true,
"path": "/metrics",
"scheme": "http"
}
]`
var defaultEndpoints []monitoringv1.Endpoint
err := json.Unmarshal([]byte(defaultEndpointsJSON), &defaultEndpoints)
s.NoError(err)
options := &helm.Options{SetValues: map[string]string{"serviceMonitor.create": "true"}}
output := helm.RenderTemplate(s.T(), options, s.chartPath, s.releaseName, s.templates)
var svcMonitor monitoringv1.ServiceMonitor
helm.UnmarshalK8SYaml(s.T(), output, &svcMonitor)
s.Len(svcMonitor.Spec.Endpoints, 1, "should have only one endpoint")
s.True(reflect.DeepEqual(svcMonitor.Spec.Endpoints[0], defaultEndpoints[0]))
}
func (s *serviceMonitorTemplateTest) TestNamespaceSelector() {
selectorsLabelJson := `{
"app.kubernetes.io/instance": "my-falco",
"foo": "bar"
}`
options := &helm.Options{SetValues: map[string]string{"serviceMonitor.create": "true"},
SetJsonValues: map[string]string{"serviceMonitor.selector": selectorsLabelJson}}
output := helm.RenderTemplate(s.T(), options, s.chartPath, s.releaseName, s.templates)
var svcMonitor monitoringv1.ServiceMonitor
helm.UnmarshalK8SYaml(s.T(), output, &svcMonitor)
s.Len(svcMonitor.Spec.NamespaceSelector.MatchNames, 1)
s.Equal("default", svcMonitor.Spec.NamespaceSelector.MatchNames[0])
}
func (s *serviceMonitorTemplateTest) TestServiceMonitorSelector() {
testCases := []struct {
name string
values string
expected map[string]string
}{
{
"defaultValues",
"",
map[string]string{
"app.kubernetes.io/instance": "falco-test",
"app.kubernetes.io/name": "falco",
"type": "falco-metrics",
},
},
{
"customValues",
`{
"foo": "bar"
}`,
map[string]string{
"app.kubernetes.io/instance": "falco-test",
"app.kubernetes.io/name": "falco",
"foo": "bar",
"type": "falco-metrics",
},
},
{
"overwriteDefaultValues",
`{
"app.kubernetes.io/instance": "falco-overwrite",
"foo": "bar"
}`,
map[string]string{
"app.kubernetes.io/instance": "falco-overwrite",
"app.kubernetes.io/name": "falco",
"foo": "bar",
"type": "falco-metrics",
},
},
}
for _, testCase := range testCases {
testCase := testCase
s.Run(testCase.name, func() {
subT := s.T()
subT.Parallel()
options := &helm.Options{SetValues: map[string]string{"serviceMonitor.create": "true"},
SetJsonValues: map[string]string{"serviceMonitor.selector": testCase.values}}
output := helm.RenderTemplate(s.T(), options, s.chartPath, s.releaseName, s.templates)
var svcMonitor monitoringv1.ServiceMonitor
helm.UnmarshalK8SYaml(s.T(), output, &svcMonitor)
s.Equal(testCase.expected, svcMonitor.Spec.Selector.MatchLabels, "should be the same")
})
}
}
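
For reference, a ServiceMonitor rendered with serviceMonitor.create=true and the default selector looks roughly like the snippet below; it is a sketch assembled from the endpoint JSON and the selector/namespace assertions above.

# ServiceMonitor rendered with serviceMonitor.create=true, release name falco-test
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
spec:
  endpoints:
    - port: metrics
      path: /metrics
      scheme: http
      interval: 15s
      scrapeTimeout: 10s
      honorLabels: true
  namespaceSelector:
    matchNames:
      - default
  selector:
    matchLabels:
      app.kubernetes.io/instance: falco-test
      app.kubernetes.io/name: falco
      type: falco-metrics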

View File

@ -1,177 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright 2024 The Falco Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package falcoTemplates
import (
"fmt"
"github.com/falcosecurity/charts/charts/falco/tests/unit"
"path/filepath"
"testing"
"github.com/gruntwork-io/terratest/modules/helm"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
corev1 "k8s.io/api/core/v1"
)
type serviceTemplateTest struct {
suite.Suite
chartPath string
releaseName string
namespace string
templates []string
}
func TestServiceTemplate(t *testing.T) {
t.Parallel()
chartFullPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
suite.Run(t, &serviceTemplateTest{
Suite: suite.Suite{},
chartPath: chartFullPath,
releaseName: "falco-test",
namespace: "falco-namespace-test",
templates: []string{"templates/service.yaml"},
})
}
func (s *serviceTemplateTest) TestCreationDefaultValues() {
// Render the service and check that it has not been rendered.
_, err := helm.RenderTemplateE(s.T(), &helm.Options{}, s.chartPath, s.releaseName, s.templates)
s.Error(err, "should error")
s.Equal("error while running command: exit status 1; Error: could not find template templates/service.yaml in chart", err.Error())
}
func (s *serviceTemplateTest) TestDefaultLabelsValues() {
options := &helm.Options{SetValues: map[string]string{"metrics.enabled": "true"}}
output, err := helm.RenderTemplateE(s.T(), options, s.chartPath, s.releaseName, s.templates)
s.NoError(err, "should render template")
cInfo, err := unit.ChartInfo(s.T(), s.chartPath)
s.NoError(err)
// Get app version.
appVersion, found := cInfo["appVersion"]
s.True(found, "should find app version in chart info")
appVersion = appVersion.(string)
// Get chart version.
chartVersion, found := cInfo["version"]
s.True(found, "should find chart version in chart info")
// Get chart name.
chartName, found := cInfo["name"]
s.True(found, "should find chart name in chart info")
chartName = chartName.(string)
expectedLabels := map[string]string{
"helm.sh/chart": fmt.Sprintf("%s-%s", chartName, chartVersion),
"app.kubernetes.io/name": chartName.(string),
"app.kubernetes.io/instance": s.releaseName,
"app.kubernetes.io/version": appVersion.(string),
"app.kubernetes.io/managed-by": "Helm",
"type": "falco-metrics",
}
var svc corev1.Service
helm.UnmarshalK8SYaml(s.T(), output, &svc)
labels := svc.GetLabels()
for key, value := range labels {
expectedVal := expectedLabels[key]
s.Equal(expectedVal, value)
}
for key, value := range expectedLabels {
expectedVal := labels[key]
s.Equal(expectedVal, value)
}
}
func (s *serviceTemplateTest) TestCustomLabelsValues() {
options := &helm.Options{SetValues: map[string]string{"metrics.enabled": "true",
"metrics.service.labels.customLabel": "customLabelValues"}}
output, err := helm.RenderTemplateE(s.T(), options, s.chartPath, s.releaseName, s.templates)
s.NoError(err, "should render template")
cInfo, err := unit.ChartInfo(s.T(), s.chartPath)
s.NoError(err)
// Get app version.
appVersion, found := cInfo["appVersion"]
s.True(found, "should find app version in chart info")
appVersion = appVersion.(string)
// Get chart version.
chartVersion, found := cInfo["version"]
s.True(found, "should find chart version in chart info")
// Get chart name.
chartName, found := cInfo["name"]
s.True(found, "should find chart name in chart info")
chartName = chartName.(string)
expectedLabels := map[string]string{
"helm.sh/chart": fmt.Sprintf("%s-%s", chartName, chartVersion),
"app.kubernetes.io/name": chartName.(string),
"app.kubernetes.io/instance": s.releaseName,
"app.kubernetes.io/version": appVersion.(string),
"app.kubernetes.io/managed-by": "Helm",
"type": "falco-metrics",
"customLabel": "customLabelValues",
}
var svc corev1.Service
helm.UnmarshalK8SYaml(s.T(), output, &svc)
labels := svc.GetLabels()
for key, value := range labels {
expectedVal := expectedLabels[key]
s.Equal(expectedVal, value)
}
for key, value := range expectedLabels {
expectedVal := labels[key]
s.Equal(expectedVal, value)
}
}
func (s *serviceTemplateTest) TestDefaultAnnotationsValues() {
options := &helm.Options{SetValues: map[string]string{"metrics.enabled": "true"}}
output, err := helm.RenderTemplateE(s.T(), options, s.chartPath, s.releaseName, s.templates)
s.NoError(err)
var svc corev1.Service
helm.UnmarshalK8SYaml(s.T(), output, &svc)
s.Nil(svc.Annotations, "should be nil")
}
func (s *serviceTemplateTest) TestCustomAnnotationsValues() {
values := map[string]string{
"metrics.enabled": "true",
"metrics.service.annotations.annotation1": "customAnnotation1",
"metrics.service.annotations.annotation2": "customAnnotation2",
}
annotations := map[string]string{
"annotation1": "customAnnotation1",
"annotation2": "customAnnotation2",
}
options := &helm.Options{SetValues: values}
output, err := helm.RenderTemplateE(s.T(), options, s.chartPath, s.releaseName, s.templates)
s.NoError(err)
var svc corev1.Service
helm.UnmarshalK8SYaml(s.T(), output, &svc)
s.Len(svc.Annotations, 2)
for key, value := range svc.Annotations {
expectedVal := annotations[key]
s.Equal(expectedVal, value)
}
}

View File

@ -1,649 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright 2024 The Falco Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package k8smetaPlugin
import (
"encoding/json"
"fmt"
"path/filepath"
"regexp"
"strings"
"testing"
"github.com/falcosecurity/charts/charts/falco/tests/unit"
"slices"
"github.com/gruntwork-io/terratest/modules/helm"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
)
// Using the default values we want to test that none of the k8s-metacollector resources are rendered.
func TestRenderedResourcesWithDefaultValues(t *testing.T) {
t.Parallel()
helmChartPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
options := &helm.Options{}
// Template the chart using the default values.yaml file.
output, err := helm.RenderTemplateE(t, options, helmChartPath, unit.ReleaseName, nil)
require.NoError(t, err)
// Extract all rendered files from the output.
re := regexp.MustCompile(unit.PatternK8sMetacollectorFiles)
matches := re.FindAllStringSubmatch(output, -1)
require.Len(t, matches, 0)
}
func TestRenderedResourcesWhenNotEnabled(t *testing.T) {
t.Parallel()
helmChartPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
// Template files that we expect to be rendered.
templateFiles := []string{
"clusterrole.yaml",
"clusterrolebinding.yaml",
"deployment.yaml",
"service.yaml",
"serviceaccount.yaml",
}
require.NoError(t, err)
options := &helm.Options{SetValues: map[string]string{
"collectors.kubernetes.enabled": "true",
}}
// Template the chart using the default values.yaml file.
output, err := helm.RenderTemplateE(t, options, helmChartPath, unit.ReleaseName, nil)
require.NoError(t, err)
// Extract all rendered files from the output.
re := regexp.MustCompile(unit.PatternK8sMetacollectorFiles)
matches := re.FindAllStringSubmatch(output, -1)
var renderedTemplates []string
for _, match := range matches {
// Filter out test templates.
if !strings.Contains(match[1], "test-") {
renderedTemplates = append(renderedTemplates, match[1])
}
}
// Assert that the rendered resources are equal to the expected ones.
require.Equal(t, len(renderedTemplates), len(templateFiles), "should be equal")
for _, rendered := range renderedTemplates {
require.True(t, slices.Contains(templateFiles, rendered), "template files should contain all the rendered files")
}
}
func TestPluginConfigurationInFalcoConfig(t *testing.T) {
t.Parallel()
helmChartPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
testCases := []struct {
name string
values map[string]string
expected func(t *testing.T, config any)
}{
{
"defaultValues",
nil,
func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
// Get init config.
initConfig, ok := plugin["init_config"]
require.True(t, ok)
require.Len(t, initConfig, 5, "checking number of config entries in the init section")
initConfigMap := initConfig.(map[string]interface{})
// Check that the collector port is correctly set.
port := initConfigMap["collectorPort"]
require.Equal(t, float64(45000), port.(float64))
// Check that the collector nodeName is correctly set.
nodeName := initConfigMap["nodeName"]
require.Equal(t, "${FALCO_K8S_NODE_NAME}", nodeName.(string))
// Check that the collector hostname is correctly set.
hostName := initConfigMap["collectorHostname"]
require.Equal(t, fmt.Sprintf("%s-k8s-metacollector.default.svc", unit.ReleaseName), hostName.(string))
// Check that the loglevel has been set.
verbosity := initConfigMap["verbosity"]
require.Equal(t, "info", verbosity.(string))
// Check that host proc fs has been set.
hostProc := initConfigMap["hostProc"]
require.Equal(t, "/host", hostProc.(string))
// Check that the library path is set.
libPath := plugin["library_path"]
require.Equal(t, "libk8smeta.so", libPath)
},
},
{
"overrideK8s-metacollectorNamespace",
map[string]string{
"k8s-metacollector.namespaceOverride": "test",
},
func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
// Get init config.
initConfig, ok := plugin["init_config"]
require.True(t, ok)
require.Len(t, initConfig, 5, "checking number of config entries in the init section")
initConfigMap := initConfig.(map[string]interface{})
// Check that the collector port is correctly set.
port := initConfigMap["collectorPort"]
require.Equal(t, float64(45000), port.(float64))
// Check that the collector nodeName is correctly set.
nodeName := initConfigMap["nodeName"]
require.Equal(t, "${FALCO_K8S_NODE_NAME}", nodeName.(string))
// Check that the collector hostname is correctly set.
hostName := initConfigMap["collectorHostname"]
require.Equal(t, fmt.Sprintf("%s-k8s-metacollector.test.svc", unit.ReleaseName), hostName.(string))
// Check that the loglevel has been set.
verbosity := initConfigMap["verbosity"]
require.Equal(t, "info", verbosity.(string))
// Check that host proc fs has been set.
hostProc := initConfigMap["hostProc"]
require.Equal(t, "/host", hostProc.(string))
// Check that the library path is set.
libPath := plugin["library_path"]
require.Equal(t, "libk8smeta.so", libPath)
},
},
{
"overrideK8s-metacollectorName",
map[string]string{
"k8s-metacollector.fullnameOverride": "collector",
},
func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
// Get init config.
initConfig, ok := plugin["init_config"]
require.True(t, ok)
require.Len(t, initConfig, 5, "checking number of config entries in the init section")
initConfigMap := initConfig.(map[string]interface{})
// Check that the collector port is correctly set.
port := initConfigMap["collectorPort"]
require.Equal(t, float64(45000), port.(float64))
// Check that the collector nodeName is correctly set.
nodeName := initConfigMap["nodeName"]
require.Equal(t, "${FALCO_K8S_NODE_NAME}", nodeName.(string))
// Check that the collector hostname is correctly set.
hostName := initConfigMap["collectorHostname"]
require.Equal(t, "collector.default.svc", hostName.(string))
// Check that the loglevel has been set.
verbosity := initConfigMap["verbosity"]
require.Equal(t, "info", verbosity.(string))
// Check that host proc fs has been set.
hostProc := initConfigMap["hostProc"]
require.Equal(t, "/host", hostProc.(string))
// Check that the library path is set.
libPath := plugin["library_path"]
require.Equal(t, "libk8smeta.so", libPath)
},
},
{
"overrideK8s-metacollectorNamespaceAndName",
map[string]string{
"k8s-metacollector.namespaceOverride": "test",
"k8s-metacollector.fullnameOverride": "collector",
},
func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
// Get init config.
initConfig, ok := plugin["init_config"]
require.True(t, ok)
require.Len(t, initConfig, 5, "checking number of config entries in the init section")
initConfigMap := initConfig.(map[string]interface{})
// Check that the collector port is correctly set.
port := initConfigMap["collectorPort"]
require.Equal(t, float64(45000), port.(float64))
// Check that the collector nodeName is correctly set.
nodeName := initConfigMap["nodeName"]
require.Equal(t, "${FALCO_K8S_NODE_NAME}", nodeName.(string))
// Check that the collector hostname is correctly set.
hostName := initConfigMap["collectorHostname"]
require.Equal(t, "collector.test.svc", hostName.(string))
// Check that the loglevel has been set.
verbosity := initConfigMap["verbosity"]
require.Equal(t, "info", verbosity.(string))
// Check that host proc fs has been set.
hostProc := initConfigMap["hostProc"]
require.Equal(t, "/host", hostProc.(string))
// Check that the library path is set.
libPath := plugin["library_path"]
require.Equal(t, "libk8smeta.so", libPath)
},
},
{
"set CollectorHostname",
map[string]string{
"collectors.kubernetes.collectorHostname": "test",
},
func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
// Get init config.
initConfig, ok := plugin["init_config"]
require.True(t, ok)
require.Len(t, initConfig, 5, "checking number of config entries in the init section")
initConfigMap := initConfig.(map[string]interface{})
// Check that the collector port is correctly set.
port := initConfigMap["collectorPort"]
require.Equal(t, float64(45000), port.(float64))
// Check that the collector nodeName is correctly set.
nodeName := initConfigMap["nodeName"]
require.Equal(t, "${FALCO_K8S_NODE_NAME}", nodeName.(string))
// Check that the collector hostname is correctly set.
hostName := initConfigMap["collectorHostname"]
require.Equal(t, "test", hostName.(string))
// Check that the loglevel has been set.
verbosity := initConfigMap["verbosity"]
require.Equal(t, "info", verbosity.(string))
// Check that host proc fs has been set.
hostProc := initConfigMap["hostProc"]
require.Equal(t, "/host", hostProc.(string))
// Check that the library path is set.
libPath := plugin["library_path"]
require.Equal(t, "libk8smeta.so", libPath)
},
},
{
"set CollectorHostname and namespace name",
map[string]string{
"collectors.kubernetes.collectorHostname": "test-with-override",
"k8s-metacollector.namespaceOverride": "test",
"k8s-metacollector.fullnameOverride": "collector",
},
func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
// Get init config.
initConfig, ok := plugin["init_config"]
require.True(t, ok)
require.Len(t, initConfig, 5, "checking number of config entries in the init section")
initConfigMap := initConfig.(map[string]interface{})
// Check that the collector port is correctly set.
port := initConfigMap["collectorPort"]
require.Equal(t, float64(45000), port.(float64))
// Check that the collector nodeName is correctly set.
nodeName := initConfigMap["nodeName"]
require.Equal(t, "${FALCO_K8S_NODE_NAME}", nodeName.(string))
// Check that the collector hostname is correctly set.
hostName := initConfigMap["collectorHostname"]
require.Equal(t, "test-with-override", hostName.(string))
// Check that the loglevel has been set.
verbosity := initConfigMap["verbosity"]
require.Equal(t, "info", verbosity.(string))
// Check that host proc fs has been set.
hostProc := initConfigMap["hostProc"]
require.Equal(t, "/host", hostProc.(string))
// Check that the library path is set.
libPath := plugin["library_path"]
require.Equal(t, "libk8smeta.so", libPath)
},
},
{
"set collectorPort",
map[string]string{
"collectors.kubernetes.collectorPort": "8888",
},
func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
// Get init config.
initConfig, ok := plugin["init_config"]
require.True(t, ok)
initConfigMap := initConfig.(map[string]interface{})
// Check that the collector port is correctly set.
port := initConfigMap["collectorPort"]
require.Equal(t, float64(8888), port.(float64))
// Check that the collector nodeName is correctly set.
nodeName := initConfigMap["nodeName"]
require.Equal(t, "${FALCO_K8S_NODE_NAME}", nodeName.(string))
// Check that the collector hostname is correctly set.
hostName := initConfigMap["collectorHostname"]
require.Equal(t, fmt.Sprintf("%s-k8s-metacollector.default.svc", unit.ReleaseName), hostName.(string))
// Check that the loglevel has been set.
verbosity := initConfigMap["verbosity"]
require.Equal(t, "info", verbosity.(string))
// Check that host proc fs has been set.
hostProc := initConfigMap["hostProc"]
require.Equal(t, "/host", hostProc.(string))
// Check that the library path is set.
libPath := plugin["library_path"]
require.Equal(t, "libk8smeta.so", libPath)
},
},
{
"set collector logger level and hostProc",
map[string]string{
"collectors.kubernetes.verbosity": "trace",
"collectors.kubernetes.hostProc": "/host/test",
},
func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
// Get init config.
initConfig, ok := plugin["init_config"]
require.True(t, ok)
require.Len(t, initConfig, 5, "checking number of config entries in the init section")
initConfigMap := initConfig.(map[string]interface{})
// Check that the collector port is correctly set.
port := initConfigMap["collectorPort"]
require.Equal(t, float64(45000), port.(float64))
// Check that the collector nodeName is correctly set.
nodeName := initConfigMap["nodeName"]
require.Equal(t, "${FALCO_K8S_NODE_NAME}", nodeName.(string))
// Check that the collector hostname is correctly set.
hostName := initConfigMap["collectorHostname"]
require.Equal(t, fmt.Sprintf("%s-k8s-metacollector.default.svc", unit.ReleaseName), hostName.(string))
// Check that the loglevel has been set.
verbosity := initConfigMap["verbosity"]
require.Equal(t, "trace", verbosity.(string))
// Check that host proc fs has been set.
hostProc := initConfigMap["hostProc"]
require.Equal(t, "/host/test", hostProc.(string))
// Check that the library path is set.
libPath := plugin["library_path"]
require.Equal(t, "libk8smeta.so", libPath)
},
},
{
"driver disabled",
map[string]string{
"driver.enabled": "false",
},
func(t *testing.T, config any) {
require.Nil(t, config)
},
},
}
for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.name, func(t *testing.T) {
t.Parallel()
// Enable the collector.
if testCase.values != nil {
testCase.values["collectors.kubernetes.enabled"] = "true"
} else {
testCase.values = map[string]string{"collectors.kubernetes.enabled": "true"}
}
options := &helm.Options{SetValues: testCase.values}
output := helm.RenderTemplate(t, options, helmChartPath, unit.ReleaseName, []string{"templates/configmap.yaml"})
var cm corev1.ConfigMap
helm.UnmarshalK8SYaml(t, output, &cm)
var config map[string]interface{}
helm.UnmarshalK8SYaml(t, cm.Data["falco.yaml"], &config)
plugins := config["plugins"]
pluginsArray := plugins.([]interface{})
found := false
// Find the k8smeta plugin configuration.
for _, plugin := range pluginsArray {
if name, ok := plugin.(map[string]interface{})["name"]; ok && name == unit.K8sMetaPluginName {
testCase.expected(t, plugin)
found = true
}
}
if found {
// Check that the plugin has been added to the ones that need to be loaded.
loadplugins := config["load_plugins"]
require.True(t, slices.Contains(loadplugins.([]interface{}), unit.K8sMetaPluginName))
} else {
testCase.expected(t, nil)
loadplugins := config["load_plugins"]
require.True(t, !slices.Contains(loadplugins.([]interface{}), unit.K8sMetaPluginName))
}
})
}
}
// Test that the helper does not overwrite user's configuration.
func TestPluginConfigurationUniqueEntries(t *testing.T) {
t.Parallel()
pluginsJSON := `[
{
"init_config": null,
"library_path": "libk8saudit.so",
"name": "k8saudit",
"open_params": "http://:9765/k8s-audit"
},
{
"library_path": "libcloudtrail.so",
"name": "cloudtrail"
},
{
"init_config": "",
"library_path": "libjson.so",
"name": "json"
},
{
"init_config": {
"collectorHostname": "rendered-resources-k8s-metacollector.default.svc",
"collectorPort": 45000,
"nodeName": "${FALCO_K8S_NODE_NAME}"
},
"library_path": "libk8smeta.so",
"name": "k8smeta"
},
{
"init_config": {
"engines": {
"bpm": {
"enabled": false
},
"containerd": {
"enabled": true,
"sockets": [
"/run/containerd/containerd.sock"
]
},
"cri": {
"enabled": true,
"sockets": [
"/run/crio/crio.sock"
]
},
"docker": {
"enabled": true,
"sockets": [
"/var/run/docker.sock"
]
},
"libvirt_lxc": {
"enabled": false
},
"lxc": {
"enabled": false
},
"podman": {
"enabled": false,
"sockets": [
"/run/podman/podman.sock"
]
}
},
"hooks": [
"create"
],
"label_max_len": 100,
"with_size": false
},
"library_path": "libcontainer.so",
"name": "container"
}
]`
loadPluginsJSON := `[
"k8smeta",
"k8saudit",
"container"
]`
helmChartPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
options := &helm.Options{SetJsonValues: map[string]string{
"falco.plugins": pluginsJSON,
"falco.load_plugins": loadPluginsJSON,
}, SetValues: map[string]string{"collectors.kubernetes.enabled": "true"}}
output := helm.RenderTemplate(t, options, helmChartPath, unit.ReleaseName, []string{"templates/configmap.yaml"})
var cm corev1.ConfigMap
helm.UnmarshalK8SYaml(t, output, &cm)
var config map[string]interface{}
helm.UnmarshalK8SYaml(t, cm.Data["falco.yaml"], &config)
plugins := config["plugins"]
out, err := json.MarshalIndent(plugins, "", " ")
require.NoError(t, err)
require.Equal(t, pluginsJSON, string(out))
pluginsArray := plugins.([]interface{})
// Find the k8smeta plugin configuration.
numConfigK8smeta := 0
for _, plugin := range pluginsArray {
if name, ok := plugin.(map[string]interface{})["name"]; ok && name == unit.K8sMetaPluginName {
numConfigK8smeta++
}
}
require.Equal(t, 1, numConfigK8smeta)
// Check that the plugin has been added to the ones that need to be loaded.
loadplugins := config["load_plugins"]
require.Len(t, loadplugins.([]interface{}), 3)
require.True(t, slices.Contains(loadplugins.([]interface{}), unit.K8sMetaPluginName))
}
// Test that the falcoctl configuration contains the expected artifact references.
func TestFalcoctlRefs(t *testing.T) {
t.Parallel()
pluginsJSON := `[
{
"init_config": null,
"library_path": "libk8saudit.so",
"name": "k8saudit",
"open_params": "http://:9765/k8s-audit"
},
{
"library_path": "libcloudtrail.so",
"name": "cloudtrail"
},
{
"init_config": "",
"library_path": "libjson.so",
"name": "json"
},
{
"init_config": {
"collectorHostname": "rendered-resources-k8s-metacollector.default.svc",
"collectorPort": 45000,
"nodeName": "${FALCO_K8S_NODE_NAME}"
},
"library_path": "libk8smeta.so",
"name": "k8smeta"
}
]`
testFunc := func(t *testing.T, config any) {
// Get artifact configuration map.
configMap := config.(map[string]interface{})
artifactConfig := (configMap["artifact"]).(map[string]interface{})
// Test allowed types.
allowedTypes := artifactConfig["allowedTypes"]
require.Len(t, allowedTypes, 2)
require.True(t, slices.Contains(allowedTypes.([]interface{}), "plugin"))
require.True(t, slices.Contains(allowedTypes.([]interface{}), "rulesfile"))
// Test plugin reference.
refs := artifactConfig["install"].(map[string]interface{})["refs"].([]interface{})
require.Len(t, refs, 3)
require.True(t, slices.Contains(refs, "falco-rules:4"))
require.True(t, slices.Contains(refs, "ghcr.io/falcosecurity/plugins/plugin/k8smeta:0.3.1"))
require.True(t, slices.Contains(refs, "ghcr.io/falcosecurity/plugins/plugin/container:0.3.5"))
}
testCases := []struct {
name string
valuesJSON map[string]string
expected func(t *testing.T, config any)
}{
{
"defaultValues",
nil,
testFunc,
},
{
"setPluginConfiguration",
map[string]string{
"falco.plugins": pluginsJSON,
},
testFunc,
},
{
"driver disabled",
map[string]string{
"driver.enabled": "false",
},
func(t *testing.T, config any) {
// Get artifact configuration map.
configMap := config.(map[string]interface{})
artifactConfig := (configMap["artifact"]).(map[string]interface{})
// Test plugin reference.
refs := artifactConfig["install"].(map[string]interface{})["refs"].([]interface{})
require.True(t, !slices.Contains(refs, "ghcr.io/falcosecurity/plugins/plugin/k8smeta:0.1.0"))
},
},
}
helmChartPath, err := filepath.Abs(unit.ChartPath)
require.NoError(t, err)
for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.name, func(t *testing.T) {
t.Parallel()
options := &helm.Options{SetJsonValues: testCase.valuesJSON, SetValues: map[string]string{"collectors.kubernetes.enabled": "true"}}
output := helm.RenderTemplate(t, options, helmChartPath, unit.ReleaseName, []string{"templates/falcoctl-configmap.yaml"})
var cm corev1.ConfigMap
helm.UnmarshalK8SYaml(t, output, &cm)
var config map[string]interface{}
helm.UnmarshalK8SYaml(t, cm.Data["falcoctl.yaml"], &config)
testCase.expected(t, config)
})
}
}
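
For reference, the artifact section of falcoctl.yaml checked by testFunc above comes out roughly as follows with collectors.kubernetes.enabled=true and default values; this is reconstructed from the assertions and shows only the asserted keys.

# artifact section of the rendered falcoctl.yaml
artifact:
  allowedTypes:
    - rulesfile
    - plugin
  install:
    refs:
      - falco-rules:4
      - ghcr.io/falcosecurity/plugins/plugin/k8smeta:0.3.1
      - ghcr.io/falcosecurity/plugins/plugin/container:0.3.5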

View File

@ -1,63 +0,0 @@
# Default values to deploy Falco on GKE with gVisor.
# Affinity constraint for pods' scheduling.
# Needed to deploy Falco on the gVisor enabled nodes.
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: sandbox.gke.io/runtime
operator: In
values:
- gvisor
# Tolerations to allow Falco to run on Kubernetes 1.6 masters.
# Adds the necessary tolerations to allow Falco pods to be scheduled on the gVisor-enabled nodes.
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
- effect: NoSchedule
key: sandbox.gke.io/runtime
operator: Equal
value: gvisor
# Enable gVisor and set the appropriate paths.
driver:
enabled: true
kind: gvisor
gvisor:
runsc:
path: /home/containerd/usr/local/sbin
root: /run/containerd/runsc
config: /run/containerd/runsc/config.toml
# Enable the containerd collector to enrich the syscall events with metadata.
collectors:
enabled: true
containerd:
enabled: true
socket: /run/containerd/containerd.sock
falcoctl:
artifact:
install:
# -- Enable the init container. We do not recommend installing plugins for security reasons since they are executable objects.
# We install only "rulesfiles".
enabled: true
follow:
# -- Enable the sidecar container. We do not support it yet for plugins. It is used only for rules feeds such as the k8saudit-rules.
enabled: true
config:
artifact:
install:
# -- List of artifacts to be installed by the falcoctl init container.
# We do not recommend installing (or following) plugins for security reasons since they are executable objects.
refs: [falco-rules:4]
follow:
# -- List of artifacts to be followed by the falcoctl sidecar container.
# We do not recommend installing (or following) plugins for security reasons since they are executable objects.
refs: [falco-rules:4]
# Set this to true to force Falco to output the logs as soon as they are emitted.
tty: false
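A minimal sketch of installing the chart with this preset on a GKE cluster that has gVisor node pools; the values file name is an assumption for illustration:

```bash
# Assumes the file above is saved locally as values-gvisor-gke.yaml (illustrative name).
helm install falco falcosecurity/falco \
  --namespace falco --create-namespace \
  -f values-gvisor-gke.yaml
```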

View File

@ -1,58 +0,0 @@
# -- Disable the drivers since we want to deploy only the k8saudit plugin.
driver:
enabled: false
# -- Disable the collectors, no syscall events to enrich with metadata.
collectors:
enabled: false
# -- Deploy Falco as a deployment. One instance of Falco is enough, but the number of replicas is configurable.
controller:
kind: deployment
deployment:
# -- Number of replicas when installing Falco using a deployment. Change it if you really know what you are doing.
# For more info check the section on Plugins in the README.md file.
replicas: 1
falcoctl:
artifact:
install:
# -- Enable the init container.
enabled: true
follow:
# -- Enable the sidecar container.
enabled: true
config:
artifact:
install:
# -- List of artifacts to be installed by the falcoctl init container.
refs: [k8saudit-rules:0.11, k8saudit:0.11]
follow:
# -- List of artifacts to be followed by the falcoctl sidecar container.
refs: [k8saudit-rules:0.11]
services:
- name: k8saudit-webhook
type: NodePort
ports:
- port: 9765 # See plugin open_params
nodePort: 30007
protocol: TCP
falco:
rules_files:
- /etc/falco/k8s_audit_rules.yaml
- /etc/falco/rules.d
plugins:
- name: k8saudit
library_path: libk8saudit.so
init_config:
""
# maxEventBytes: 1048576
# sslCertificate: /etc/falco/falco.pem
open_params: "http://:9765/k8s-audit"
- name: json
library_path: libjson.so
init_config: ""
# Plugins that Falco will load. Note: the same plugins are installed by the falcoctl-artifact-install init container.
load_plugins: [k8saudit, json]
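For reference, a minimal sketch of the audit webhook kubeconfig that the kube-apiserver would point at the `k8saudit-webhook` NodePort service defined above; `<NODE_IP>` is a placeholder, and how the file is passed to `--audit-webhook-config-file` depends on your control-plane setup:

```yaml
apiVersion: v1
kind: Config
clusters:
  - name: falco-k8saudit
    cluster:
      # NodePort 30007 and the /k8s-audit path match the service and
      # open_params defined in the values above.
      server: http://<NODE_IP>:30007/k8s-audit
contexts:
  - name: default-context
    context:
      cluster: falco-k8saudit
      user: ""
current-context: default-context
users: []
preferences: {}
```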

View File

@ -1,62 +0,0 @@
# Enable the driver, and choose between the kernel module or the ebpf probe.
# Default value: kernel module.
driver:
enabled: true
kind: module
# Enable the collectors used to enrich the events with metadata.
# Check the values.yaml file for fine-grained options.
collectors:
enabled: true
# We set the controller to daemonset since we have the syscalls source enabled.
# It will ensure that every node on our cluster will be monitored by Falco.
# Please note that the api-server will use the "k8saudit-webhook" service to send
# audit logs to the Falco instances. That means that when we have multiple instances of Falco
# we cannot predict to which instance the audit logs will be sent. When testing, please check all
# the Falco instances to make sure that at least one of them has received the audit logs.
controller:
kind: daemonset
falcoctl:
artifact:
install:
# -- Enable the init container.
enabled: true
follow:
# -- Enable the sidecar container.
enabled: true
config:
artifact:
install:
# -- List of artifacts to be installed by the falcoctl init container.
refs: [falco-rules:4, k8saudit-rules:0.11, k8saudit:0.11]
follow:
# -- List of artifacts to be followed by the falcoctl sidecar container.
refs: [falco-rules:4, k8saudit-rules:0.11, k8saudit:0.11]
services:
- name: k8saudit-webhook
type: NodePort
ports:
- port: 9765 # See plugin open_params
nodePort: 30007
protocol: TCP
falco:
rules_files:
- /etc/falco/falco_rules.yaml
- /etc/falco/k8s_audit_rules.yaml
- /etc/falco/rules.d
plugins:
- name: k8saudit
library_path: libk8saudit.so
init_config:
""
# maxEventBytes: 1048576
# sslCertificate: /etc/falco/falco.pem
open_params: "http://:9765/k8s-audit"
- name: json
library_path: libjson.so
init_config: ""
load_plugins: [k8saudit, json]
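Because the webhook may reach any Falco pod, a quick way to see which instance received the audit events is to scan the logs of all pods at once; the namespace, label selector, and grep pattern below are assumptions based on the chart's standard labels:

```bash
# Grep all Falco pods for audit-related output; adjust namespace, selector,
# and pattern to match your release and rules.
kubectl logs -n falco -l app.kubernetes.io/name=falco --prefix | grep -i audit
```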

File diff suppressed because it is too large

View File

@ -1,753 +0,0 @@
# Change Log
This file documents all notable changes to Falcosidekick Helm Chart. The release
numbering uses [semantic versioning](http://semver.org).
Before release 0.1.20, the helm chart could be found in the `falcosidekick` [repository](https://github.com/falcosecurity/falcosidekick/tree/master/deploy/helm/falcosidekick).
## 0.10.2
- Add type information to `volumeClaimTemplates`.
## 0.10.1
- Add an "or" condition for `configmap-ui`
## 0.10.0
- Add new features to the Loki dashboard
## 0.9.11
- Add `customtags` setting
## 0.9.10
- Fix missing values in the README
## 0.9.9
- Added Azure Workload Identity for Falcosidekick
## 0.9.8
- Upgrade to Falcosidekick 2.31.1 (fix last release)
## 0.9.7
- Upgrade to Falcosidekick 2.31.1
## 0.9.6
- Upgrade to Falcosidekick 2.31.0
## 0.9.5
- Move the `prometheus.io/scrape` annotation to the default values, to allow overrides.
## 0.9.4
- Fix Prometheus metrics names in Prometheus Rule
## 0.9.3
- Add a Grafana dashboard for the Prometheus metrics
## 0.9.2
- Add new dashboard with Loki
## 0.9.1
- Upgrade to Falcosidekick 2.30.0
## 0.8.9
- Fix customConfig mount path for webui redis
## 0.8.8
- Fix customConfig template for webui redis
## 0.8.7
- Fix securityContext for webui initContainer
## 0.8.6
- Use of `redis-cli` by the initContainer of Falcosidekick-UI to wait until the redis is up and running
- Add the possibility to override the default redis server settings
- Allow to set up a password to use with an external redis
- Fix wrong value used for `OTLP_TRACES_PROTOCOL` env var
- Used names for the priorities in the prometheus rules
## 0.8.5
- Fix an issue with the by default missing custom CA cert
## 0.8.4
- Fix falcosidekick chart ignoring custom service type for webui redis
## 0.8.3
- Add a condition to create the secrets for the redis only if the webui is deployed
## 0.8.2
- Fix redis-availability check of the UI init-container in case externalRedis is enabled
## 0.8.1
- Allow to set resources, securityContext and image overwrite for wait-redis initContainer
## 0.8.0
- Upgrade to Falcosidekick 2.29.0
- Allow to set custom labels and annotations to set to all resources
- Allow to use an existing secrets and values for the env vars at the same time
- Fix missing ingressClassName settings in the values.yaml
- Add of an initContainer to check if the redis for falcosidekick-ui is up
## 0.7.22
- Upgrade redis-stack image to 7.2.0-v11
## 0.7.21
- Fix the Falco Sidekick WEBUI_URL secret value.
## 0.7.20
- Align Web UI service port from values.yaml file with Falco Sidekick WEBUI_URL secret value.
## 0.7.19
- Enhanced the service Monitor to support additional Properties.
- Fix the promql query for prometheusRules: FalcoErrorOutputEventsRateHigh.
## 0.7.18
- Fix PrometheusRule duplicate alert name
## 0.7.17
- Fix the labels for the serviceMonitor
## 0.7.16
- Fix the error with the `NOTES` (`index of untyped nil Use`) when the ingress is enabled to falcosidekick-ui
## 0.7.15
- Fix ServiceMonitor selector labels
## 0.7.14
- Fix duplicate component labels
## 0.7.13
- Fix ServiceMonitor port name and selector labels
## 0.7.12
- Align README values with the values.yaml file
## 0.7.11
- Fix a link in the falcosidekick README to the policy report output documentation
## 0.7.10
- Set Helm recommended labels (`app.kubernetes.io/name`, `app.kubernetes.io/instance`, `app.kubernetes.io/version`, `helm.sh/chart`, `app.kubernetes.io/part-of`, `app.kubernetes.io/managed-by`) using helpers.tpl
## 0.7.9
- noop change to the chart itself. Updated makefile.
## 0.7.8
- Fix the condition for missing cert files
## 0.7.7
- Support extraArgs in the helm chart
## 0.7.6
- Fix the behavior with the `AWS IRSA` with a new value `aws.config.useirsa`
- Add a section in the README to describe how to use a subpath for `Falcosidekick-ui` ingress
- Add a `ServiceMonitor` for prometheus-operator
- Add a `PrometheusRule` for prometheus-operator
## 0.7.5
- noop change just to test the ci
## 0.7.4
- Fix volume mount when `config.tlsserver.servercrt`, `config.tlsserver.serverkey` and `config.tlsserver.cacrt` variables are defined.
## 0.7.3
- Allow to set (m)TLS Server cryptographic material via `config.tlsserver.servercrt`, `config.tlsserver.serverkey` and `config.tlsserver.cacrt` variables or through `config.tlsserver.existingSecret` variables.
## 0.7.2
- Fix the wrong key of the secret for the user
## 0.7.1
- Allow to set a password `webui.redis.password` for Redis for `Falcosidekick-UI`
- The user for `Falcosidekick-UI` is now set with an env var from a secret
## 0.7.0
- Support configuration of revisionHistoryLimit of the deployments
## 0.6.3
- Update Falcosidekick to 2.28.0
- Add Mutual TLS Client config
- Add TLS Server config
- Add `bracketreplacer` config
- Add `customseveritymap` to `alertmanager` output
- Add Drop Event config to `alertmanager` output
- Add `customheaders` to `elasticsearch` output
- Add `customheaders` to `loki` output
- Add `customheaders` to `grafana` output
- Add `rolearn` and `externalid` for `aws` outputs
- Add `method` to `webhook` output
- Add `customattributes` to `gcp.pubsub` output
- Add `region` to `pagerduty` output
- Add `topiccreation` and `tls` to `kafka` output
- Add `Grafana OnCall` output
- Add `Redis` output
- Add `Telegram` output
- Add `N8N` output
- Add `OpenObserver` output
## 0.6.2
- Fix interpolation of `SYSLOG_PORT`
## 0.6.1
- Add `webui.allowcors` value for `Falcosidekick-UI`
## 0.6.0
- Change the docker image for the redis pod for falcosidekick-ui
## 0.5.16
- Add `affinity`, `nodeSelector` and `tolerations` values for the Falcosidekick test-connection pod
## 0.5.15
- Set extra labels and annotations for `AlertManager` only if they're not empty
## 0.5.14
- Fix Prometheus extralabels configuration in Falcosidekick
## 0.5.13
- Fix missing quotes in Falcosidekick-UI ttl argument
## 0.5.12
- Fix missing space in Falcosidekick-UI ttl argument
## 0.5.11
- Fix missing space in Falcosidekick-UI arguments
## 0.5.10
- upgrade Falcosidekick image to 2.27.0
- upgrade Falcosidekick-UI image to 2.1.0
- Add `Yandex Data Streams` output
- Add `Node-Red` output
- Add `MQTT` output
- Add `Zincsearch` output
- Add `Gotify` output
- Add `Spyderbat` output
- Add `Tekton` output
- Add `TimescaleDB` output
- Add `AWS Security Lake` output
- Add `config.templatedfields` to set templated fields
- Add `config.slack.channel` to override `Slack` channel
- Add `config.alertmanager.extralabels` and `config.alertmanager.extraannotations` for `AlertManager` output
- Add `config.influxdb.token`, `config.influxdb.organization` and `config.influxdb.precision` for `InfluxDB` output
- Add `config.aws.checkidentity` to disallow STS checks
- Add `config.smtp.authmechanism`, `config.smtp.token`, `config.smtp.identity`, `config.smtp.trace` to manage `SMTP` auth
- Update default doc type for `Elasticsearch`
- Add `config.loki.user`, `config.loki.apikey` to manage auth to Grafana Cloud for `Loki` output
- Add `config.kafka.sasl`, `config.kafka.async`, `config.kafka.compression`, `config.kafka.balancer`, `config.kafka.clientid` to manage auth and communication for `Kafka` output
- Add `config.syslog.format` to manage the format of `Syslog` payload
- Add `webui.ttl` to set TTL of keys in Falcosidekick-UI
- Add `webui.loglevel` to set log level in Falcosidekick-UI
- Add `webui.user` to set log user:password in Falcosidekick-UI
## 0.5.9
- Fix: remove `namespace` from `clusterrole` and `clusterrolebinding` metadata
## 0.5.8
- Support `storageEnabled` for `redis` to allow ephemeral installs
## 0.5.7
- Removing unused Kafka config values
## 0.5.6
- Fixing Syslog's port import in `secrets.yaml`
## 0.5.5
- Add `webui.externalRedis` with `enabled`, `url` and `port` to values to set an external Redis database with RediSearch > v2 for the WebUI
- Add `webui.redis.enabled` option to disable the deployment of the database.
- `webui.redis.enabled` and `webui.externalRedis.enabled` are mutually exclusive
## 0.5.4
- Upgrade image to fix Panic of `Prometheus` output when `customfields` is set
- Add `extralabels` for `Loki` and `Prometheus` outputs to set fields to use as labels
- Add `expiresafter` for `AlertManager` output
## 0.5.3
- Support full configuration of `securityContext` blocks in falcosidekick and falcosidekick-ui deployments, and redis statefulset.
## 0.5.2
- Update Falcosidekick-UI image (fix wrong redirect to localhost when an ingress is used)
## 0.5.1
- Support `ingressClassName` field in falcosidekick ingresses.
## 0.5.0
### Major Changes
- Add `Policy Report` output
- Add `Syslog` output
- Add `AWS Kinesis` output
- Add `Zoho Cliq` output
- Support IRSA for AWS authentication
- Upgrade Falcosidekick-UI to v2.0.1
### Minor changes
- Allow to set custom Labels for pods
## 0.4.5
- Allow additional service-ui annotations
## 0.4.4
- Fix output after chart installation when ingress is enabled
## 0.4.3
- Support `annotation` block in service
## 0.4.2
- Fix: Added the rule to use the podsecuritypolicy
- Fix: Added `ServiceAccountName` to the UI deployment
## 0.4.1
- Removes duplicate `Fission` keys from secret
## 0.4.0
### Major Changes
- Support Ingress API version `networking.k8s.io/v1`, see `ingress.hosts` and `webui.ingress.hosts` in [values.yaml](values.yaml) for a breaking change in the `path` parameter
## 0.3.17
- Fix: Remove the value for bucket of `Yandex S3`, it enabled the output by default
## 0.3.16
### Major Changes
- Fix: set correct new image 2.24.0
## 0.3.15
### Major Changes
- Add `Fission` output
## 0.3.14
### Major Changes
- Add `Grafana` output
- Add `Yandex Cloud S3` output
- Add `Kafka REST` output
### Minor changes
- Docker image is now available on AWS ECR Public Gallery (`--set image.registry=public.ecr.aws`)
## 0.3.13
### Minor changes
- Enable extra volumes and volumemounts for `falcosidekick` via values
## 0.3.12
- Add AWS configuration field `config.aws.rolearn`
## 0.3.11
### Minor changes
- Make image registries for `falcosidekick` and `falcosidekick-ui` configurable
## 0.3.10
### Minor changes
- Fix table formatting in `README.md`
## 0.3.9
### Fixes
- Add missing `imagePullSecrets` in `falcosidekick/templates/deployment-ui.yaml`
## 0.3.8
### Major Changes
- Add `GCP Cloud Run` output
- Add `GCP Cloud Functions` output
- Add `Wavefront` output
- Allow MutualTLS for some outputs
- Add basic auth for Elasticsearch output
## 0.3.7
### Minor changes
- Fix table formatting in `README.md`
- Fix `config.azure.eventHub` parameter name in `README.md`
## 0.3.6
### Fixes
- Point to the correct name of aadpodidentnity
## 0.3.5
### Minor Changes
- Fix link to Falco in the `README.md`
## 0.3.4
### Major Changes
- Bump up version (`v1.0.1`) of image for `falcosidekick-ui`
## 0.3.3
### Minor Changes
- Set default values for `OpenFaaS` output type parameters
- Fixes of documentation
## 0.3.2
### Fixes
- Add config checksum annotation to deployment pods to restart pods on config change
- Fix statsd config options in the secret to make them match the docs
## 0.3.1
### Fixes
- Fix for `s3.bucket`, it should be empty
## 0.3.0
### Major Changes
- Add `AWS S3` output
- Add `GCP Storage` output
- Add `RabbitMQ` output
- Add `OpenFaas` output
## 0.2.9
### Major Changes
- Updated falcosidekick-ui default image version to `v0.2.0`
## 0.2.8
### Fixes
- Fixed to specify `kafka.hostPort` instead of `kafka.url`
## 0.2.7
### Fixes
- Fixed missing hyphen in podidentity
## 0.2.6
### Fixes
- Fix repo and tag for `ui` image
## 0.2.5
### Major Changes
- Add `CLOUDEVENTS` output
- Add `WEBUI` output
### Minor Changes
- Add details about syntax for adding `custom_fields`
## 0.2.4
### Minor Changes
- Add `DATADOG_HOST` to secret
## 0.2.3
### Minor Changes
- Allow additional pod annotations
- Remove namespace condition in aad-pod-identity
## 0.2.2
### Major Changes
- Add `Kubeless` output
## 0.2.1
### Major Changes
- Add `PagerDuty` output
## 0.2.0
### Major Changes
- Add option to use an existing secret
- Add option to add extra environment variables
- Add `Stan` output
### Minor Changes
- Use the Existing secret resource and add all possible variables to there, and make it simpler to read and less error-prone in the deployment resource
## 0.1.37
### Minor Changes
- Fix aws keys not being added to the deployment
## 0.1.36
### Minor Changes
- Fix helm test
## 0.1.35
### Major Changes
- Update image to use release 2.19.1
## 0.1.34
- New outputs can be set : `Kafka`, `AWS CloudWatchLogs`
## 0.1.33
### Minor Changes
- Fixed GCP Pub/Sub values references in `deployment.yaml`
## 0.1.32
### Major Changes
- Support release namespace configuration
## 0.1.31
### Major Changes
- New outputs can be set : `Googlechat`
## 0.1.30
### Major changes
- New output can be set : `GCP PubSub`
- Custom Headers can be set for `Webhook` output
- Fix typo `aipKey` for OpsGenie output
## 0.1.29
- Fix falcosidekick configuration table to use full path of configuration properties in the `README.md`
## 0.1.28
### Major changes
- New output can be set : `AWS SNS`
- Metrics in `prometheus` format can be scraped from the `/metrics` URI
## 0.1.27
### Minor Changes
- Replace extensions apiGroup/apiVersion because of deprecation
## 0.1.26
### Minor Changes
- Allow the creation of a PodSecurityPolicy, disabled by default
## 0.1.25
### Minor Changes
- Allow the configuration of the Pod securityContext, set default runAsUser and fsGroup values
## 0.1.24
### Minor Changes
- Remove duplicated `webhook` block in `values.yaml`
## 0.1.23
- fake release for triggering CI for auto-publishing
## 0.1.22
### Major Changes
- Add `imagePullSecrets`
## 0.1.21
### Minor Changes
- Fix `Azure Identity` case-sensitive value
## 0.1.20
### Major Changes
- New outputs can be set : `Azure Event Hubs`, `Discord`
### Minor Changes
- Fix wrong port name in output
## 0.1.17
### Major Changes
- New outputs can be set : `Mattermost`, `Rocketchat`
## 0.1.11
### Major Changes
- Add Pod Security Policy
## 0.1.11
### Minor Changes
- Fix wrong value reference for Elasticsearch output in deployment.yaml
## 0.1.10
### Major Changes
- New output can be set : `DogStatsD`
## 0.1.9
### Major Changes
- New output can be set : `StatsD`
## 0.1.7
### Major Changes
- New output can be set : `Opsgenie`
## 0.1.6
### Major Changes
- New output can be set : `NATS`
## 0.1.5
### Major Changes
- `Falcosidekick` and its chart are now part of `falcosecurity` organization
## 0.1.4
### Minor Changes
- Use more recent image with `Golang` 1.14
## 0.1.3
### Major Changes
- New output can be set : `Loki`
## 0.1.2
### Major Changes
- New output can be set : `SMTP`
## 0.1.1
### Major Changes
- New outputs can be set : `AWS Lambda`, `AWS SQS`, `Teams`
## 0.1.0
### Major Changes
- Initial release of Falcosidekick Helm Chart

View File

@ -1,187 +0,0 @@
# Falcosidekick
![falcosidekick](https://github.com/falcosecurity/falcosidekick/raw/master/imgs/falcosidekick_color.png)
![release](https://flat.badgen.net/github/release/falcosecurity/falcosidekick/latest?color=green) ![last commit](https://flat.badgen.net/github/last-commit/falcosecurity/falcosidekick) ![licence](https://flat.badgen.net/badge/license/MIT/blue) ![docker pulls](https://flat.badgen.net/docker/pulls/falcosecurity/falcosidekick?icon=docker)
## Description
A simple daemon for connecting [`Falco`](https://github.com/falcosecurity/falco) to your ecosystem. It takes `Falco`'s events and
forwards them to different outputs in a fan-out way.
It works as a single endpoint for as many `Falco` instances as you want:
![falco_with_falcosidekick](https://github.com/falcosecurity/falcosidekick/raw/master/imgs/falco_with_falcosidekick.png)
## Outputs
`Falcosidekick` manages a large variety of outputs with different purposes.
> **Note**
Follow the links to get the configuration of each output.
### Chat
- [**Slack**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/slack.md)
- [**Rocketchat**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/rocketchat.md)
- [**Mattermost**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/mattermost.md)
- [**Teams**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/teams.md)
- [**Discord**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/discord.md)
- [**Google Chat**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/googlechat.md)
- [**Zoho Cliq**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/cliq.md)
- [**Telegram**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/telegram.md)
### Metrics / Observability
- [**Datadog**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/datadog.md)
- [**Influxdb**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/influxdb.md)
- [**StatsD**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/statsd.md) (for monitoring of `falcosidekick`)
- [**DogStatsD**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/dogstatsd.md) (for monitoring of `falcosidekick`)
- [**Prometheus**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/prometheus.md) (for both events and monitoring of `falcosidekick`)
- [**Wavefront**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/wavefront.md)
- [**Spyderbat**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/spyderbat.md)
- [**TimescaleDB**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/timescaledb.md)
- [**Dynatrace**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/dynatrace.md)
### Alerting
- [**AlertManager**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/alertmanager.md)
- [**Opsgenie**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/opsgenie.md)
- [**PagerDuty**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/pagerduty.md)
- [**Grafana OnCall**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/grafana_oncall.md)
### Logs
- [**Elasticsearch**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/elasticsearch.md)
- [**Loki**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/loki.md)
- [**AWS CloudWatchLogs**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/aws_cloudwatch_logs.md)
- [**Grafana**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/grafana.md)
- [**Syslog**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/syslog.md)
- [**Zincsearch**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs//zincsearch.md)
- [**OpenObserve**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/openobserve.md)
### Object Storage
- [**AWS S3**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/aws_s3.md)
- [**GCP Storage**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/gcp_storage.md)
- [**Yandex S3 Storage**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/yandex_s3.md)
### FaaS / Serverless
- [**AWS Lambda**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/aws_lambda.md)
- [**GCP Cloud Run**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/gcp_cloud_run.md)
- [**GCP Cloud Functions**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/gcp_cloud_functions.md)
- [**Fission**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/fission.md)
- [**KNative (CloudEvents)**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/cloudevents.md)
- [**Kubeless**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/kubeless.md)
- [**OpenFaaS**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/openfaas.md)
- [**Tekton**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/tekton.md)
### Message queue / Streaming
- [**NATS**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/nats.md)
- [**STAN (NATS Streaming)**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/stan.md)
- [**AWS SQS**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/aws_sqs.md)
- [**AWS SNS**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/aws_sns.md)
- [**AWS Kinesis**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/aws_kinesis.md)
- [**GCP PubSub**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/gcp_pub_sub.md)
- [**Apache Kafka**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/kafka.md)
- [**Kafka Rest Proxy**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/kafkarest.md)
- [**RabbitMQ**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/rabbitmq.md)
- [**Azure Event Hubs**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/azure_event_hub.md)
- [**Yandex Data Streams**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/yandex_datastreams.md)
- [**MQTT**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/mqtt.md)
- [**Gotify**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/gotify.md)
### Email
- [**SMTP**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/smtp.md)
### Database
- [**Redis**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/redis.md)
### Web
- [**Webhook**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/webhook.md)
- [**Node-RED**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/nodered.md)
- [**WebUI (Falcosidekick UI)**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/falcosidekick-ui.md)
### SIEM
- [**AWS Security Lake**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/aws_security_lake.md)
### Workflow
- [**n8n**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/n8n.md)
### Other
- [**Policy Report**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/policy_report.md)
## Adding `falcosecurity` repository
Before installing the chart, add the `falcosecurity` charts repository:
```bash
helm repo add falcosecurity https://falcosecurity.github.io/charts
helm repo update
```
## Installing the Chart
### Install Falco + Falcosidekick + Falcosidekick-ui
To install the chart with the release name `falcosidekick` run:
```bash
helm install falcosidekick falcosecurity/falcosidekick --set webui.enabled=true
```
### With Helm chart of Falco
`Falco`, `Falcosidekick` and `Falcosidekick-ui` can be installed together in one command. All values to configure `Falcosidekick` will have to be
prefixed with `falcosidekick.`.
```bash
helm install falco falcosecurity/falco --set falcosidekick.enabled=true --set falcosidekick.webui.enabled=true
```
After a few seconds, Falcosidekick should be running.
> **Tip**: List all releases using `helm list`; a release is a name used to track a specific deployment
## Minimum Kubernetes version
The minimum Kubernetes version required is 1.17.x
## Uninstalling the Chart
To uninstall the `falcosidekick` deployment:
```bash
helm uninstall falcosidekick
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
The following table lists the main configurable parameters of the Falcosidekick chart and their default values. See `values.yaml` for the full list.
{{ template "chart.valuesSection" . }}
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`.
> **Tip**: You can use the default [values.yaml](values.yaml)
## Metrics
A `prometheus` endpoint can be scraped at `/metrics`.
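If the Prometheus Operator CRDs are installed in the cluster, the chart can also create a `ServiceMonitor` for this endpoint; a minimal sketch, with an illustrative release name:

```bash
helm upgrade --install falcosidekick falcosecurity/falcosidekick \
  --set serviceMonitor.enabled=true
```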
## Access Falcosidekick UI through an Ingress and a subpath
You may want to access the [`WebUI (Falcosidekick UI)`](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/falcosidekick-ui.md) dashboard not from `/` but from `/subpath`, and use an Ingress. Here's an example of annotations to add to the Ingress for the `nginx-ingress` controller:
```yaml
nginx.ingress.kubernetes.io/rewrite-target: /$2
nginx.ingress.kubernetes.io/use-regex: "true"
```
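For illustration, a minimal Ingress sketch that combines these annotations with a rewrite from `/subpath`; the backend service name and port are assumptions and should match the UI Service created by your release:

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: falcosidekick-ui
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /$2
    nginx.ingress.kubernetes.io/use-regex: "true"
spec:
  ingressClassName: nginx
  rules:
    - http:
        paths:
          - path: /subpath(/|$)(.*)
            pathType: ImplementationSpecific
            backend:
              service:
                name: falcosidekick-ui   # assumption: name of your release's UI service
                port:
                  number: 2802           # assumption: default UI service port
```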

View File

@ -1,714 +0,0 @@
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"id": 5,
"links": [],
"panels": [
{
"datasource": {
"default": false,
"type": "loki",
"uid": "${datasource}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
}
},
"mappings": []
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 0
},
"id": 1,
"options": {
"legend": {
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"pieType": "pie",
"reduceOptions": {
"calcs": [
"sum"
],
"fields": "",
"values": false
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "11.2.0",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "${datasource}"
},
"editorMode": "builder",
"expr": "count by(priority) (rate({priority=~\".+\"} | logfmt | k8s_ns =~ `$namespace` | priority =~ `$priority` [$__auto]))",
"legendFormat": "{{priority}}",
"queryType": "range",
"refId": "A"
}
],
"title": "Priority counts",
"type": "piechart"
},
{
"datasource": {
"default": false,
"type": "loki",
"uid": "${datasource}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
}
},
"mappings": []
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 0
},
"id": 2,
"options": {
"displayLabels": [
"value",
"percent"
],
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true,
"values": []
},
"pieType": "pie",
"reduceOptions": {
"calcs": [
"sum"
],
"fields": "",
"values": false
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "11.2.0",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "${datasource}"
},
"editorMode": "builder",
"expr": "count by(rule) (rate({priority=~\".+\", rule!=\"Falco internal: metrics snapshot\"} | logfmt | k8s_ns =~ `$namespace` | priority =~ `$priority` [$__auto]))",
"legendFormat": "{{priority}}",
"queryType": "range",
"refId": "A"
}
],
"title": "Rules counts",
"type": "piechart"
},
{
"datasource": {
"default": false,
"type": "loki",
"uid": "${datasource}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"custom": {
"align": "left",
"cellOptions": {
"type": "auto",
"wrapText": false
},
"filterable": true,
"inspect": false
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "Value #A"
},
"properties": [
{
"id": "displayName",
"value": "Number of Messages"
}
]
},
{
"matcher": {
"id": "byName",
"options": "Time"
},
"properties": [
{
"id": "custom.hidden",
"value": true
}
]
},
{
"matcher": {
"id": "byName",
"options": "k8s_ns"
},
"properties": [
{
"id": "custom.width",
"value": 96
}
]
},
{
"matcher": {
"id": "byName",
"options": "priority"
},
"properties": [
{
"id": "custom.width",
"value": 91
}
]
},
{
"matcher": {
"id": "byName",
"options": "rule"
},
"properties": [
{
"id": "custom.width",
"value": 450
}
]
},
{
"matcher": {
"id": "byName",
"options": "k8s_pod_name"
},
"properties": [
{
"id": "custom.width",
"value": 184
}
]
}
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 8
},
"id": 5,
"options": {
"cellHeight": "sm",
"footer": {
"countRows": false,
"enablePagination": false,
"fields": "",
"reducer": [
"last"
],
"show": false
},
"showHeader": true,
"sortBy": [
{
"desc": false,
"displayName": "k8s_pod_name"
}
]
},
"pluginVersion": "11.2.0",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "${datasource}"
},
"editorMode": "builder",
"expr": "count by(k8s_pod_name, rule, priority, k8s_ns) (rate({priority=~\".+\"} | logfmt | k8s_ns =~ `$namespace` | priority =~ `$priority` [$__auto]))",
"legendFormat": "",
"queryType": "instant",
"refId": "A"
}
],
"transformations": [
{
"id": "sortBy",
"options": {
"fields": {},
"sort": [
{
"desc": true,
"field": "Value #A"
}
]
}
}
],
"type": "table"
},
{
"datasource": {
"default": false,
"type": "loki",
"uid": "${datasource}"
},
"gridPos": {
"h": 7,
"w": 24,
"x": 0,
"y": 16
},
"id": 6,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": false,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"pluginVersion": "11.2.0",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "${datasource}"
},
"direction": "backward",
"editorMode": "builder",
"expr": "{priority=~\".+\"} |= `$line_filter` | logfmt | k8s_ns =~ `$namespace` | priority =~ `$priority`",
"queryType": "range",
"refId": "A"
}
],
"title": "Realtime logs",
"type": "logs"
},
{
"datasource": {
"default": false,
"type": "loki",
"uid": "${datasource}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 100,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "stepBefore",
"lineStyle": {
"fill": "solid"
},
"lineWidth": 1,
"pointSize": 4,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "normal"
},
"thresholdsStyle": {
"mode": "off"
}
},
"fieldMinMax": false,
"mappings": [],
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green"
},
{
"color": "red",
"value": 80
}
]
},
"unit": "none"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 23
},
"id": 7,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "loki",
"uid": "loki"
},
"editorMode": "builder",
"expr": "count by(priority) (rate({priority=~\".+\"} | logfmt | k8s_ns =~ `$namespace` | priority =~ `$priority` [1m]))",
"legendFormat": "{{priority}}",
"queryType": "range",
"refId": "A"
}
],
"title": "Priorities Rates",
"type": "timeseries"
},
{
"datasource": {
"default": false,
"type": "loki",
"uid": "${datasource}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 100,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "stepBefore",
"lineStyle": {
"fill": "solid"
},
"lineWidth": 1,
"pointSize": 4,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "normal"
},
"thresholdsStyle": {
"mode": "off"
}
},
"fieldMinMax": false,
"mappings": [],
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green"
},
{
"color": "red",
"value": 80
}
]
},
"unit": "none"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 23
},
"id": 8,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "loki",
"uid": "loki"
},
"editorMode": "builder",
"expr": "count by(rule) (rate({priority=~\".+\"} | logfmt | k8s_ns =~ `$namespace` | priority =~ `$priority` [1m]))",
"legendFormat": "{{priority}}",
"queryType": "range",
"refId": "A"
}
],
"title": "Rules Rates",
"type": "timeseries"
}
],
"refresh": "auto",
"schemaVersion": 39,
"tags": [],
"templating": {
"list": [
{
"allValue": "",
"current": {
"selected": true,
"text": [
"arr",
"core",
"falco",
"kube-system",
"media",
"monitoring",
"rook",
"rook-cluster",
"storage",
"utilities",
"webs"
],
"value": [
"arr",
"core",
"falco",
"kube-system",
"media",
"monitoring",
"rook",
"rook-cluster",
"storage",
"utilities",
"webs"
]
},
"datasource": {
"type": "loki",
"uid": "${datasource}"
},
"definition": "",
"description": "",
"hide": 0,
"includeAll": false,
"label": "namespace",
"multi": true,
"name": "namespace",
"options": [],
"query": {
"label": "namespace",
"refId": "LokiVariableQueryEditor-VariableQuery",
"stream": "",
"type": 1
},
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 0,
"type": "query"
},
{
"current": {
"selected": false,
"text": "Loki",
"value": "loki"
},
"hide": 0,
"includeAll": false,
"label": "datasource",
"multi": false,
"name": "datasource",
"options": [],
"query": "loki",
"queryValue": "",
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"type": "datasource"
},
{
"current": {
"selected": true,
"text": [
"Critical"
],
"value": [
"Critical"
]
},
"datasource": {
"type": "loki",
"uid": "${datasource}"
},
"definition": "",
"hide": 0,
"includeAll": true,
"label": "priority",
"multi": true,
"name": "priority",
"options": [],
"query": {
"label": "priority",
"refId": "LokiVariableQueryEditor-VariableQuery",
"stream": "",
"type": 1
},
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 0,
"type": "query"
},
{
"current": {
"selected": false,
"text": "",
"value": ""
},
"description": "Text to filter lines",
"hide": 0,
"label": "line_filter",
"name": "line_filter",
"options": [
{
"selected": true,
"text": "",
"value": ""
}
],
"query": "",
"skipUrlSync": false,
"type": "textbox"
}
]
},
"time": {
"from": "now-24h",
"to": "now"
},
"timepicker": {},
"timezone": "browser",
"title": "Falco logs",
"uid": "de6ixj4nl1kowc",
"version": 2,
"weekStart": ""
}

View File

@ -1,26 +0,0 @@
{{- if and .Values.config.tlsserver.serverkey .Values.config.tlsserver.servercrt .Values.config.tlsserver.cacrt }}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "falcosidekick.fullname" . }}-certs
namespace: {{ .Release.Namespace }}
labels:
{{- include "falcosidekick.labels" . | nindent 4 }}
app.kubernetes.io/component: core
{{- with .Values.customLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
annotations:
{{- with .Values.customAnnotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
type: Opaque
data:
{{ $key := .Values.config.tlsserver.serverkey }}
server.key: {{ $key | b64enc | quote }}
{{ $crt := .Values.config.tlsserver.servercrt }}
server.crt: {{ $crt | b64enc | quote }}
falcosidekick.pem: {{ print $key $crt | b64enc | quote }}
ca.crt: {{ .Values.config.tlsserver.cacrt | b64enc | quote }}
ca.pem: {{ .Values.config.tlsserver.cacrt | b64enc | quote }}
{{- end }}
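This Secret only renders when all three cryptographic values are provided; a hedged values sketch, where the PEM contents are placeholders:

```yaml
config:
  tlsserver:
    deploy: true        # mounts the generated Secret into the deployment
    servercrt: |
      -----BEGIN CERTIFICATE-----
      ...placeholder...
      -----END CERTIFICATE-----
    serverkey: |
      -----BEGIN PRIVATE KEY-----
      ...placeholder...
      -----END PRIVATE KEY-----
    cacrt: |
      -----BEGIN CERTIFICATE-----
      ...placeholder...
      -----END CERTIFICATE-----
```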

View File

@ -1,28 +0,0 @@
{{- if .Values.grafana.dashboards.enabled -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Values.grafana.dashboards.configMaps.falcosidekick.name }}
{{ if .Values.grafana.dashboards.configMaps.falcosidekick.namespace }}
namespace: {{ .Values.grafana.dashboards.configMaps.falcosidekick.namespace }}
{{- else -}}
namespace: {{ .Release.Namespace }}
{{- end }}
labels:
{{- include "falcosidekick.labels" . | nindent 4 }}
grafana_dashboard: "1"
{{- with .Values.customLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
annotations:
{{- if .Values.grafana.dashboards.configMaps.falcosidekick.folder }}
k8s-sidecar-target-directory: /tmp/dashboards/{{ .Values.grafana.dashboards.configMaps.falcosidekick.folder}}
grafana_dashboard_folder: {{ .Values.grafana.dashboards.configMaps.falcosidekick.folder }}
{{- end }}
{{- with .Values.customAnnotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
data:
falco-dashboard.json: |-
{{- .Files.Get "dashboards/falcosidekick-grafana-dashboard.json" | nindent 4 }}
{{- end -}}

View File

@ -1,46 +0,0 @@
{{- if and (.Values.webui.enabled) (or (.Values.webui.redis.enabled) (.Values.webui.externalRedis.enabled)) -}}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "falcosidekick.fullname" . }}-ui-redis
namespace: {{ .Release.Namespace }}
labels:
{{- include "falcosidekick.labels" . | nindent 4 }}
app.kubernetes.io/component: ui-redis
data:
{{- if .Values.webui.redis.customConfig }}
redis-stack.conf: |-
{{ range .Values.webui.redis.customConfig }}
{{- . }}
{{ end -}}
{{- end }}
ping-redis.sh: |-
#!/bin/bash
for i in {1..10};
do
response=$(
timeout -s 3 30 \
redis-cli \
{{- if .Values.webui.redis.enabled }}
-h {{ include "falcosidekick.fullname" . }}-ui-redis -p 6379 \
{{- if .Values.webui.redis.password }}
-a ${REDIS_PASSWORD} \
{{- end }}
{{- end }}
{{- if .Values.webui.externalRedis.enabled }}
-h {{ .Values.webui.externalRedis.url }} \
-p {{ .Values.webui.externalRedis.port }} \
{{- if .Values.webui.externalRedis.password }}
-a ${REDIS_PASSWORD} \
{{- end }}
{{- end }}
ping
)
if [ "$response" = "PONG" ]; then
exit 0
fi
sleep 3
done
exit 1
{{- end }}

View File

@ -1,192 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "falcosidekick.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "falcosidekick.labels" . | nindent 4 }}
app.kubernetes.io/component: core
{{- with .Values.customLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
annotations:
{{- with .Values.customAnnotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
replicas: {{ .Values.replicaCount }}
{{- if .Values.revisionHistoryLimit }}
revisionHistoryLimit: {{ .Values.revisionHistoryLimit }}
{{- end }}
selector:
matchLabels:
{{- include "falcosidekick.selectorLabels" . | nindent 6 }}
app.kubernetes.io/component: core
template:
metadata:
labels:
{{- include "falcosidekick.labels" . | nindent 8 }}
app.kubernetes.io/component: core
{{- if and .Values.config.azure.podIdentityClientID .Values.config.azure.podIdentityName }}
aadpodidbinding: {{ include "falcosidekick.fullname" . }}
{{- end }}
{{- if .Values.config.azure.workloadIdentityClientID }}
azure.workload.identity/use: "true"
{{- end }}
{{- if .Values.podLabels }}
{{ toYaml .Values.podLabels | nindent 8 }}
{{- end }}
annotations:
checksum/config: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }}
{{- if .Values.podAnnotations }}
{{ toYaml .Values.podAnnotations | nindent 8 }}
{{- end }}
spec:
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{- range .Values.imagePullSecrets }}
- name: {{ . }}
{{- end }}
{{- end }}
serviceAccountName: {{ include "falcosidekick.fullname" . }}
{{- if .Values.priorityClassName }}
priorityClassName: "{{ .Values.priorityClassName }}"
{{- end }}
{{- if .Values.podSecurityContext }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8}}
{{- end }}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: 2801
protocol: TCP
{{- if .Values.config.tlsserver.deploy }}
- name: http-notls
containerPort: 2810
protocol: TCP
{{- end }}
livenessProbe:
httpGet:
path: /ping
{{- if .Values.config.tlsserver.deploy }}
port: http-notls
{{- else }}
port: http
{{- end }}
initialDelaySeconds: 10
periodSeconds: 5
readinessProbe:
httpGet:
path: /ping
{{- if .Values.config.tlsserver.deploy }}
port: http-notls
{{- else }}
port: http
{{- end }}
initialDelaySeconds: 10
periodSeconds: 5
{{- if .Values.securityContext }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
{{- end }}
{{- if .Values.config.extraArgs }}
args:
{{ toYaml .Values.config.extraArgs | nindent 12 }}
{{- end }}
envFrom:
- secretRef:
name: {{ include "falcosidekick.fullname" . }}
{{- if .Values.config.existingSecret }}
- secretRef:
name: {{ .Values.config.existingSecret }}
{{- end }}
env:
- name: DEBUG
value: {{ .Values.config.debug | quote }}
- name: CUSTOMFIELDS
value: {{ .Values.config.customfields | quote }}
- name: TEMPLATEDFIELDS
value: {{ .Values.config.templatedfields | quote }}
- name: CUSTOMTAGS
value: {{ .Values.config.customtags | quote }}
- name: OUTPUTFIELDFORMAT
value: {{ .Values.config.outputFieldFormat | quote }}
- name: BRACKETREPLACER
value: {{ .Values.config.bracketreplacer | quote }}
- name: MUTUALTLSFILESPATH
value: {{ .Values.config.mutualtlsfilespath | quote }}
- name: MUTUALTLSCLIENT_CERTFILE
value: {{ .Values.config.mutualtlsclient.certfile | quote }}
- name: MUTUALTLSCLIENT_KEYFILE
value: {{ .Values.config.mutualtlsclient.keyfile | quote }}
- name: MUTUALTLSCLIENT_CACERTFILE
value: {{ .Values.config.mutualtlsclient.cacertfile | quote }}
- name: TLSCLIENT_CACERTFILE
value: {{ .Values.config.tlsclient.cacertfile | quote }}
{{- if .Values.config.tlsserver.deploy }}
- name: TLSSERVER_DEPLOY
value: {{ .Values.config.tlsserver.deploy | quote }}
- name: TLSSERVER_CERTFILE
value: {{ .Values.config.tlsserver.certfile | quote }}
- name: TLSSERVER_KEYFILE
value: {{ .Values.config.tlsserver.keyfile | quote }}
- name: TLSSERVER_CACERTFILE
value: {{ .Values.config.tlsserver.cacertfile | quote }}
- name: TLSSERVER_MUTUALTLS
value: {{ .Values.config.tlsserver.mutualtls | quote }}
- name: TLSSERVER_NOTLSPORT
value: {{ .Values.config.tlsserver.notlsport | quote }}
- name: TLSSERVER_NOTLSPATHS
value: {{ .Values.config.tlsserver.notlspaths | quote }}
{{- end }}
{{- if .Values.config.otlp.traces.extraenvvars }}
{{ toYaml .Values.config.otlp.traces.extraenvvars | nindent 12 }}
{{- end }}
{{- if .Values.config.extraEnv }}
{{ toYaml .Values.config.extraEnv | nindent 12 }}
{{- end }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- if or .Values.extraVolumeMounts (and .Values.config.tlsserver.deploy (or .Values.config.tlsserver.existingSecret .Values.config.tlsserver.serverkey .Values.config.tlsserver.servercrt .Values.config.tlsserver.cacrt)) }}
volumeMounts:
{{- if and .Values.config.tlsserver.deploy (or .Values.config.tlsserver.existingSecret .Values.config.tlsserver.serverkey .Values.config.tlsserver.servercrt .Values.config.tlsserver.cacrt) }}
- mountPath: /etc/certs/server
name: certs-volume
readOnly: true
{{- end }}
{{- if or .Values.extraVolumeMounts }}
{{ toYaml .Values.extraVolumeMounts | indent 12 }}
{{- end }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if or .Values.extraVolumes (and .Values.config.tlsserver.deploy (or .Values.config.tlsserver.existingSecret .Values.config.tlsserver.serverkey .Values.config.tlsserver.servercrt .Values.config.tlsserver.cacrt)) }}
volumes:
{{- if and .Values.config.tlsserver.deploy (or .Values.config.tlsserver.existingSecret .Values.config.tlsserver.serverkey .Values.config.tlsserver.servercrt .Values.config.tlsserver.cacrt) }}
- name: certs-volume
secret:
{{- if .Values.config.tlsserver.existingSecret }}
secretName: {{.Values.config.tlsserver.existingSecret }}
{{- else }}
secretName: {{ include "falcosidekick.fullname" . }}-certs
{{- end }}
{{- end }}
{{- if or .Values.extraVolumes }}
{{ toYaml .Values.extraVolumes | indent 8 }}
{{- end }}
{{- end }}

View File

@ -1,22 +0,0 @@
{{- if .Values.config.loki.grafanaDashboard.enabled -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Values.config.loki.grafanaDashboard.configMap.name }}
{{ if .Values.config.loki.grafanaDashboard.configMap.namespace }}
namespace: {{ .Values.config.loki.grafanaDashboard.configMap.namespace }}
{{- else -}}
namespace: {{ .Release.Namespace }}
{{- end }}
labels:
{{- include "falcosidekick.labels" . | nindent 4 }}
grafana_dashboard: "1"
{{- if .Values.config.loki.grafanaDashboard.configMap.folder }}
annotations:
k8s-sidecar-target-directory: /tmp/dashboards/{{ .Values.config.loki.grafanaDashboard.configMap.folder}}
grafana_dashboard_folder: {{ .Values.config.loki.grafanaDashboard.configMap.folder }}
{{- end }}
data:
falcosidekick-loki-dashboard.json: |-
{{- .Files.Get "dashboards/falcosidekick-loki-dashboard.json" | nindent 4 }}
{{- end -}}
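A minimal sketch of enabling this dashboard ConfigMap so a Grafana dashboard sidecar can pick it up; the release name is illustrative:

```bash
helm upgrade --install falcosidekick falcosecurity/falcosidekick \
  --set config.loki.grafanaDashboard.enabled=true
```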

View File

@ -1,51 +0,0 @@
{{- if .Values.webui.enabled -}}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "falcosidekick.fullname" . }}-ui
namespace: {{ .Release.Namespace }}
labels:
{{- include "falcosidekick.labels" . | nindent 4 }}
app.kubernetes.io/component: ui
{{- with .Values.webui.customLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
annotations:
{{- with .Values.webui.customAnnotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "falcosidekick.fullname" . }}-ui
namespace: {{ .Release.Namespace }}
labels:
{{- include "falcosidekick.labels" . | nindent 4 }}
app.kubernetes.io/component: ui
{{- with .Values.webui.customLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
annotations:
{{- with .Values.webui.customAnnotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
rules: []
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "falcosidekick.fullname" . }}-ui
namespace: {{ .Release.Namespace }}
labels:
{{- include "falcosidekick.labels" . | nindent 4 }}
app.kubernetes.io/component: ui
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ include "falcosidekick.fullname" . }}-ui
subjects:
- kind: ServiceAccount
name: {{ include "falcosidekick.fullname" . }}-ui
{{- end }}

View File

@ -1,53 +0,0 @@
{{- if .Values.webui.enabled -}}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ include "falcosidekick.fullname" . }}-ui
namespace: {{ .Release.Namespace }}
labels:
{{- include "falcosidekick.labels" . | nindent 4 }}
app.kubernetes.io/component: ui
{{- with .Values.webui.customLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
annotations:
{{- with .Values.webui.customAnnotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
type: Opaque
data:
{{- if .Values.webui.user }}
FALCOSIDEKICK_UI_USER: "{{ .Values.webui.user | b64enc}}"
{{- end }}
{{- if .Values.webui.redis.password }}
FALCOSIDEKICK_UI_REDIS_PASSWORD: "{{ .Values.webui.redis.password | b64enc}}"
{{- end }}
{{- if eq .Values.webui.redis.existingSecret "" }}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ include "falcosidekick.fullname" . }}-ui-redis
namespace: {{ .Release.Namespace }}
labels:
{{- include "falcosidekick.labels" . | nindent 4 }}
app.kubernetes.io/component: ui
{{- with .Values.webui.customLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
annotations:
{{- with .Values.webui.customAnnotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
type: Opaque
data:
{{- if and .Values.webui.redis.enabled .Values.webui.redis.password }}
REDIS_ARGS: "{{ printf "--requirepass %s" .Values.webui.redis.password | b64enc}}"
REDIS_PASSWORD: "{{ .Values.webui.redis.password | b64enc }}"
{{- end }}
  {{- if and .Values.webui.externalRedis.enabled .Values.webui.externalRedis.password }}
REDIS_PASSWORD: "{{ .Values.webui.externalRedis.password| b64enc }}"
{{- end }}
{{- end }}
{{- end }}
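A hedged values sketch showing the two mutually exclusive ways these Redis secrets get populated; the hostname and passwords are placeholders:

```yaml
webui:
  enabled: true
  # Option 1: chart-managed Redis protected by a password.
  redis:
    enabled: true
    password: changeme
  # Option 2: external Redis (keep redis.enabled false if you use this).
  externalRedis:
    enabled: false
    url: redis.example.com
    port: 6379
    password: changeme
```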

View File

@ -1,35 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
name: {{ include "falcosidekick.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "falcosidekick.labels" . | nindent 4 }}
app.kubernetes.io/component: core
{{- with .Values.customLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
annotations:
{{- with .Values.customAnnotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.service.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
{{- if not (eq .Values.config.tlsserver.notlspaths "") }}
- port: {{ .Values.config.tlsserver.notlsport }}
targetPort: http-notls
protocol: TCP
name: http-notls
{{- end }}
selector:
{{- include "falcosidekick.selectorLabels" . | nindent 4 }}
app.kubernetes.io/component: core

View File

@ -1,36 +0,0 @@
{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) .Values.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ include "falcosidekick.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "falcosidekick.labels" . | nindent 4 }}
app.kubernetes.io/component: core
{{- with .Values.serviceMonitor.additionalLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.customLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
annotations:
{{- with .Values.customAnnotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
endpoints:
- port: http
{{- if .Values.serviceMonitor.interval }}
interval: {{ .Values.serviceMonitor.interval }}
{{- end }}
{{- if .Values.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ .Values.serviceMonitor.scrapeTimeout }}
{{- end }}
{{- with .Values.serviceMonitor.additionalProperties }}
{{- toYaml . | nindent 4 }}
{{- end }}
selector:
matchLabels:
{{- include "falcosidekick.labels" . | nindent 6 }}
app.kubernetes.io/component: core
{{- end }}

View File

@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@ -1,56 +0,0 @@
# Change Log
This file documents all notable changes to `k8s-metacollector` Helm Chart. The release
numbering uses [semantic versioning](http://semver.org).
## v0.1.10
* Fix Grafana dashboards datasources
## v0.1.9
* Add podLabels
## v0.1.8
* Bump application version to 0.1.1. For more info see release notes: https://github.com/falcosecurity/k8s-metacollector/releases/tag/v0.1.1
## v0.1.7
* Lower initial delay seconds for readiness and liveness probes;
## v0.1.6
* Add grafana dashboard;
## v0.1.5
* Fix service monitor indentation;
## v0.1.4
* Lower `interval` and `scrape_timeout` values for service monitor;
## v0.1.3
* Bump application version to 0.1.3
## v0.1.2
### Major Changes
* Update unit tests;
## v0.1.1
### Major Changes
* Add `work in progress` disclaimer;
* Update chart info.
## v0.1.0
### Major Changes
* Initial release of k8s-metacollector Helm Chart. **Note:** the chart uses the `main` tag, since we haven't released the k8s-metacollector yet.

View File

@ -1,71 +0,0 @@
# k8s-metacollector
[k8s-metacollector](https://github.com/falcosecurity/k8s-metacollector) is a self-contained module that can be deployed within a Kubernetes cluster to perform the task of gathering metadata from various Kubernetes resources and subsequently transmitting this collected metadata to designated subscribers.
## Introduction
This chart installs the [k8s-metacollector](https://github.com/falcosecurity/k8s-metacollector) in a Kubernetes cluster. The main application will be deployed as a Kubernetes Deployment with a replica count of 1. In order for the application to work correctly, the following resources will be created:
* ServiceAccount;
* ClusterRole;
* ClusterRoleBinding;
* Service;
* ServiceMonitor (optional);
*Note*: Increasing the number of replicas is not recommended. The [k8s-metacollector](https://github.com/falcosecurity/k8s-metacollector) does not implement memory sharding techniques. Furthermore, events are distributed over `gRPC` using `streams`, which does not work well with the load-balancing mechanisms implemented by Kubernetes.
## Adding `falcosecurity` repository
Before installing the chart, add the `falcosecurity` charts repository:
```bash
helm repo add falcosecurity https://falcosecurity.github.io/charts
helm repo update
```
## Installing the Chart
To install the chart with default values and release name `k8s-metacollector` run:
```bash
helm install k8s-metacollector falcosecurity/k8s-metacollector --namespace metacollector --create-namespace
```
After a few seconds, k8s-metacollector should be running in the `metacollector` namespace.
### Enabling ServiceMonitor
Assuming that Prometheus scrapes only the ServiceMonitors that present a `release` label, the following command will install and label the ServiceMonitor:
```bash
helm install k8s-metacollector falcosecurity/k8s-metacollector \
--create-namespace \
--namespace metacollector \
--set serviceMonitor.create=true \
--set serviceMonitor.labels.release="kube-prometheus-stack"
```
### Deploying the Grafana Dashboard
By setting `grafana.dashboards.enabled=true`, the k8s-metacollector's Grafana dashboard is deployed in the cluster using a ConfigMap.
Depending on Grafana's configuration, the ConfigMap can be picked up by the Grafana dashboard sidecar.
The following command will deploy the k8s-metacollector + serviceMonitor + grafana dashboard:
```bash
helm install k8s-metacollector falcosecurity/k8s-metacollector \
--create-namespace \
--namespace metacollector \
--set serviceMonitor.create=true \
--set serviceMonitor.labels.release="kube-prometheus-stack" \
--set grafana.dashboards.enabled=true
```
## Uninstalling the Chart
To uninstall the `k8s-metacollector` release in namespace `metacollector`:
```bash
helm uninstall k8s-metacollector --namespace metacollector
```
The command removes all the Kubernetes resources associated with the chart and deletes the release.
## Configuration
The following table lists the main configurable parameters of the {{ template "chart.name" . }} chart v{{ template "chart.version" . }} and their default values. See `values.yaml` for the full list.
{{ template "chart.valuesSection" . }}

View File

@ -1,151 +0,0 @@
# k8s-metacollector
[k8s-metacollector](https://github.com/falcosecurity/k8s-metacollector) is a self-contained module that can be deployed within a Kubernetes cluster to perform the task of gathering metadata from various Kubernetes resources and subsequently transmitting this collected metadata to designated subscribers.
## Introduction
This chart installs the [k8s-metacollector](https://github.com/falcosecurity/k8s-metacollector) in a Kubernetes cluster. The main application will be deployed as a Kubernetes Deployment with a replica count of 1. In order for the application to work correctly, the following resources will be created:
* ServiceAccount;
* ClusterRole;
* ClusterRoleBinding;
* Service;
* ServiceMonitor (optional);
*Note*: Increasing the number of replicas is not recommended. The [k8s-metacollector](https://github.com/falcosecurity/k8s-metacollector) does not implement memory sharding techniques. Furthermore, events are distributed over `gRPC` using `streams`, which does not play well with the load balancing mechanisms implemented by Kubernetes.
## Adding `falcosecurity` repository
Before installing the chart, add the `falcosecurity` charts repository:
```bash
helm repo add falcosecurity https://falcosecurity.github.io/charts
helm repo update
```
## Installing the Chart
To install the chart with default values and release name `k8s-metacollector` run:
```bash
helm install k8s-metacollector falcosecurity/k8s-metacollector --namespace metacollector --create-namespace
```
After a few seconds, k8s-metacollector should be running in the `metacollector` namespace.
### Enabling ServiceMonitor
Assuming that Prometheus scrapes only ServiceMonitors that carry a `release` label, the following command will install and label the ServiceMonitor:
```bash
helm install k8s-metacollector falcosecurity/k8s-metacollector \
--create-namespace \
--namespace metacollector \
--set serviceMonitor.create=true \
--set serviceMonitor.labels.release="kube-prometheus-stack"
```
### Deploying the Grafana Dashboard
By setting `grafana.dashboards.enabled=true`, the k8s-metacollector's Grafana dashboard is deployed in the cluster using a ConfigMap.
Depending on Grafana's configuration, the ConfigMap can be picked up by the Grafana dashboard sidecar.
The following command deploys the k8s-metacollector together with the ServiceMonitor and the Grafana dashboard:
```bash
helm install k8s-metacollector falcosecurity/k8s-metacollector \
--create-namespace \
--namespace metacollector \
--set serviceMonitor.create=true \
--set serviceMonitor.labels.release="kube-prometheus-stack" \
--set grafana.dashboards.enabled=true
```
## Uninstalling the Chart
To uninstall the `k8s-metacollector` release in namespace `metacollector`:
```bash
helm uninstall k8s-metacollector --namespace metacollector
```
The command removes all the Kubernetes resources associated with the chart and deletes the release.
## Configuration
The following table lists the main configurable parameters of the k8s-metacollector chart v0.1.10 and their default values. See `values.yaml` for the full list.
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| affinity | object | `{}` | affinity allows pod placement based on node characteristics, or any other custom labels assigned to nodes. |
| containerSecurityContext | object | `{"capabilities":{"drop":["ALL"]}}` | containerSecurityContext holds the security settings for the container. |
| containerSecurityContext.capabilities | object | `{"drop":["ALL"]}` | capabilities fine-grained privileges that can be assigned to processes. |
| containerSecurityContext.capabilities.drop | list | `["ALL"]` | drop drops the given set of privileges. |
| fullnameOverride | string | `""` | fullNameOverride same as nameOverride but for the full name. |
| grafana | object | `{"dashboards":{"configMaps":{"collector":{"folder":"","name":"k8s-metacollector-grafana-dashboard","namespace":""}},"enabled":false}}` | grafana contains the configuration related to grafana. |
| grafana.dashboards | object | `{"configMaps":{"collector":{"folder":"","name":"k8s-metacollector-grafana-dashboard","namespace":""}},"enabled":false}` | dashboards contains configuration for grafana dashboards. |
| grafana.dashboards.configMaps | object | `{"collector":{"folder":"","name":"k8s-metacollector-grafana-dashboard","namespace":""}}` | configmaps to be deployed that contain a grafana dashboard. |
| grafana.dashboards.configMaps.collector | object | `{"folder":"","name":"k8s-metacollector-grafana-dashboard","namespace":""}` | collector contains the configuration for collector's dashboard. |
| grafana.dashboards.configMaps.collector.folder | string | `""` | folder where the dashboard is stored by grafana. |
| grafana.dashboards.configMaps.collector.name | string | `"k8s-metacollector-grafana-dashboard"` | name specifies the name for the configmap. |
| grafana.dashboards.configMaps.collector.namespace | string | `""` | namespace specifies the namespace for the configmap. |
| grafana.dashboards.enabled | bool | `false` | enabled specifies whether the dashboards should be deployed. |
| healthChecks | object | `{"livenessProbe":{"httpGet":{"path":"/healthz","port":8081},"initialDelaySeconds":45,"periodSeconds":15,"timeoutSeconds":5},"readinessProbe":{"httpGet":{"path":"/readyz","port":8081},"initialDelaySeconds":30,"periodSeconds":15,"timeoutSeconds":5}}` | healthChecks contains the configuration for liveness and readiness probes. |
| healthChecks.livenessProbe | object | `{"httpGet":{"path":"/healthz","port":8081},"initialDelaySeconds":45,"periodSeconds":15,"timeoutSeconds":5}` | livenessProbe is a diagnostic mechanism used to determine whether a container within a Pod is still running and healthy. |
| healthChecks.livenessProbe.httpGet | object | `{"path":"/healthz","port":8081}` | httpGet specifies that the liveness probe will make an HTTP GET request to check the health of the container. |
| healthChecks.livenessProbe.httpGet.path | string | `"/healthz"` | path is the specific endpoint on which the HTTP GET request will be made. |
| healthChecks.livenessProbe.httpGet.port | int | `8081` | port is the port on which the container exposes the "/healthz" endpoint. |
| healthChecks.livenessProbe.initialDelaySeconds | int | `45` | initialDelaySeconds tells the kubelet that it should wait X seconds before performing the first probe. |
| healthChecks.livenessProbe.periodSeconds | int | `15` | periodSeconds specifies the interval at which the liveness probe will be repeated. |
| healthChecks.livenessProbe.timeoutSeconds | int | `5` | timeoutSeconds is the number of seconds after which the probe times out. |
| healthChecks.readinessProbe | object | `{"httpGet":{"path":"/readyz","port":8081},"initialDelaySeconds":30,"periodSeconds":15,"timeoutSeconds":5}` | readinessProbe is a mechanism used to determine whether a container within a Pod is ready to serve traffic. |
| healthChecks.readinessProbe.httpGet | object | `{"path":"/readyz","port":8081}` | httpGet specifies that the readiness probe will make an HTTP GET request to check whether the container is ready. |
| healthChecks.readinessProbe.httpGet.path | string | `"/readyz"` | path is the specific endpoint on which the HTTP GET request will be made. |
| healthChecks.readinessProbe.httpGet.port | int | `8081` | port is the port on which the container exposes the "/readyz" endpoint. |
| healthChecks.readinessProbe.initialDelaySeconds | int | `30` | initialDelaySeconds tells the kubelet that it should wait X seconds before performing the first probe. |
| healthChecks.readinessProbe.periodSeconds | int | `15` | periodSeconds specifies the interval at which the readiness probe will be repeated. |
| healthChecks.readinessProbe.timeoutSeconds | int | `5` | timeoutSeconds is the number of seconds after which the probe times out. |
| image | object | `{"pullPolicy":"IfNotPresent","pullSecrets":[],"registry":"docker.io","repository":"falcosecurity/k8s-metacollector","tag":""}` | image is the configuration for the k8s-metacollector image. |
| image.pullPolicy | string | `"IfNotPresent"` | pullPolicy is the policy used to determine when a node should attempt to pull the container image. |
| image.pullSecrets | list | `[]` | pullSecrets is a list of secrets containing credentials used when pulling from private/secure registries. |
| image.registry | string | `"docker.io"` | registry is the image registry to pull from. |
| image.repository | string | `"falcosecurity/k8s-metacollector"` | repository is the image repository to pull from. |
| image.tag | string | `""` | tag is the image tag to pull. Overrides the image tag whose default is the chart appVersion. |
| nameOverride | string | `""` | nameOverride is the new name used to override the release name used for k8s-metacollector components. |
| namespaceOverride | string | `""` | namespaceOverride overrides the deployment namespace. It's useful for multi-namespace deployments in combined charts. |
| nodeSelector | object | `{}` | nodeSelector specifies a set of key-value pairs that must match labels assigned to nodes for the Pod to be eligible for scheduling on that node. |
| podAnnotations | object | `{}` | podAnnotations are custom annotations to be added to the pod. |
| podLabels | object | `{}` | podLabels are labels to be added to the pod. |
| podSecurityContext | object | `{"fsGroup":1000,"runAsGroup":1000,"runAsNonRoot":true,"runAsUser":1000}` | These settings are overridden by the ones specified for the container when there is overlap. |
| podSecurityContext.fsGroup | int | `1000` | fsGroup specifies the group ID (GID) that should be used for the volume mounted within a Pod. |
| podSecurityContext.runAsGroup | int | `1000` | runAsGroup specifies the group ID (GID) that the containers inside the pod should run as. |
| podSecurityContext.runAsNonRoot | bool | `true` | runAsNonRoot when set to true enforces that the specified container runs as a non-root user. |
| podSecurityContext.runAsUser | int | `1000` | runAsUser specifies the user ID (UID) that the containers inside the pod should run as. |
| replicaCount | int | `1` | replicaCount is the number of identical copies of the k8s-metacollector. |
| resources | object | `{}` | resources defines the computing resources (CPU and memory) that are allocated to the containers running within the Pod. |
| service | object | `{"create":true,"ports":{"broker-grpc":{"port":45000,"protocol":"TCP","targetPort":"broker-grpc"},"health-probe":{"port":8081,"protocol":"TCP","targetPort":"health-probe"},"metrics":{"port":8080,"protocol":"TCP","targetPort":"metrics"}},"type":"ClusterIP"}` | service exposes the k8s-metacollector services to be accessed from within the cluster. ref: https://kubernetes.io/docs/concepts/services-networking/service/ |
| service.create | bool | `true` | enabled specifies whether a service should be created. |
| service.ports | object | `{"broker-grpc":{"port":45000,"protocol":"TCP","targetPort":"broker-grpc"},"health-probe":{"port":8081,"protocol":"TCP","targetPort":"health-probe"},"metrics":{"port":8080,"protocol":"TCP","targetPort":"metrics"}}` | ports denotes all the ports on which the Service will listen. |
| service.ports.broker-grpc | object | `{"port":45000,"protocol":"TCP","targetPort":"broker-grpc"}` | broker-grpc denotes a listening service port named "broker-grpc". |
| service.ports.broker-grpc.port | int | `45000` | port is the port on which the Service will listen. |
| service.ports.broker-grpc.protocol | string | `"TCP"` | protocol specifies the network protocol that the Service should use for the associated port. |
| service.ports.broker-grpc.targetPort | string | `"broker-grpc"` | targetPort is the port on which the Pod is listening. |
| service.ports.health-probe | object | `{"port":8081,"protocol":"TCP","targetPort":"health-probe"}` | health-probe denotes a listening service named "health-probe" |
| service.ports.health-probe.port | int | `8081` | port is the port on which the Service will listen. |
| service.ports.health-probe.protocol | string | `"TCP"` | protocol specifies the network protocol that the Service should use for the associated port. |
| service.ports.health-probe.targetPort | string | `"health-probe"` | targetPort is the port on which the Pod is listening. |
| service.ports.metrics | object | `{"port":8080,"protocol":"TCP","targetPort":"metrics"}` | metrics denotes a listening service named "metrics". |
| service.ports.metrics.port | int | `8080` | port is the port on which the Service will listen. |
| service.ports.metrics.protocol | string | `"TCP"` | protocol specifies the network protocol that the Service should use for the associated port. |
| service.ports.metrics.targetPort | string | `"metrics"` | targetPort is the port on which the Pod is listening. |
| service.type | string | `"ClusterIP"` | type denotes the service type. Setting it to "ClusterIP" ensures that the service is accessible from within the cluster. |
| serviceAccount | object | `{"annotations":{},"create":true,"name":""}` | serviceAccount is the configuration for the service account. |
| serviceAccount.annotations | object | `{}` | annotations to add to the service account. |
| serviceAccount.create | bool | `true` | create specifies whether a service account should be created. |
| serviceAccount.name | string | `""` | If not set and create is true, a name is generated using the full name template. |
| serviceMonitor | object | `{"create":false,"interval":"15s","labels":{},"path":"/metrics","relabelings":[],"scheme":"http","scrapeTimeout":"10s","targetLabels":[],"tlsConfig":{}}` | serviceMonitor holds the configuration for the ServiceMonitor CRD. A ServiceMonitor is a custom resource definition (CRD) used to configure how Prometheus should discover and scrape metrics from the k8s-metacollector service. |
| serviceMonitor.create | bool | `false` | create specifies whether a ServiceMonitor CRD should be created for use with the Prometheus Operator (https://github.com/coreos/prometheus-operator). Enable it only if the ServiceMonitor CRD is installed in your cluster. |
| serviceMonitor.interval | string | `"15s"` | interval specifies the time interval at which Prometheus should scrape metrics from the service. |
| serviceMonitor.labels | object | `{}` | labels set of labels to be applied to the ServiceMonitor resource. If your Prometheus deployment is configured to use serviceMonitorSelector, then add the right label here in order for the ServiceMonitor to be selected for target discovery. |
| serviceMonitor.path | string | `"/metrics"` | path at which the metrics are exposed by the k8s-metacollector. |
| serviceMonitor.relabelings | list | `[]` | relabelings configures the relabeling rules to apply to the targets' metadata labels. |
| serviceMonitor.scheme | string | `"http"` | scheme specifies the network protocol used by the metrics endpoint, in this case HTTP. |
| serviceMonitor.scrapeTimeout | string | `"10s"` | scrapeTimeout determines the maximum time Prometheus should wait for a target to respond to a scrape request. If the target does not respond within the specified timeout, Prometheus considers the scrape as failed for that target. |
| serviceMonitor.targetLabels | list | `[]` | targetLabels defines the labels which are transferred from the associated Kubernetes service object onto the ingested metrics. |
| serviceMonitor.tlsConfig | object | `{}` | tlsConfig specifies TLS (Transport Layer Security) configuration for secure communication when scraping metrics from a service. It allows you to define the details of the TLS connection, such as CA certificate, client certificate, and client key. Currently, the k8s-metacollector does not support TLS configuration for the metrics endpoint. |
| tolerations | list | `[]` | tolerations are applied to pods and allow them to be scheduled on nodes with matching taints. |
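
As a quick reference, a minimal custom values file that overrides a handful of the parameters above might look like the following sketch (the registry and resource figures are purely illustrative, not chart defaults):

```yaml
# custom-values.yaml -- illustrative overrides for the k8s-metacollector chart
image:
  registry: ghcr.io        # pull from a different registry than the default docker.io
  tag: ""                  # an empty tag falls back to the chart appVersion
serviceMonitor:
  create: true             # requires the ServiceMonitor CRD to be installed in the cluster
  labels:
    release: kube-prometheus-stack
grafana:
  dashboards:
    enabled: true
resources:                 # illustrative values only; the chart default is {}
  requests:
    cpu: 100m
    memory: 128Mi
  limits:
    cpu: 200m
    memory: 256Mi
```

Such a file can then be passed to `helm install` or `helm upgrade` with `-f custom-values.yaml`.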

View File

@ -1,121 +0,0 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "k8s-metacollector.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "k8s-metacollector.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "k8s-metacollector.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Allow the release namespace to be overridden for multi-namespace deployments in combined charts
*/}}
{{- define "k8s-metacollector.namespace" -}}
{{- default .Release.Namespace .Values.namespaceOverride -}}
{{- end }}
{{/*
Common labels
*/}}
{{- define "k8s-metacollector.labels" -}}
helm.sh/chart: {{ include "k8s-metacollector.chart" . }}
{{ include "k8s-metacollector.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: "metadata-collector"
{{- end }}
{{/*
Selector labels
*/}}
{{- define "k8s-metacollector.selectorLabels" -}}
app.kubernetes.io/name: {{ include "k8s-metacollector.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Return the proper k8s-metacollector image name
*/}}
{{- define "k8s-metacollector.image" -}}
"
{{- with .Values.image.registry -}}
{{- . }}/
{{- end -}}
{{- .Values.image.repository }}:
{{- .Values.image.tag | default .Chart.AppVersion -}}
"
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "k8s-metacollector.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "k8s-metacollector.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
{{/*
Generate the ports for the service
*/}}
{{- define "k8s-metacollector.servicePorts" -}}
{{- if .Values.service.create }}
{{- with .Values.service.ports }}
{{- range $key, $value := . }}
- name: {{ $key }}
{{- range $key1, $value1 := $value }}
{{ $key1}}: {{ $value1 }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Generate the ports for the container
*/}}
{{- define "k8s-metacollector.containerPorts" -}}
{{- if .Values.service.create }}
{{- with .Values.service.ports }}
{{- range $key, $value := . }}
- name: "{{ $key }}"
{{- range $key1, $value1 := $value }}
{{- if ne $key1 "targetPort" }}
{{- if eq $key1 "port" }}
containerPort: {{ $value1 }}
{{- else }}
{{ $key1}}: {{ $value1 }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
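{{/*
Illustration (not part of the chart logic): with the default service.ports values,
"k8s-metacollector.containerPorts" renders entries equivalent to

  - name: "broker-grpc"
    containerPort: 45000
    protocol: TCP
  - name: "health-probe"
    containerPort: 8081
    protocol: TCP
  - name: "metrics"
    containerPort: 8080
    protocol: TCP

while "k8s-metacollector.servicePorts" keeps the "port" and "targetPort" keys as-is for
use in the Service template.
*/}}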

View File

@ -1,16 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "k8s-metacollector.fullname" . }}
labels:
{{- include "k8s-metacollector.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ include "k8s-metacollector.fullname" . }}
subjects:
- kind: ServiceAccount
name: {{ include "k8s-metacollector.serviceAccountName" . }}
namespace: {{ include "k8s-metacollector.namespace" . }}
{{- end }}

View File

@ -1,21 +0,0 @@
{{- if .Values.grafana.dashboards.enabled -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Values.grafana.dashboards.configMaps.collector.name }}
{{ if .Values.grafana.dashboards.configMaps.collector.namespace }}
namespace: {{ .Values.grafana.dashboards.configMaps.collector.namespace }}
{{- else -}}
namespace: {{ include "k8s-metacollector.namespace" . }}
{{- end }}
labels:
grafana_dashboard: "1"
{{- if .Values.grafana.dashboards.configMaps.collector.folder }}
annotations:
k8s-sidecar-target-directory: /tmp/dashboards/{{ .Values.grafana.dashboards.configMaps.collector.folder}}
grafana_dashboard_folder: {{ .Values.grafana.dashboards.configMaps.collector.folder }}
{{- end }}
data:
dashboard.json: |-
{{- .Files.Get "dashboards/k8s-metacollector-dashboard.json" | nindent 4 }}
{{- end -}}

View File

@ -1,65 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "k8s-metacollector.fullname" . }}
namespace: {{ include "k8s-metacollector.namespace" . }}
labels:
{{- include "k8s-metacollector.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "k8s-metacollector.selectorLabels" . | nindent 6 }}
template:
metadata:
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "k8s-metacollector.selectorLabels" . | nindent 8 }}
{{- if .Values.podLabels }}
{{ toYaml .Values.podLabels | nindent 8 }}
{{- end }}
spec:
{{- with .Values.image.pullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "k8s-metacollector.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.containerSecurityContext | nindent 12 }}
image: {{ include "k8s-metacollector.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
- /meta-collector
args:
- run
ports:
{{- include "k8s-metacollector.containerPorts" . | indent 12}}
{{- with .Values.healthChecks.livenessProbe }}
livenessProbe:
{{- toYaml . | nindent 12}}
{{- end }}
{{- with .Values.healthChecks.readinessProbe }}
readinessProbe:
{{- toYaml . | nindent 12}}
{{- end }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@ -1,15 +0,0 @@
{{- if .Values.service.create}}
apiVersion: v1
kind: Service
metadata:
name: {{ include "k8s-metacollector.fullname" . }}
namespace: {{ include "k8s-metacollector.namespace" . }}
labels:
{{- include "k8s-metacollector.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
{{- include "k8s-metacollector.servicePorts" . | indent 4 }}
selector:
{{- include "k8s-metacollector.selectorLabels" . | nindent 4 }}
{{- end }}

View File

@ -1,47 +0,0 @@
{{- if .Values.serviceMonitor.create }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ include "k8s-metacollector.fullname" . }}
{{- if .Values.serviceMonitor.namespace }}
namespace: {{ tpl .Values.serviceMonitor.namespace . }}
{{- else }}
namespace: {{ include "k8s-metacollector.namespace" . }}
{{- end }}
labels:
{{- include "k8s-metacollector.labels" . | nindent 4 }}
{{- with .Values.serviceMonitor.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
endpoints:
- port: {{ .Values.service.ports.metrics.targetPort }}
{{- with .Values.serviceMonitor.interval }}
interval: {{ . }}
{{- end }}
{{- with .Values.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ . }}
{{- end }}
honorLabels: true
path: {{ .Values.serviceMonitor.path }}
scheme: {{ .Values.serviceMonitor.scheme }}
{{- with .Values.serviceMonitor.tlsConfig }}
tlsConfig:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.serviceMonitor.relabelings }}
relabelings:
{{- toYaml . | nindent 6 }}
{{- end }}
jobLabel: "{{ .Release.Name }}"
selector:
matchLabels:
{{- include "k8s-metacollector.selectorLabels" . | nindent 6 }}
namespaceSelector:
matchNames:
- {{ include "k8s-metacollector.namespace" . }}
{{- with .Values.serviceMonitor.targetLabels }}
targetLabels:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

View File

@ -1,34 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright 2024 The Falco Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package unit
import (
"testing"
"github.com/gruntwork-io/terratest/modules/helm"
"gopkg.in/yaml.v3"
)
func chartInfo(t *testing.T, chartPath string) (map[string]interface{}, error) {
// Get chart info.
output, err := helm.RunHelmCommandAndGetOutputE(t, &helm.Options{}, "show", "chart", chartPath)
if err != nil {
return nil, err
}
chartInfo := map[string]interface{}{}
err = yaml.Unmarshal([]byte(output), &chartInfo)
return chartInfo, err
}

View File

@ -1,222 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright 2024 The Falco Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package unit
import (
"fmt"
"path/filepath"
"testing"
"github.com/gruntwork-io/terratest/modules/helm"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
type commonMetaFieldsTest struct {
suite.Suite
chartPath string
releaseName string
namespace string
templates []string
}
func TestCommonMetaFields(t *testing.T) {
t.Parallel()
// Template files that will be rendered.
templateFiles := []string{
"templates/clusterrole.yaml",
"templates/clusterrolebinding.yaml",
"templates/deployment.yaml",
"templates/service.yaml",
"templates/serviceaccount.yaml",
"templates/servicemonitor.yaml",
}
chartFullPath, err := filepath.Abs(chartPath)
require.NoError(t, err)
suite.Run(t, &commonMetaFieldsTest{
Suite: suite.Suite{},
chartPath: chartFullPath,
releaseName: "releasename-test",
namespace: "metacollector-test",
templates: templateFiles,
})
}
func (s *commonMetaFieldsTest) TestNameOverride() {
cInfo, err := chartInfo(s.T(), s.chartPath)
s.NoError(err)
chartName, found := cInfo["name"]
s.True(found)
testCases := []struct {
name string
values map[string]string
expected string
}{
{
"defaultValues, release name does not contain chart name",
map[string]string{
"serviceMonitor.create": "true",
},
fmt.Sprintf("%s-%s", s.releaseName, chartName),
},
{
"overrideFullName",
map[string]string{
"fullnameOverride": "metadata",
"serviceMonitor.create": "true",
},
"metadata",
},
{
"overrideFullName, longer than 63 chars",
map[string]string{
"fullnameOverride": "aVeryLongNameForTheReleaseThatIsLongerThanSixtyThreeCharsaVeryLongNameForTheReleaseThatIsLongerThanSixtyThreeChars",
"serviceMonitor.create": "true",
},
"aVeryLongNameForTheReleaseThatIsLongerThanSixtyThreeCharsaVeryL",
},
{
"overrideName, not containing release name",
map[string]string{
"nameOverride": "metadata",
"serviceMonitor.create": "true",
},
fmt.Sprintf("%s-metadata", s.releaseName),
},
{
"overrideName, release name contains the name",
map[string]string{
"nameOverride": "releasename",
"serviceMonitor.create": "true",
},
s.releaseName,
},
}
for _, testCase := range testCases {
testCase := testCase
s.Run(testCase.name, func() {
subT := s.T()
subT.Parallel()
options := &helm.Options{SetValues: testCase.values}
for _, template := range s.templates {
// Render the current template.
output := helm.RenderTemplate(s.T(), options, s.chartPath, s.releaseName, []string{template})
// Unmarshal output to a map.
var resource unstructured.Unstructured
helm.UnmarshalK8SYaml(s.T(), output, &resource)
s.Equal(testCase.expected, resource.GetName(), "should be equal")
}
})
}
}
func (s *commonMetaFieldsTest) TestNamespaceOverride() {
testCases := []struct {
name string
values map[string]string
expected string
}{
{
"defaultValues",
map[string]string{
"serviceMonitor.create": "true",
},
"default",
},
{
"overrideNamespace",
map[string]string{
"namespaceOverride": "metacollector",
"serviceMonitor.create": "true",
},
"metacollector",
},
}
for _, testCase := range testCases {
testCase := testCase
s.Run(testCase.name, func() {
subT := s.T()
subT.Parallel()
options := &helm.Options{SetValues: testCase.values}
for _, template := range s.templates {
// Render the current template.
output := helm.RenderTemplate(s.T(), options, s.chartPath, s.releaseName, []string{template})
// Unmarshal output to a map.
var resource unstructured.Unstructured
helm.UnmarshalK8SYaml(s.T(), output, &resource)
if resource.GetKind() == "ClusterRole" || resource.GetKind() == "ClusterRoleBinding" {
continue
}
s.Equal(testCase.expected, resource.GetNamespace(), "should be equal")
}
})
}
}
// TestLabels tests that all rendered resources have the same base set of labels.
func (s *commonMetaFieldsTest) TestLabels() {
// Get chart info.
cInfo, err := chartInfo(s.T(), s.chartPath)
s.NoError(err)
// Get app version.
appVersion, found := cInfo["appVersion"]
s.True(found, "should find app version in chart info")
appVersion = appVersion.(string)
// Get chart version.
chartVersion, found := cInfo["version"]
s.True(found, "should find chart version in chart info")
// Get chart name.
chartName, found := cInfo["name"]
s.True(found, "should find chart name in chart info")
chartName = chartName.(string)
expectedLabels := map[string]string{
"helm.sh/chart": fmt.Sprintf("%s-%s", chartName, chartVersion),
"app.kubernetes.io/name": chartName.(string),
"app.kubernetes.io/instance": s.releaseName,
"app.kubernetes.io/version": appVersion.(string),
"app.kubernetes.io/managed-by": "Helm",
"app.kubernetes.io/component": "metadata-collector",
}
// Adjust the values to render all components.
options := helm.Options{SetValues: map[string]string{"serviceMonitor.create": "true"}}
for _, template := range s.templates {
// Render the current template.
output := helm.RenderTemplate(s.T(), &options, s.chartPath, s.releaseName, []string{template})
// Unmarshal output to a map.
var resource unstructured.Unstructured
helm.UnmarshalK8SYaml(s.T(), output, &resource)
labels := resource.GetLabels()
s.Len(labels, len(expectedLabels), "should have the same number of labels")
for key, value := range labels {
expectedVal := expectedLabels[key]
s.Equal(expectedVal, value)
}
}
}

View File

@ -1,76 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright 2024 The Falco Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package unit
import (
"path/filepath"
"regexp"
"strings"
"testing"
"github.com/gruntwork-io/terratest/modules/helm"
"github.com/stretchr/testify/require"
"k8s.io/utils/strings/slices"
)
const chartPath = "../../"
// Using the default values we want to test that all the expected resources are rendered.
func TestRenderedResourcesWithDefaultValues(t *testing.T) {
t.Parallel()
helmChartPath, err := filepath.Abs(chartPath)
require.NoError(t, err)
releaseName := "rendered-resources"
// Template files that we expect to be rendered.
templateFiles := []string{
"clusterrole.yaml",
"clusterrolebinding.yaml",
"deployment.yaml",
"service.yaml",
"serviceaccount.yaml",
}
require.NoError(t, err)
options := &helm.Options{}
// Template the chart using the default values.yaml file.
output, err := helm.RenderTemplateE(t, options, helmChartPath, releaseName, nil)
require.NoError(t, err)
// Extract all rendered files from the output.
pattern := `# Source: k8s-metacollector/templates/([^\n]+)`
re := regexp.MustCompile(pattern)
matches := re.FindAllStringSubmatch(output, -1)
var renderedTemplates []string
for _, match := range matches {
// Filter out test templates.
if !strings.Contains(match[1], "test-") {
renderedTemplates = append(renderedTemplates, match[1])
}
}
// Assert that the rendered resources are equal to the expected ones.
require.Equal(t, len(renderedTemplates), len(templateFiles), "should be equal")
for _, rendered := range renderedTemplates {
require.True(t, slices.Contains(templateFiles, rendered), "template files should contain all the rendered files")
}
}

View File

@ -1,862 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright 2024 The Falco Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package unit
import (
"encoding/json"
"fmt"
"path/filepath"
"reflect"
"testing"
"github.com/gruntwork-io/terratest/modules/helm"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
)
type deploymentTemplateTest struct {
suite.Suite
chartPath string
releaseName string
namespace string
templates []string
}
func TestDeploymentTemplate(t *testing.T) {
t.Parallel()
chartFullPath, err := filepath.Abs(chartPath)
require.NoError(t, err)
suite.Run(t, &deploymentTemplateTest{
Suite: suite.Suite{},
chartPath: chartFullPath,
releaseName: "k8s-metacollector-test",
namespace: "metacollector-test",
templates: []string{"templates/deployment.yaml"},
})
}
func (s *deploymentTemplateTest) TestImage() {
// Get chart info.
cInfo, err := chartInfo(s.T(), s.chartPath)
s.NoError(err)
// Extract the appVersion.
appVersion, found := cInfo["appVersion"]
s.True(found, "should find app version from chart info")
testCases := []struct {
name string
values map[string]string
expected string
}{
{
"defaultValues",
nil,
fmt.Sprintf("docker.io/falcosecurity/k8s-metacollector:%s", appVersion),
},
{
"changingImageTag",
map[string]string{
"image.tag": "testingTag",
},
"docker.io/falcosecurity/k8s-metacollector:testingTag",
},
{
"changingImageRepo",
map[string]string{
"image.repository": "falcosecurity/testingRepository",
},
fmt.Sprintf("docker.io/falcosecurity/testingRepository:%s", appVersion),
},
{
"changingImageRegistry",
map[string]string{
"image.registry": "ghcr.io",
},
fmt.Sprintf("ghcr.io/falcosecurity/k8s-metacollector:%s", appVersion),
},
{
"changingAllImageFields",
map[string]string{
"image.registry": "ghcr.io",
"image.repository": "falcosecurity/testingRepository",
"image.tag": "testingTag",
},
"ghcr.io/falcosecurity/testingRepository:testingTag",
},
}
for _, testCase := range testCases {
testCase := testCase
s.Run(testCase.name, func() {
subT := s.T()
subT.Parallel()
options := &helm.Options{SetValues: testCase.values}
output, err := helm.RenderTemplateE(subT, options, s.chartPath, s.releaseName, s.templates)
s.NoError(err, "should succeed")
var deployment appsv1.Deployment
helm.UnmarshalK8SYaml(subT, output, &deployment)
s.Equal(testCase.expected, deployment.Spec.Template.Spec.Containers[0].Image, "should be equal")
})
}
}
func (s *deploymentTemplateTest) TestImagePullPolicy() {
testCases := []struct {
name string
values map[string]string
expected string
}{
{
"defaultValues",
nil,
"IfNotPresent",
},
{
"changingPullPolicy",
map[string]string{
"image.pullPolicy": "Always",
},
"Always",
},
}
for _, testCase := range testCases {
testCase := testCase
s.Run(testCase.name, func() {
subT := s.T()
subT.Parallel()
options := &helm.Options{SetValues: testCase.values}
output, err := helm.RenderTemplateE(subT, options, s.chartPath, s.releaseName, s.templates)
s.NoError(err, "should succeed")
var deployment appsv1.Deployment
helm.UnmarshalK8SYaml(subT, output, &deployment)
s.Equal(testCase.expected, string(deployment.Spec.Template.Spec.Containers[0].ImagePullPolicy), "should be equal")
})
}
}
func (s *deploymentTemplateTest) TestImagePullSecrets() {
testCases := []struct {
name string
values map[string]string
expected string
}{
{
"defaultValues",
nil,
"",
},
{
"changingPullPolicy",
map[string]string{
"image.pullSecrets[0].name": "my-pull-secret",
},
"my-pull-secret",
},
}
for _, testCase := range testCases {
testCase := testCase
s.Run(testCase.name, func() {
subT := s.T()
subT.Parallel()
options := &helm.Options{SetValues: testCase.values}
output, err := helm.RenderTemplateE(subT, options, s.chartPath, s.releaseName, s.templates)
s.NoError(err, "should succeed")
var deployment appsv1.Deployment
helm.UnmarshalK8SYaml(subT, output, &deployment)
if testCase.expected == "" {
s.Nil(deployment.Spec.Template.Spec.ImagePullSecrets, "should be nil")
} else {
s.Equal(testCase.expected, deployment.Spec.Template.Spec.ImagePullSecrets[0].Name, "should be equal")
}
})
}
}
func (s *deploymentTemplateTest) TestServiceAccount() {
testCases := []struct {
name string
values map[string]string
expected string
}{
{
"defaultValues",
nil,
s.releaseName,
},
{
"changingServiceAccountName",
map[string]string{
"serviceAccount.name": "service-account",
},
"service-account",
},
{
"disablingServiceAccount",
map[string]string{
"serviceAccount.create": "false",
},
"default",
},
{
"disablingServiceAccountAndSettingName",
map[string]string{
"serviceAccount.create": "false",
"serviceAccount.name": "service-account",
},
"service-account",
},
}
for _, testCase := range testCases {
testCase := testCase
s.Run(testCase.name, func() {
subT := s.T()
subT.Parallel()
options := &helm.Options{SetValues: testCase.values}
output, err := helm.RenderTemplateE(subT, options, s.chartPath, s.releaseName, s.templates)
s.NoError(err, "should succeed")
var deployment appsv1.Deployment
helm.UnmarshalK8SYaml(subT, output, &deployment)
s.Equal(testCase.expected, deployment.Spec.Template.Spec.ServiceAccountName, "should be equal")
})
}
}
func (s *deploymentTemplateTest) TestPodAnnotations() {
testCases := []struct {
name string
values map[string]string
expected map[string]string
}{
{
"defaultValues",
nil,
nil,
},
{
"settingPodAnnotations",
map[string]string{
"podAnnotations.my-annotation": "annotationValue",
},
map[string]string{
"my-annotation": "annotationValue",
},
},
}
for _, testCase := range testCases {
testCase := testCase
s.Run(testCase.name, func() {
subT := s.T()
subT.Parallel()
options := &helm.Options{SetValues: testCase.values}
output, err := helm.RenderTemplateE(subT, options, s.chartPath, s.releaseName, s.templates)
s.NoError(err, "should succeed")
var deployment appsv1.Deployment
helm.UnmarshalK8SYaml(subT, output, &deployment)
if testCase.expected == nil {
s.Nil(deployment.Spec.Template.Annotations, "should be nil")
} else {
for key, val := range testCase.expected {
val1 := deployment.Spec.Template.Annotations[key]
s.Equal(val, val1, "should contain all the added annotations")
}
}
})
}
}
func (s *deploymentTemplateTest) TestPodSecurityContext() {
testCases := []struct {
name string
values map[string]string
expected func(psc *corev1.PodSecurityContext)
}{
{
"defaultValues",
nil,
func(psc *corev1.PodSecurityContext) {
s.Equal(true, *psc.RunAsNonRoot, "runAsNonRoot should be set to true")
s.Equal(int64(1000), *psc.RunAsUser, "runAsUser should be set to 1000")
s.Equal(int64(1000), *psc.FSGroup, "fsGroup should be set to 1000")
s.Equal(int64(1000), *psc.RunAsGroup, "runAsGroup should be set to 1000")
s.Nil(psc.SELinuxOptions)
s.Nil(psc.WindowsOptions)
s.Nil(psc.SupplementalGroups)
s.Nil(psc.Sysctls)
s.Nil(psc.FSGroupChangePolicy)
s.Nil(psc.SeccompProfile)
},
},
{
"changingServiceAccountName",
map[string]string{
"podSecurityContext": "null",
},
func(psc *corev1.PodSecurityContext) {
s.Nil(psc, "podSecurityContext should be set to nil")
},
},
}
for _, testCase := range testCases {
testCase := testCase
s.Run(testCase.name, func() {
subT := s.T()
subT.Parallel()
options := &helm.Options{SetValues: testCase.values}
output, err := helm.RenderTemplateE(subT, options, s.chartPath, s.releaseName, s.templates)
s.NoError(err, "should succeed")
var deployment appsv1.Deployment
helm.UnmarshalK8SYaml(subT, output, &deployment)
testCase.expected(deployment.Spec.Template.Spec.SecurityContext)
})
}
}
func (s *deploymentTemplateTest) TestContainerSecurityContext() {
testCases := []struct {
name string
values map[string]string
expected func(sc *corev1.SecurityContext)
}{
{
"defaultValues",
nil,
func(sc *corev1.SecurityContext) {
s.Len(sc.Capabilities.Drop, 1, "capabilities in drop should be set to one")
s.Equal("ALL", string(sc.Capabilities.Drop[0]), "should drop all capabilities")
s.Nil(sc.Capabilities.Add)
s.Nil(sc.Privileged)
s.Nil(sc.SELinuxOptions)
s.Nil(sc.WindowsOptions)
s.Nil(sc.RunAsUser)
s.Nil(sc.RunAsGroup)
s.Nil(sc.RunAsNonRoot)
s.Nil(sc.ReadOnlyRootFilesystem)
s.Nil(sc.AllowPrivilegeEscalation)
s.Nil(sc.ProcMount)
s.Nil(sc.SeccompProfile)
},
},
{
"changingServiceAccountName",
map[string]string{
"containerSecurityContext": "null",
},
func(sc *corev1.SecurityContext) {
s.Nil(sc, "containerSecurityContext should be set to nil")
},
},
}
for _, testCase := range testCases {
testCase := testCase
s.Run(testCase.name, func() {
subT := s.T()
subT.Parallel()
options := &helm.Options{SetValues: testCase.values}
output, err := helm.RenderTemplateE(subT, options, s.chartPath, s.releaseName, s.templates)
s.NoError(err, "should succeed")
var deployment appsv1.Deployment
helm.UnmarshalK8SYaml(subT, output, &deployment)
testCase.expected(deployment.Spec.Template.Spec.Containers[0].SecurityContext)
})
}
}
func (s *deploymentTemplateTest) TestResources() {
testCases := []struct {
name string
values map[string]string
expected func(res corev1.ResourceRequirements)
}{
{
"defaultValues",
nil,
func(res corev1.ResourceRequirements) {
s.Nil(res.Claims)
s.Nil(res.Requests)
s.Nil(res.Limits)
},
},
}
for _, testCase := range testCases {
testCase := testCase
s.Run(testCase.name, func() {
subT := s.T()
subT.Parallel()
options := &helm.Options{SetValues: testCase.values}
output, err := helm.RenderTemplateE(subT, options, s.chartPath, s.releaseName, s.templates)
s.NoError(err, "should succeed")
var deployment appsv1.Deployment
helm.UnmarshalK8SYaml(subT, output, &deployment)
testCase.expected(deployment.Spec.Template.Spec.Containers[0].Resources)
})
}
}
func (s *deploymentTemplateTest) TestNodeSelector() {
testCases := []struct {
name string
values map[string]string
expected func(ns map[string]string)
}{
{
"defaultValues",
nil,
func(ns map[string]string) {
s.Nil(ns, "should be nil")
},
},
{
"Setting nodeSelector",
map[string]string{
"nodeSelector.mySelector": "myNode",
},
func(ns map[string]string) {
value, ok := ns["mySelector"]
s.True(ok, "should find the key")
s.Equal("myNode", value, "should be equal")
},
},
}
for _, testCase := range testCases {
testCase := testCase
s.Run(testCase.name, func() {
subT := s.T()
subT.Parallel()
options := &helm.Options{SetValues: testCase.values}
output, err := helm.RenderTemplateE(subT, options, s.chartPath, s.releaseName, s.templates)
s.NoError(err, "should succeed")
var deployment appsv1.Deployment
helm.UnmarshalK8SYaml(subT, output, &deployment)
testCase.expected(deployment.Spec.Template.Spec.NodeSelector)
})
}
}
func (s *deploymentTemplateTest) TestTolerations() {
tolerationString := `[
{
"key": "key1",
"operator": "Equal",
"value": "value1",
"effect": "NoSchedule"
}
]`
var tolerations []corev1.Toleration
err := json.Unmarshal([]byte(tolerationString), &tolerations)
s.NoError(err)
testCases := []struct {
name string
values map[string]string
expected func(tol []corev1.Toleration)
}{
{
"defaultValues",
nil,
func(tol []corev1.Toleration) {
s.Nil(tol, "should be nil")
},
},
{
"Setting tolerations",
map[string]string{
"tolerations": tolerationString,
},
func(tol []corev1.Toleration) {
s.Len(tol, 1, "should have only one toleration")
s.True(reflect.DeepEqual(tol[0], tolerations[0]), "should be equal")
},
},
}
for _, testCase := range testCases {
testCase := testCase
s.Run(testCase.name, func() {
subT := s.T()
subT.Parallel()
options := &helm.Options{SetJsonValues: testCase.values}
output, err := helm.RenderTemplateE(subT, options, s.chartPath, s.releaseName, s.templates)
s.NoError(err, "should succeed")
var deployment appsv1.Deployment
helm.UnmarshalK8SYaml(subT, output, &deployment)
testCase.expected(deployment.Spec.Template.Spec.Tolerations)
})
}
}
func (s *deploymentTemplateTest) TestAffinity() {
affinityString := `{
"nodeAffinity": {
"requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [
{
"matchExpressions": [
{
"key": "disktype",
"operator": "In",
"values": [
"ssd"
]
}
]
}
]
}
}
}`
var affinity corev1.Affinity
err := json.Unmarshal([]byte(affinityString), &affinity)
s.NoError(err)
testCases := []struct {
name string
values map[string]string
expected func(aff *corev1.Affinity)
}{
{
"defaultValues",
nil,
func(aff *corev1.Affinity) {
s.Nil(aff, "should be nil")
},
},
{
"Setting affinity",
map[string]string{
"affinity": affinityString,
},
func(aff *corev1.Affinity) {
s.True(reflect.DeepEqual(affinity, *aff), "should be equal")
},
},
}
for _, testCase := range testCases {
testCase := testCase
s.Run(testCase.name, func() {
subT := s.T()
subT.Parallel()
options := &helm.Options{SetJsonValues: testCase.values}
output, err := helm.RenderTemplateE(subT, options, s.chartPath, s.releaseName, s.templates)
s.NoError(err, "should succeed")
var deployment appsv1.Deployment
helm.UnmarshalK8SYaml(subT, output, &deployment)
testCase.expected(deployment.Spec.Template.Spec.Affinity)
})
}
}
func (s *deploymentTemplateTest) TestLiveness() {
livenessProbeString := `{
"httpGet": {
"path": "/healthz",
"port": 8081
},
"initialDelaySeconds": 45,
"timeoutSeconds": 5,
"periodSeconds": 15
}`
var liveness corev1.Probe
err := json.Unmarshal([]byte(livenessProbeString), &liveness)
s.NoError(err)
testCases := []struct {
name string
values map[string]string
expected func(probe *corev1.Probe)
}{
{
"defaultValues",
nil,
func(probe *corev1.Probe) {
s.True(reflect.DeepEqual(*probe, liveness), "should be equal")
},
},
}
for _, testCase := range testCases {
testCase := testCase
s.Run(testCase.name, func() {
subT := s.T()
subT.Parallel()
options := &helm.Options{SetJsonValues: testCase.values}
output, err := helm.RenderTemplateE(subT, options, s.chartPath, s.releaseName, s.templates)
s.NoError(err, "should succeed")
var deployment appsv1.Deployment
helm.UnmarshalK8SYaml(subT, output, &deployment)
testCase.expected(deployment.Spec.Template.Spec.Containers[0].LivenessProbe)
})
}
}
func (s *deploymentTemplateTest) TestReadiness() {
readinessProbeString := `{
"httpGet": {
"path": "/readyz",
"port": 8081
},
"initialDelaySeconds": 30,
"timeoutSeconds": 5,
"periodSeconds": 15
}`
var readiness corev1.Probe
err := json.Unmarshal([]byte(readinessProbeString), &readiness)
s.NoError(err)
testCases := []struct {
name string
values map[string]string
expected func(probe *corev1.Probe)
}{
{
"defaultValues",
nil,
func(probe *corev1.Probe) {
s.True(reflect.DeepEqual(*probe, readiness), "should be equal")
},
},
}
for _, testCase := range testCases {
testCase := testCase
s.Run(testCase.name, func() {
subT := s.T()
subT.Parallel()
options := &helm.Options{SetJsonValues: testCase.values}
output, err := helm.RenderTemplateE(subT, options, s.chartPath, s.releaseName, s.templates)
s.NoError(err, "should succeed")
var deployment appsv1.Deployment
helm.UnmarshalK8SYaml(subT, output, &deployment)
testCase.expected(deployment.Spec.Template.Spec.Containers[0].ReadinessProbe)
})
}
}
func (s *deploymentTemplateTest) TestContainerPorts() {
newPorts := `{
"enabled": true,
"type": "ClusterIP",
"ports": {
"metrics": {
"port": 8080,
"targetPort": "metrics",
"protocol": "TCP"
},
"health-probe": {
"port": 8081,
"targetPort": "health-probe",
"protocol": "TCP"
},
"broker-grpc": {
"port": 45000,
"targetPort": "broker-grpc",
"protocol": "TCP"
},
"myNewPort": {
"port": 1111,
"targetPort": "myNewPort",
"protocol": "UDP"
}
}
}`
testCases := []struct {
name string
values map[string]string
expected func(p []corev1.ContainerPort)
}{
{
"defaultValues",
nil,
func(p []corev1.ContainerPort) {
portsJSON := `[
{
"name": "broker-grpc",
"containerPort": 45000,
"protocol": "TCP"
},
{
"name": "health-probe",
"containerPort": 8081,
"protocol": "TCP"
},
{
"name": "metrics",
"containerPort": 8080,
"protocol": "TCP"
}
]`
var ports []corev1.ContainerPort
err := json.Unmarshal([]byte(portsJSON), &ports)
s.NoError(err)
s.True(reflect.DeepEqual(ports, p), "should be equal")
},
},
{
"addNewPort",
map[string]string{
"service": newPorts,
},
func(p []corev1.ContainerPort) {
portsJSON := `[
{
"name": "broker-grpc",
"containerPort": 45000,
"protocol": "TCP"
},
{
"name": "health-probe",
"containerPort": 8081,
"protocol": "TCP"
},
{
"name": "metrics",
"containerPort": 8080,
"protocol": "TCP"
},
{
"name": "myNewPort",
"containerPort": 1111,
"protocol": "UDP"
}
]`
var ports []corev1.ContainerPort
err := json.Unmarshal([]byte(portsJSON), &ports)
s.NoError(err)
s.True(reflect.DeepEqual(ports, p), "should be equal")
},
},
}
for _, testCase := range testCases {
testCase := testCase
s.Run(testCase.name, func() {
subT := s.T()
subT.Parallel()
options := &helm.Options{SetJsonValues: testCase.values}
output, err := helm.RenderTemplateE(subT, options, s.chartPath, s.releaseName, s.templates)
s.NoError(err, "should succeed")
var deployment appsv1.Deployment
helm.UnmarshalK8SYaml(subT, output, &deployment)
testCase.expected(deployment.Spec.Template.Spec.Containers[0].Ports)
})
}
}
func (s *deploymentTemplateTest) TestReplicaCount() {
testCases := []struct {
name string
values map[string]string
expected int32
}{
{
"defaultValues",
nil,
1,
},
}
for _, testCase := range testCases {
testCase := testCase
s.Run(testCase.name, func() {
subT := s.T()
subT.Parallel()
options := &helm.Options{SetValues: testCase.values}
output, err := helm.RenderTemplateE(subT, options, s.chartPath, s.releaseName, s.templates)
s.NoError(err, "should succeed")
var deployment appsv1.Deployment
helm.UnmarshalK8SYaml(subT, output, &deployment)
s.Equal(testCase.expected, (*deployment.Spec.Replicas), "should be equal")
})
}
}

View File

@ -1,144 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright 2024 The Falco Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package unit
import (
"fmt"
"io"
"os"
"path/filepath"
"strings"
"testing"
"github.com/gruntwork-io/terratest/modules/helm"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
corev1 "k8s.io/api/core/v1"
)
type grafanaDashboardsTemplateTest struct {
suite.Suite
chartPath string
releaseName string
namespace string
templates []string
}
func TestGrafanaDashboardsTemplate(t *testing.T) {
t.Parallel()
chartFullPath, err := filepath.Abs(chartPath)
require.NoError(t, err)
suite.Run(t, &grafanaDashboardsTemplateTest{
Suite: suite.Suite{},
chartPath: chartFullPath,
releaseName: "k8s-metacollector-test",
namespace: "metacollector-test",
templates: []string{"templates/collector-dashboard-grafana.yaml"},
})
}
func (g *grafanaDashboardsTemplateTest) TestCreationDefaultValues() {
// Render the dashboard configmap and check that it has not been rendered.
_, err := helm.RenderTemplateE(g.T(), &helm.Options{}, g.chartPath, g.releaseName, g.templates, fmt.Sprintf("--namespace=%s", g.namespace))
g.Error(err, "should error")
g.Equal("error while running command: exit status 1; Error: could not find template templates/collector-dashboard-grafana.yaml in chart", err.Error())
}
func (g *grafanaDashboardsTemplateTest) TestConfig() {
testCases := []struct {
name string
values map[string]string
expected func(cm *corev1.ConfigMap)
}{
{"dashboard enabled",
map[string]string{
"grafana.dashboards.enabled": "true",
},
func(cm *corev1.ConfigMap) {
// Check that the name is the expected one.
g.Equal("k8s-metacollector-grafana-dashboard", cm.Name)
// Check the namespace.
g.Equal(g.namespace, cm.Namespace)
g.Nil(cm.Annotations)
},
},
{"namespace",
map[string]string{
"grafana.dashboards.enabled": "true",
"grafana.dashboards.configMaps.collector.namespace": "custom-namespace",
},
func(cm *corev1.ConfigMap) {
// Check that the name is the expected one.
g.Equal("k8s-metacollector-grafana-dashboard", cm.Name)
// Check the namespace.
g.Equal("custom-namespace", cm.Namespace)
g.Nil(cm.Annotations)
},
},
{"folder",
map[string]string{
"grafana.dashboards.enabled": "true",
"grafana.dashboards.configMaps.collector.folder": "custom-folder",
},
func(cm *corev1.ConfigMap) {
// Check that the name is the expected one.
g.Equal("k8s-metacollector-grafana-dashboard", cm.Name)
g.NotNil(cm.Annotations)
g.Len(cm.Annotations, 2)
// Check sidecar annotation.
val, ok := cm.Annotations["k8s-sidecar-target-directory"]
g.True(ok)
g.Equal("/tmp/dashboards/custom-folder", val)
// Check grafana annotation.
val, ok = cm.Annotations["grafana_dashboard_folder"]
g.True(ok)
g.Equal("custom-folder", val)
},
},
}
for _, testCase := range testCases {
testCase := testCase
g.Run(testCase.name, func() {
subT := g.T()
subT.Parallel()
options := &helm.Options{SetValues: testCase.values}
// Render the configmap unmarshal it.
output, err := helm.RenderTemplateE(subT, options, g.chartPath, g.releaseName, g.templates, "--namespace="+g.namespace)
g.NoError(err, "should succeed")
var cfgMap corev1.ConfigMap
helm.UnmarshalK8SYaml(subT, output, &cfgMap)
// Common checks
// Check that contains the right label.
g.Contains(cfgMap.Labels, "grafana_dashboard")
// Check that the dashboard is contained in the config map.
file, err := os.Open("../../dashboards/k8s-metacollector-dashboard.json")
g.NoError(err)
content, err := io.ReadAll(file)
g.NoError(err)
cfgData, ok := cfgMap.Data["dashboard.json"]
g.True(ok)
g.Equal(strings.TrimRight(string(content), "\n"), cfgData)
testCase.expected(&cfgMap)
})
}
}

View File

@ -1,172 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright 2024 The Falco Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package unit
import (
"path/filepath"
"testing"
"github.com/gruntwork-io/terratest/modules/helm"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
)
// Type used to implement the testing suite for the service account
// and the related resources: clusterrole, clusterrolebinding.
type serviceAccountTemplateTest struct {
suite.Suite
chartPath string
releaseName string
namespace string
templates []string
}
func TestServiceAccountTemplate(t *testing.T) {
t.Parallel()
chartFullPath, err := filepath.Abs(chartPath)
require.NoError(t, err)
suite.Run(t, &serviceAccountTemplateTest{
Suite: suite.Suite{},
chartPath: chartFullPath,
releaseName: "k8s-metacollector-test",
namespace: "metacollector-test",
templates: []string{"templates/serviceaccount.yaml"},
})
}
func (s *serviceAccountTemplateTest) TestSVCAccountResourceCreation() {
testCases := []struct {
name string
values map[string]string
}{
{"defaultValues",
nil,
},
{"changeName",
map[string]string{
"serviceAccount.name": "TestName",
},
},
}
for _, testCase := range testCases {
testCase := testCase
s.Run(testCase.name, func() {
subT := s.T()
subT.Parallel()
options := &helm.Options{SetValues: testCase.values}
// Render the service account and unmarshal it.
output, err := helm.RenderTemplateE(subT, options, s.chartPath, s.releaseName, s.templates)
s.NoError(err, "should succeed")
var svcAccount corev1.ServiceAccount
helm.UnmarshalK8SYaml(subT, output, &svcAccount)
// Render the clusterRole and unmarshal it.
output, err = helm.RenderTemplateE(subT, options, s.chartPath, s.releaseName, []string{"templates/clusterrole.yaml"})
s.NoError(err, "should succeed")
var clusterRole rbacv1.ClusterRole
helm.UnmarshalK8SYaml(subT, output, &clusterRole)
output, err = helm.RenderTemplateE(subT, options, s.chartPath, s.releaseName, []string{"templates/clusterrolebinding.yaml"})
s.NoError(err, "should succeed")
var clusterRoleBinding rbacv1.ClusterRoleBinding
helm.UnmarshalK8SYaml(subT, output, &clusterRoleBinding)
// Check that clusterRoleBinding references the right svc account.
s.Equal(svcAccount.Name, clusterRoleBinding.Subjects[0].Name, "should be the same")
s.Equal(svcAccount.Namespace, clusterRoleBinding.Subjects[0].Namespace, "should be the same")
// Check that clusterRoleBinding references the right clusterRole.
s.Equal(clusterRole.Name, clusterRoleBinding.RoleRef.Name)
if testCase.values != nil {
s.Equal("TestName", svcAccount.Name)
}
})
}
}
func (s *serviceAccountTemplateTest) TestSVCAccountResourceNonCreation() {
options := &helm.Options{SetValues: map[string]string{"serviceAccount.create": "false"}}
// Attempt to render the service account and check that it fails.
_, err := helm.RenderTemplateE(s.T(), options, s.chartPath, s.releaseName, s.templates)
s.Error(err, "should error")
s.Equal("error while running command: exit status 1; Error: could not find template templates/serviceaccount.yaml in chart", err.Error())
// Attempt to render the clusterRole and check that it fails.
_, err = helm.RenderTemplateE(s.T(), options, s.chartPath, s.releaseName, []string{"templates/clusterrole.yaml"})
s.Error(err, "should error")
s.Equal("error while running command: exit status 1; Error: could not find template templates/clusterrole.yaml in chart", err.Error())
_, err = helm.RenderTemplateE(s.T(), options, s.chartPath, s.releaseName, []string{"templates/clusterrolebinding.yaml"})
s.Error(err, "should error")
s.Equal("error while running command: exit status 1; Error: could not find template templates/clusterrolebinding.yaml in chart", err.Error())
}
func (s *serviceAccountTemplateTest) TestSVCAccountAnnotations() {
testCases := []struct {
name string
values map[string]string
expected map[string]string
}{
{
"defaultValues",
nil,
nil,
},
{
"settingSvcAccountAnnotations",
map[string]string{
"serviceAccount.annotations.my-annotation": "annotationValue",
},
map[string]string{
"my-annotation": "annotationValue",
},
},
}
for _, testCase := range testCases {
testCase := testCase
s.Run(testCase.name, func() {
subT := s.T()
subT.Parallel()
options := &helm.Options{SetValues: testCase.values}
// Render the service account and unmarshal it.
output, err := helm.RenderTemplateE(subT, options, s.chartPath, s.releaseName, s.templates)
s.NoError(err, "should succeed")
var svcAccount corev1.ServiceAccount
helm.UnmarshalK8SYaml(subT, output, &svcAccount)
if testCase.expected == nil {
s.Nil(svcAccount.Annotations, "should be nil")
} else {
for key, val := range testCase.expected {
val1 := svcAccount.Annotations[key]
s.Equal(val, val1, "should contain all the added annotations")
}
}
})
}
}
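
As a rough CLI equivalent of the cases above (chart path assumed to be ./k8s-metacollector), the service account, cluster role, and binding can be rendered individually, and disappear entirely when creation is disabled:

```bash
# Render the RBAC resources with a custom service account name.
helm template k8s-metacollector-test ./k8s-metacollector \
  --show-only templates/serviceaccount.yaml \
  --show-only templates/clusterrolebinding.yaml \
  --set serviceAccount.name=TestName

# With creation disabled the templates are not rendered; helm exits with
# "could not find template ... in chart", which is what the tests assert.
helm template k8s-metacollector-test ./k8s-metacollector \
  --show-only templates/serviceaccount.yaml \
  --set serviceAccount.create=false
```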

View File

@ -1,93 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright 2024 The Falco Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package unit
import (
"encoding/json"
"path/filepath"
"reflect"
"testing"
"github.com/gruntwork-io/terratest/modules/helm"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
)
type serviceMonitorTemplateTest struct {
suite.Suite
chartPath string
releaseName string
namespace string
templates []string
}
func TestServiceMonitorTemplate(t *testing.T) {
t.Parallel()
chartFullPath, err := filepath.Abs(chartPath)
require.NoError(t, err)
suite.Run(t, &serviceMonitorTemplateTest{
Suite: suite.Suite{},
chartPath: chartFullPath,
releaseName: "k8s-metacollector-test",
namespace: "metacollector-test",
templates: []string{"templates/servicemonitor.yaml"},
})
}
func (s *serviceMonitorTemplateTest) TestCreationDefaultValues() {
// Attempt to render the servicemonitor and check that it is not rendered by default.
_, err := helm.RenderTemplateE(s.T(), &helm.Options{}, s.chartPath, s.releaseName, s.templates)
s.Error(err, "should error")
s.Equal("error while running command: exit status 1; Error: could not find template templates/servicemonitor.yaml in chart", err.Error())
}
func (s *serviceMonitorTemplateTest) TestEndpoint() {
defaultEndpointsJSON := `[
{
"port": "metrics",
"interval": "15s",
"scrapeTimeout": "10s",
"honorLabels": true,
"path": "/metrics",
"scheme": "http"
}
]`
var defaultEndpoints []monitoringv1.Endpoint
err := json.Unmarshal([]byte(defaultEndpointsJSON), &defaultEndpoints)
s.NoError(err)
options := &helm.Options{SetValues: map[string]string{"serviceMonitor.create": "true"}}
output := helm.RenderTemplate(s.T(), options, s.chartPath, s.releaseName, s.templates)
var svcMonitor monitoringv1.ServiceMonitor
helm.UnmarshalK8SYaml(s.T(), output, &svcMonitor)
s.Len(svcMonitor.Spec.Endpoints, 1, "should have only one endpoint")
s.True(reflect.DeepEqual(svcMonitor.Spec.Endpoints[0], defaultEndpoints[0]))
}
func (s *serviceMonitorTemplateTest) TestNamespaceSelector() {
options := &helm.Options{SetValues: map[string]string{"serviceMonitor.create": "true"}}
output := helm.RenderTemplate(s.T(), options, s.chartPath, s.releaseName, s.templates)
var svcMonitor monitoringv1.ServiceMonitor
helm.UnmarshalK8SYaml(s.T(), output, &svcMonitor)
s.Len(svcMonitor.Spec.NamespaceSelector.MatchNames, 1)
s.Equal("default", svcMonitor.Spec.NamespaceSelector.MatchNames[0])
}
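
A CLI sketch of the same behavior (chart path assumed): the ServiceMonitor is rendered only when serviceMonitor.create is set, and its single endpoint should match the defaults asserted above.

```bash
# Render only the ServiceMonitor; without serviceMonitor.create=true this fails.
helm template k8s-metacollector-test ./k8s-metacollector \
  --show-only templates/servicemonitor.yaml \
  --set serviceMonitor.create=true
```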

View File

@ -1,220 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright 2024 The Falco Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package unit
import (
"encoding/json"
"path/filepath"
"reflect"
"testing"
"github.com/gruntwork-io/terratest/modules/helm"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
corev1 "k8s.io/api/core/v1"
)
type serviceTemplateTest struct {
suite.Suite
chartPath string
releaseName string
namespace string
templates []string
}
func TestServiceTemplate(t *testing.T) {
t.Parallel()
chartFullPath, err := filepath.Abs(chartPath)
require.NoError(t, err)
suite.Run(t, &serviceTemplateTest{
Suite: suite.Suite{},
chartPath: chartFullPath,
releaseName: "test",
namespace: "metacollector-test",
templates: []string{"templates/service.yaml"},
})
}
func (s *serviceTemplateTest) TestServiceCreateFalse() {
options := &helm.Options{SetValues: map[string]string{"service.create": "false"}}
// Attempt to render the service and check that it is not rendered.
_, err := helm.RenderTemplateE(s.T(), options, s.chartPath, s.releaseName, s.templates)
s.Error(err, "should error")
s.Equal("error while running command: exit status 1; Error: could not find template templates/service.yaml in chart", err.Error())
}
func (s *serviceTemplateTest) TestServiceType() {
testCases := []struct {
name string
values map[string]string
expected string
}{
{"defaultValues",
nil,
"ClusterIP",
},
{"NodePort",
map[string]string{
"service.type": "NodePort",
},
"NodePort",
},
}
for _, testCase := range testCases {
testCase := testCase
s.Run(testCase.name, func() {
subT := s.T()
subT.Parallel()
options := &helm.Options{SetValues: testCase.values}
// Render the service and unmarshal it.
output, err := helm.RenderTemplateE(subT, options, s.chartPath, s.releaseName, s.templates)
s.NoError(err, "should succeed")
var svc corev1.Service
helm.UnmarshalK8SYaml(subT, output, &svc)
s.Equal(testCase.expected, string(svc.Spec.Type))
})
}
}
func (s *serviceTemplateTest) TestServicePorts() {
newPorts := `{
"enabled": true,
"type": "ClusterIP",
"ports": {
"metrics": {
"port": 8080,
"targetPort": "metrics",
"protocol": "TCP"
},
"health-probe": {
"port": 8081,
"targetPort": "health-probe",
"protocol": "TCP"
},
"broker-grpc": {
"port": 45000,
"targetPort": "broker-grpc",
"protocol": "TCP"
},
"myNewPort": {
"port": 1111,
"targetPort": "myNewPort",
"protocol": "UDP"
}
}
}`
testCases := []struct {
name string
values map[string]string
expected func(p []corev1.ServicePort)
}{
{
"defaultValues",
nil,
func(p []corev1.ServicePort) {
portsJSON := `[
{
"name": "broker-grpc",
"port": 45000,
"protocol": "TCP",
"targetPort": "broker-grpc"
},
{
"name": "health-probe",
"port": 8081,
"protocol": "TCP",
"targetPort": "health-probe"
},
{
"name": "metrics",
"port": 8080,
"protocol": "TCP",
"targetPort": "metrics"
}
]`
var ports []corev1.ServicePort
err := json.Unmarshal([]byte(portsJSON), &ports)
s.NoError(err)
s.True(reflect.DeepEqual(ports, p), "should be equal")
},
},
{
"addNewPort",
map[string]string{
"service": newPorts,
},
func(p []corev1.ServicePort) {
portsJSON := `[
{
"name": "broker-grpc",
"port": 45000,
"protocol": "TCP",
"targetPort": "broker-grpc"
},
{
"name": "health-probe",
"port": 8081,
"protocol": "TCP",
"targetPort": "health-probe"
},
{
"name": "metrics",
"port": 8080,
"protocol": "TCP",
"targetPort": "metrics"
},
{
"name": "myNewPort",
"port": 1111,
"protocol": "UDP",
"targetPort": "myNewPort"
}
]`
var ports []corev1.ServicePort
err := json.Unmarshal([]byte(portsJSON), &ports)
s.NoError(err)
s.True(reflect.DeepEqual(ports, p), "should be equal")
},
},
}
for _, testCase := range testCases {
testCase := testCase
s.Run(testCase.name, func() {
subT := s.T()
subT.Parallel()
options := &helm.Options{SetJsonValues: testCase.values}
output, err := helm.RenderTemplateE(subT, options, s.chartPath, s.releaseName, s.templates)
s.NoError(err, "should succeed")
var svc corev1.Service
helm.UnmarshalK8SYaml(subT, output, &svc)
testCase.expected(svc.Spec.Ports)
})
}
}
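
SetJsonValues in the test above maps to Helm's --set-json flag. As a sketch (chart path assumed, and --set-json requires Helm 3.10 or newer), the extra UDP port can be added without redefining the whole service block, since user-supplied values are deep-merged with the chart defaults:

```bash
# Add a fourth service port on top of the three defaults.
helm template test ./k8s-metacollector \
  --show-only templates/service.yaml \
  --set-json 'service.ports.myNewPort={"port":1111,"targetPort":"myNewPort","protocol":"UDP"}'
```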

View File

@ -1,204 +0,0 @@
# Default values for k8s-metacollector.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# -- replicaCount is the number of identical copies of the k8s-metacollector.
replicaCount: 1
# -- image is the configuration for the k8s-metacollector image.
image:
# -- pullPolicy is the policy used to determine when a node should attempt to pull the container image.
pullPolicy: IfNotPresent
# -- pullSecrets is a list of secrets containing credentials used when pulling from private/secure registries.
pullSecrets: []
# -- registry is the image registry to pull from.
registry: docker.io
# -- repository is the image repository to pull from
repository: falcosecurity/k8s-metacollector
# -- tag is the image tag to pull. Overrides the image tag whose default is the chart appVersion.
tag: ""
# -- nameOverride is the name used to override the release name for k8s-metacollector components.
nameOverride: ""
# -- fullnameOverride is the same as nameOverride, but overrides the full name.
fullnameOverride: ""
# -- namespaceOverride overrides the deployment namespace. It's useful for multi-namespace deployments in combined charts.
namespaceOverride: ""
# -- serviceAccount is the configuration for the service account.
serviceAccount:
# -- create specifies whether a service account should be created.
create: true
# -- annotations to add to the service account.
annotations: {}
# -- name is the name of the service account to use.
# -- If not set and create is true, a name is generated using the full name template.
name: ""
# -- podAnnotations are custom annotations to be added to the pod.
podAnnotations: {}
# -- podLabels are labels to be added to the pod.
podLabels: {}
# -- podSecurityContext holds the security settings for the pod.
# -- These settings are overridden by the ones specified for the container when there is overlap.
podSecurityContext:
# -- runAsNonRoot when set to true enforces that the specified container runs as a non-root user.
runAsNonRoot: true
# -- runAsUser specifies the user ID (UID) that the containers inside the pod should run as.
runAsUser: 1000
# -- runAsGroup specifies the group ID (GID) that the containers inside the pod should run as.
runAsGroup: 1000
# -- fsGroup specifies the group ID (GID) that should be used for the volume mounted within a Pod.
fsGroup: 1000
# -- containerSecurityContext holds the security settings for the container.
containerSecurityContext:
# -- capabilities defines the fine-grained privileges that can be assigned to processes.
capabilities:
# -- drop drops the given set of privileges.
drop:
- ALL
# -- service exposes the k8s-metacollector services to be accessed from within the cluster.
# ref: https://kubernetes.io/docs/concepts/services-networking/service/
service:
# -- create specifies whether a service should be created.
create: true
# -- type denotes the service type. Setting it to "ClusterIP" ensures that the services are accessible
# from within the cluster.
type: ClusterIP
# -- ports denotes all the ports on which the Service will listen.
ports:
# -- metrics denotes a listening service named "metrics".
metrics:
# -- port is the port on which the Service will listen.
port: 8080
# -- targetPort is the port on which the Pod is listening.
targetPort: "metrics"
# -- protocol specifies the network protocol that the Service should use for the associated port.
protocol: "TCP"
# -- health-probe denotes a listening service named "health-probe"
health-probe:
# -- port is the port on which the Service will listen.
port: 8081
# -- targetPort is the port on which the Pod is listening.
targetPort: "health-probe"
# -- protocol specifies the network protocol that the Service should use for the associated port.
protocol: "TCP"
# -- broker-grpc denotes a listening service named "grpc-broker"
broker-grpc:
# -- port is the port on which the Service will listen.
port: 45000
# -- targetPort is the port on which the Pod is listening.
targetPort: "broker-grpc"
# -- protocol specifies the network protocol that the Service should use for the associated port.
protocol: "TCP"
# -- serviceMonitor holds the configuration for the ServiceMonitor CRD.
# A ServiceMonitor is a custom resource definition (CRD) used to configure how Prometheus should
# discover and scrape metrics from the k8s-metacollector service.
serviceMonitor:
# -- create specifies whether a ServiceMonitor CRD should be created for a prometheus operator.
# https://github.com/coreos/prometheus-operator
# Enable it only if the ServiceMonitor CRD is installed in your cluster.
create: false
# -- path at which the metrics are exposed by the k8s-metacollector.
path: /metrics
# -- labels set of labels to be applied to the ServiceMonitor resource.
# If your Prometheus deployment is configured to use serviceMonitorSelector, then add the right
# label here in order for the ServiceMonitor to be selected for target discovery.
labels: {}
# -- interval specifies the time interval at which Prometheus should scrape metrics from the service.
interval: 15s
# -- scheme specifies the network protocol used by the metrics endpoint, in this case HTTP.
scheme: http
# -- tlsConfig specifies TLS (Transport Layer Security) configuration for secure communication when
# scraping metrics from a service. It allows you to define the details of the TLS connection, such as
# CA certificate, client certificate, and client key. Currently, the k8s-metacollector does not support
# TLS configuration for the metrics endpoint.
tlsConfig: {}
# insecureSkipVerify: false
# caFile: /path/to/ca.crt
# certFile: /path/to/client.crt
# keyFile: /path/to/client.key
# -- scrapeTimeout determines the maximum time Prometheus should wait for a target to respond to a scrape request.
# If the target does not respond within the specified timeout, Prometheus considers the scrape as failed for
# that target.
scrapeTimeout: 10s
# -- relabelings configures the relabeling rules to apply to the targets' metadata labels.
relabelings: []
# -- targetLabels defines the labels which are transferred from the associated Kubernetes service object onto the ingested metrics.
targetLabels: []
# -- resources defines the computing resources (CPU and memory) that are allocated to the containers running within the Pod.
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
# -- nodeSelector specifies a set of key-value pairs that must match labels assigned to nodes
# for the Pod to be eligible for scheduling on that node.
nodeSelector: {}
# -- tolerations are applied to pods and allow them to be scheduled on nodes with matching taints.
tolerations: []
# -- affinity allows pod placement based on node characteristics, or any other custom labels assigned to nodes.
affinity: {}
# -- healthChecks contains the configuration for liveness and readiness probes.
healthChecks:
# -- livenessProbe is a diagnostic mechanism used to determine whether a container within a Pod is still running and healthy.
livenessProbe:
# -- httpGet specifies that the liveness probe will make an HTTP GET request to check the health of the container.
httpGet:
# -- path is the specific endpoint on which the HTTP GET request will be made.
path: /healthz
# -- port is the port on which the container exposes the "/healthz" endpoint.
port: 8081
# -- initialDelaySeconds tells the kubelet that it should wait X seconds before performing the first probe.
initialDelaySeconds: 45
# -- timeoutSeconds is the number of seconds after which the probe times out.
timeoutSeconds: 5
# -- periodSeconds specifies the interval at which the liveness probe will be repeated.
periodSeconds: 15
# -- readinessProbe is a mechanism used to determine whether a container within a Pod is ready to serve traffic.
readinessProbe:
# -- httpGet specifies that the readiness probe will make an HTTP GET request to check whether the container is ready.
httpGet:
# -- path is the specific endpoint on which the HTTP GET request will be made.
path: /readyz
# -- port is the port on which the container exposes the "/readyz" endpoint.
port: 8081
# -- initialDelaySeconds tells the kubelet that it should wait X seconds before performing the first probe.
initialDelaySeconds: 30
# -- timeoutSeconds is the number of seconds after which the probe times out.
timeoutSeconds: 5
# -- periodSeconds specifies the interval at which the readiness probe will be repeated.
periodSeconds: 15
# -- grafana contains the configuration related to grafana.
grafana:
# -- dashboards contains configuration for grafana dashboards.
dashboards:
# -- enabled specifies whether the dashboards should be deployed.
enabled: false
# -- configMaps to be deployed that contain a grafana dashboard.
configMaps:
# -- collector contains the configuration for collector's dashboard.
collector:
# -- name specifies the name for the configmap.
name: k8s-metacollector-grafana-dashboard
# -- namespace specifies the namespace for the configmap.
namespace: ""
# -- folder where the dashboard is stored by grafana.
folder: ""

View File

@ -1,3 +0,0 @@
# Enable automatic generation of release notes using GitHubs release notes generator.
# see: https://docs.github.com/en/repositories/releasing-projects-on-github/automatically-generated-release-notes
generate-release-notes: true

View File

@ -4,28 +4,6 @@
This file documents all notable changes to the `event-generator` Helm Chart. The release
numbering uses [semantic versioning](http://semver.org).
## v0.3.4
* Pass `--all` flag to event-generator binary to allow disabled rules to run, e.g. the k8saudit ruleset.
## v0.3.3
* Update README.md.
## v0.3.2
* No change to the chart itself. Updated README.md and Makefile.
## v0.3.1
* No-op change just to test the CI.
## v0.3.0
## Major Changes
* Support configuration of revisionHistoryLimit of the deployment
## v0.2.0
## Major Changes

Some files were not shown because too many files have changed in this diff.