Compare commits

...

22 Commits

Author SHA1 Message Date
dependabot[bot] 39bf675eb9
Merge 791aa1defd into 7ad10b8063 2025-08-08 06:29:19 +00:00
dependabot[bot] 7ad10b8063 chore(deps): Bump lycheeverse/lychee-action from 2.4.0 to 2.5.0
Bumps [lycheeverse/lychee-action](https://github.com/lycheeverse/lychee-action) from 2.4.0 to 2.5.0.
- [Release notes](https://github.com/lycheeverse/lychee-action/releases)
- [Commits](1d97d84f0b...5c4ee84814)

---
updated-dependencies:
- dependency-name: lycheeverse/lychee-action
  dependency-version: 2.5.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-08-08 08:29:15 +02:00
Federico Di Pierro cc96a4dde6 fix(charts/falco/tests): fixed Falco chart tests.
Signed-off-by: Federico Di Pierro <nierro92@gmail.com>
2025-08-04 22:36:52 +02:00
Federico Di Pierro 9717814edb update(charts/falco): updated CHANGELOG.
Signed-off-by: Federico Di Pierro <nierro92@gmail.com>
2025-08-04 22:36:52 +02:00
Federico Di Pierro 6305d9bf7d chore(charts/falco): bump chart version + variables.
Signed-off-by: Federico Di Pierro <nierro92@gmail.com>
2025-08-04 22:36:52 +02:00
Federico Di Pierro 0b9b5a01d4 update(charts/falco): bump container and k8smeta plugin to latest.
Signed-off-by: Federico Di Pierro <nierro92@gmail.com>
2025-08-04 22:36:52 +02:00
Leonardo Grasso 01ed738a2c docs(charts/falco): update docs for v6.2.1
Signed-off-by: Leonardo Grasso <me@leonardograsso.com>
2025-07-24 15:15:40 +02:00
Leonardo Grasso 11be245149 update(charts/falco): bump version to 6.2.1
Signed-off-by: Leonardo Grasso <me@leonardograsso.com>
2025-07-24 15:15:40 +02:00
Leonardo Grasso 65ba4c266e update(charts/falco): bump container plugin to v0.3.3
Signed-off-by: Leonardo Grasso <me@leonardograsso.com>
2025-07-24 15:15:40 +02:00
Leonardo Grasso 530eded713 docs(charts/falco): update docs for v6.2.0
Signed-off-by: Leonardo Grasso <me@leonardograsso.com>
2025-07-24 12:17:40 +02:00
Leonardo Grasso 9e1550ab44 update(charts/falco): bump charts to v6.2.0
Signed-off-by: Leonardo Grasso <me@leonardograsso.com>
2025-07-24 12:17:40 +02:00
Leonardo Grasso 3a7cb6edba update(charts/falco): bump container plugin to v0.3.2
Signed-off-by: Leonardo Grasso <me@leonardograsso.com>
2025-07-24 12:17:40 +02:00
Leonardo Grasso 2646171e4c chore(charts/falco): adapt volume mounts for new containerEngine
Signed-off-by: Leonardo Grasso <me@leonardograsso.com>
2025-07-24 12:17:40 +02:00
Leonardo Grasso 9f5ead4705 update(charts/falco): update containerEngines configuration
Signed-off-by: Leonardo Grasso <me@leonardograsso.com>
2025-07-24 12:17:40 +02:00
Benjamin FERNANDEZ 3cbf72bd9c feat(falco): Add possibility to custom falco pods hostname
Signed-off-by: Benjamin FERNANDEZ <benjamin2.fernandez.ext@orange.com>
2025-07-24 09:56:38 +02:00
Leonardo Grasso ff984cc8a8 update: remove falco-exporter
Signed-off-by: Leonardo Grasso <me@leonardograsso.com>
2025-07-22 15:06:29 +02:00
Leonardo Di Giovanna cd4dc68cb1 docs(OWNERS): add `ekoops` as approver
Signed-off-by: Leonardo Di Giovanna <41296180+ekoops@users.noreply.github.com>
2025-07-18 10:36:10 +02:00
Leonardo Di Giovanna 56f2eb7ccf update(charts/falco): update `README.md` for 6.0.2
Signed-off-by: Leonardo Di Giovanna <leonardodigiovanna1@gmail.com>
2025-07-01 14:36:21 +02:00
Leonardo Di Giovanna 489e4d67b6 update(charts/falco): update `CHANGELOG.md` for 6.0.2
Signed-off-by: Leonardo Di Giovanna <leonardodigiovanna1@gmail.com>
2025-07-01 14:36:21 +02:00
Leonardo Di Giovanna b821e9db06 update(falco): bump container plugin to 0.3.1
Signed-off-by: Leonardo Di Giovanna <leonardodigiovanna1@gmail.com>
2025-07-01 14:36:21 +02:00
Leonardo Di Giovanna 4ba195cc61 update(falco): upgrade chart for Falco 0.41.3
Signed-off-by: Leonardo Di Giovanna <leonardodigiovanna1@gmail.com>
2025-07-01 14:36:21 +02:00
dependabot[bot] 791aa1defd
chore(deps): Bump actions/setup-go from 5.4.0 to 5.5.0
Bumps [actions/setup-go](https://github.com/actions/setup-go) from 5.4.0 to 5.5.0.
- [Release notes](https://github.com/actions/setup-go/releases)
- [Commits](0aaccfd150...d35c59abb0)

---
updated-dependencies:
- dependency-name: actions/setup-go
  dependency-version: 5.5.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-05-08 23:23:52 +00:00
34 changed files with 318 additions and 2236 deletions

View File

@ -17,7 +17,7 @@ jobs:
fetch-depth: 0
- name: Link Checker
uses: lycheeverse/lychee-action@1d97d84f0bc547f7b25f4c2170d87d810dc2fb2c #v2.4.0
uses: lycheeverse/lychee-action@5c4ee84814c983aa7164eaee476f014e53ff3963 #v2.5.0
with:
args: --no-progress './**/*.yml' './**/*.yaml' './**/*.md' './**/*.gotmpl' './**/*.tpl' './**/OWNERS' './**/LICENSE'
token: ${{ secrets.GITHUB_TOKEN }}

View File

@ -62,7 +62,7 @@ jobs:
run: helm dependency update ./charts/falco
- name: Setup Go
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
with:
go-version: "1.21"
check-latest: true

1
OWNERS
View File

@ -3,6 +3,7 @@ approvers:
- Issif
- cpanato
- alacuku
- ekoops
reviewers:
- bencer
emeritus_approvers:

View File

@ -1,243 +0,0 @@
# Change Log
This file documents all notable changes to `falco-exporter` Helm Chart. The release
numbering uses [semantic versioning](http://semver.org).
## v0.12.2
* add deprecation notice
## v0.12.1
* fix bug in 'for' for falco exporter prometheus rules
## v0.12.0
* make 'for' configurable for falco exporter prometheus rules
## v0.11.0
* updated grafana dashboard
## v0.10.1
* Enhanced the service Monitor to support additional Properties.
## v0.10.0
* added ability to set the grafana folder annotation name
## v0.9.11
* fix dead links in README.md
## v0.9.10
* update configuration values in README.md
* introduce helm docs for the chart
## v0.9.9
* update tolerations
## v0.9.8
* add annotation for set of folder's grafana-chart
## v0.9.7
* noop change just to test the ci
## v0.9.6
### Minor Changes
* Bump falco-exporter to v0.8.3
## v0.9.5
### Minor Changes
* Removed unnecessary capabilities from security context
* Set the filesystem to read-only
## v0.9.4
### Minor Changes
* Add options to configure readiness/liveness probe values
## v0.9.3
### Minor Changes
* Bump falco-exporter to v0.8.2
## v0.9.2
### Minor Changes
* Add option to place Grafana dashboard in a folder
## v0.9.1
### Minor Changes
* Fix PSP allowed host path prefix to match grpc socket path change.
## v0.8.3
### Major Changes
* Changing the grpc socket path from `unix:///var/run/falco/falco.sock` to `unix:///run/falco/falco.sock`.
### Minor Changes
* Bump falco-exporter to v0.8.0
## v0.8.2
### Minor Changes
* Support configuration of updateStrategy of the Daemonset
## v0.8.0
* Upgrade falco-exporter version to v0.7.0 (see the [falco-exporter changelog](https://github.com/falcosecurity/falco-exporter/releases/tag/v0.7.0))
### Major Changes
* Add option to add labels to the Daemonset pods
## v0.7.2
### Minor Changes
* Add option to add labels to the Daemonset pods
## v0.7.1
### Minor Changes
* Fix `FalcoExporterAbsent` expression
## v0.7.0
### Major Changes
* Adds ability to create custom PrometheusRules for alerting
## v0.6.2
### Minor Changes
* Add Check availability of 'monitoring.coreos.com/v1' api version
## v0.6.1
### Minor Changes
* Add option to add annotations to the Daemonset
## v0.6.0
### Minor Changes
* Upgrade falco-exporter version to v0.6.0 (see the [falco-exporter changelog](https://github.com/falcosecurity/falco-exporter/releases/tag/v0.6.0))
## v0.5.2
### Minor Changes
* Make image registry configurable
## v0.5.1
* Display only non-zero rates in Grafana dashboard template
## v0.5.0
### Minor Changes
* Upgrade falco-exporter version to v0.5.0
* Add metrics about Falco drops
* Make `unix://` prefix optional
## v0.4.2
### Minor Changes
* Fix Prometheus datasource name reference in grafana dashboard template
## v0.4.1
### Minor Changes
* Support release namespace configuration
## v0.4.0
### Major Changes
* Add Mutual TLS for falco-exporter enable/disabled feature
## v0.3.8
### Minor Changes
* Replace extensions apiGroup/apiVersion because of deprecation
## v0.3.7
### Minor Changes
* Fixed falco-exporter PSP by allowing secret volumes
## v0.3.6
### Minor Changes
* Add SecurityContextConstraint to allow deploying in Openshift
## v0.3.5
### Minor Changes
* Added the possibility to automatically add a PSP (in combination with a Role and a RoleBindung) via the podSecurityPolicy values
* Namespaced the falco-exporter ServiceAccount and Service
## v0.3.4
### Minor Changes
* Add priorityClassName to values
## v0.3.3
### Minor Changes
* Add grafana dashboard to helm chart
## v0.3.2
### Minor Changes
* Fix for additional labels for falco-exporter servicemonitor
## v0.3.1
### Minor Changes
* Added support to deploy a Prometheus Service Monitor. It is disabled by default.
## v0.3.0
### Major Changes
* Chart moved to [falcosecurity/charts](https://github.com/falcosecurity/charts) repository
* gRPC over unix socket support (by default)
* Updated falco-exporter version to `0.3.0`
### Minor Changes
* README.md and CHANGELOG.md added

View File

@ -1,37 +0,0 @@
apiVersion: v2
name: falco-exporter
deprecated: true
description: DEPRECATED Prometheus Metrics Exporter for Falco output events
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 0.12.2
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
appVersion: 0.8.7
keywords:
- monitoring
- security
- alerting
- metric
- troubleshooting
- run-time
sources:
- https://github.com/falcosecurity/falco-exporter
maintainers:
- name: leogr
email: me@leonardograsso.com

View File

@ -1,79 +0,0 @@
# falco-exporter Helm Chart
[![Falco Ecosystem Repository](https://github.com/falcosecurity/evolution/blob/main/repos/badges/falco-ecosystem-blue.svg)](https://github.com/falcosecurity/evolution/blob/main/REPOSITORIES.md#ecosystem-scope) [![Deprecated](https://img.shields.io/badge/status-deprecated-inactive?style=for-the-badge)](https://github.com/falcosecurity/evolution/blob/main/REPOSITORIES.md#deprecated)
**NOTICE**: [falco-exporter](https://github.com/falcosecurity/falco-exporter) project is currently being **deprecated**. Contributions are not accepted, and the repository will be fully archived in the future. Starting from Falco version 0.38, Falco can expose Prometheus metrics directly, eliminating the need for a separate exporter. For further details, please refer to the [official documentation](https://falco.org/docs/metrics/).
---
Before using this chart, you need [Falco installed](https://falco.org/docs/installation/) and running with the [gRPC Output](https://falco.org/docs/grpc/) enabled (over Unix socket by default).
This chart is compatible with the [Falco Chart](https://github.com/falcosecurity/charts/tree/master/charts/falco) version `v1.2.0` or greater. Instructions to enable the gRPC Output in the Falco Helm Chart can be found [here](https://github.com/falcosecurity/charts/tree/master/charts/falco#enabling-grpc). We also strongly recommend using [gRPC over Unix socket](https://github.com/falcosecurity/charts/tree/master/charts/falco#grpc-over-unix-socket-default).
## Introduction
The chart deploys **falco-exporter** as a DaemonSet on your Kubernetes cluster. If a [Prometheus installation](https://github.com/helm/charts/tree/master/stable/prometheus) is running within your cluster, metrics provided by **falco-exporter** will be automatically discovered.
## Adding `falcosecurity` repository
Prior to installing the chart, add the `falcosecurity` charts repository:
```bash
helm repo add falcosecurity https://falcosecurity.github.io/charts
helm repo update
```
## Installing the Chart
To install the chart with the release name `falco-exporter` run:
```bash
helm install falco-exporter falcosecurity/falco-exporter
```
After a few seconds, **falco-exporter** should be running.
> **Tip**: List all releases using `helm list`, a release is a name used to track a specific deployment
## Uninstalling the Chart
To uninstall the `falco-exporter` deployment:
```bash
helm uninstall falco-exporter
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
```bash
helm install falco-exporter --set falco.grpcTimeout=3m falcosecurity/falco-exporter
```
Alternatively, a YAML file that specifies the parameters' values can be provided while installing the chart. For example,
```bash
helm install falco-exporter -f values.yaml falcosecurity/falco-exporter
```
### Enable Mutual TLS
Mutual TLS for `/metrics` endpoint can be enabled to prevent alerts content from being consumed by unauthorized components.
To install falco-exporter with Mutual TLS enabled, you have to:
```shell
helm install falco-exporter \
--set service.mTLS.enabled=true \
--set-file service.mTLS.server.key=/path/to/server.key \
--set-file service.mTLS.server.crt=/path/to/server.crt \
--set-file service.mTLS.ca.crt=/path/to/ca.crt \
falcosecurity/falco-exporter
```
> **Tip**: You can use the default [values.yaml](values.yaml)
## Configuration
The following table lists the main configurable parameters of the {{ template "chart.name" . }} chart v{{ template "chart.version" . }} and their default values. Please, refer to [values.yaml](./values.yaml) for the full list of configurable parameters.
{{ template "chart.valuesSection" . }}

View File

@ -1,164 +0,0 @@
# falco-exporter Helm Chart
[![Falco Ecosystem Repository](https://github.com/falcosecurity/evolution/blob/main/repos/badges/falco-ecosystem-blue.svg)](https://github.com/falcosecurity/evolution/blob/main/REPOSITORIES.md#ecosystem-scope) [![Deprecated](https://img.shields.io/badge/status-deprecated-inactive?style=for-the-badge)](https://github.com/falcosecurity/evolution/blob/main/REPOSITORIES.md#deprecated)
**NOTICE**: [falco-exporter](https://github.com/falcosecurity/falco-exporter) project is currently being **deprecated**. Contributions are not accepted, and the repository will be fully archived in the future. Starting from Falco version 0.38, Falco can expose Prometheus metrics directly, eliminating the need for a separate exporter. For further details, please refer to the [official documentation](https://falco.org/docs/metrics/).
---
Before using this chart, you need [Falco installed](https://falco.org/docs/installation/) and running with the [gRPC Output](https://falco.org/docs/grpc/) enabled (over Unix socket by default).
This chart is compatible with the [Falco Chart](https://github.com/falcosecurity/charts/tree/master/charts/falco) version `v1.2.0` or greater. Instructions to enable the gRPC Output in the Falco Helm Chart can be found [here](https://github.com/falcosecurity/charts/tree/master/charts/falco#enabling-grpc). We also strongly recommend using [gRPC over Unix socket](https://github.com/falcosecurity/charts/tree/master/charts/falco#grpc-over-unix-socket-default).
## Introduction
The chart deploys **falco-exporter** as a DaemonSet on your Kubernetes cluster. If a [Prometheus installation](https://github.com/helm/charts/tree/master/stable/prometheus) is running within your cluster, metrics provided by **falco-exporter** will be automatically discovered.
## Adding `falcosecurity` repository
Prior to installing the chart, add the `falcosecurity` charts repository:
```bash
helm repo add falcosecurity https://falcosecurity.github.io/charts
helm repo update
```
## Installing the Chart
To install the chart with the release name `falco-exporter` run:
```bash
helm install falco-exporter falcosecurity/falco-exporter
```
After a few seconds, **falco-exporter** should be running.
> **Tip**: List all releases using `helm list`, a release is a name used to track a specific deployment
## Uninstalling the Chart
To uninstall the `falco-exporter` deployment:
```bash
helm uninstall falco-exporter
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
```bash
helm install falco-exporter --set falco.grpcTimeout=3m falcosecurity/falco-exporter
```
Alternatively, a YAML file that specifies the parameters' values can be provided while installing the chart. For example,
```bash
helm install falco-exporter -f values.yaml falcosecurity/falco-exporter
```
### Enable Mutual TLS
Mutual TLS for `/metrics` endpoint can be enabled to prevent alerts content from being consumed by unauthorized components.
To install falco-exporter with Mutual TLS enabled, you have to:
```shell
helm install falco-exporter \
--set service.mTLS.enabled=true \
--set-file service.mTLS.server.key=/path/to/server.key \
--set-file service.mTLS.server.crt=/path/to/server.crt \
--set-file service.mTLS.ca.crt=/path/to/ca.crt \
falcosecurity/falco-exporter
```
> **Tip**: You can use the default [values.yaml](values.yaml)
## Configuration
The following table lists the main configurable parameters of the falco-exporter chart v0.12.2 and their default values. Please, refer to [values.yaml](./values.yaml) for the full list of configurable parameters.
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| affinity | object | `{}` | affinity allows pod placement based on node characteristics, or any other custom labels assigned to nodes. |
| daemonset | object | `{"annotations":{},"podLabels":{},"updateStrategy":{"type":"RollingUpdate"}}` | daemonset holds the configuration for the daemonset. |
| daemonset.annotations | object | `{}` | annotations to add to the DaemonSet pods. |
| daemonset.podLabels | object | `{}` | podLabels labels to add to the pods. |
| falco | object | `{"grpcTimeout":"2m","grpcUnixSocketPath":"unix:///run/falco/falco.sock"}` | falco the configuration to connect falco. |
| falco.grpcTimeout | string | `"2m"` | grpcTimeout timeout value for grpc connection. |
| falco.grpcUnixSocketPath | string | `"unix:///run/falco/falco.sock"` | grpcUnixSocketPath path to the falco's grpc unix socket. |
| fullnameOverride | string | `""` | fullNameOverride same as nameOverride but for the full name. |
| grafanaDashboard | object | `{"enabled":false,"folder":"","folderAnnotation":"grafana_dashboard_folder","namespace":"default","prometheusDatasourceName":"Prometheus"}` | grafanaDashboard contains the configuration related to grafana dashboards. |
| grafanaDashboard.enabled | bool | `false` | enabled specifies whether the dashboard should be deployed. |
| grafanaDashboard.folder | string | `""` | folder creates and set folderAnnotation to specify where the dashboard is stored in grafana. |
| grafanaDashboard.folderAnnotation | string | `"grafana_dashboard_folder"` | folderAnnotation sets the annotation's name used by folderAnnotation in grafana's helm-chart. |
| grafanaDashboard.namespace | string | `"default"` | namespace specifies the namespace for the configmap. |
| grafanaDashboard.prometheusDatasourceName | string | `"Prometheus"` | prometheusDatasourceName name of the data source. |
| healthChecks | object | `{"livenessProbe":{"initialDelaySeconds":60,"periodSeconds":15,"probesPort":19376,"timeoutSeconds":5},"readinessProbe":{"initialDelaySeconds":30,"periodSeconds":15,"probesPort":19376,"timeoutSeconds":5}}` | healthChecks contains the configuration for liveness and readiness probes. |
| healthChecks.livenessProbe | object | `{"initialDelaySeconds":60,"periodSeconds":15,"probesPort":19376,"timeoutSeconds":5}` | livenessProbe is a diagnostic mechanism used to determine whether a container within a Pod is still running and healthy. |
| healthChecks.livenessProbe.initialDelaySeconds | int | `60` | initialDelaySeconds tells the kubelet that it should wait X seconds before performing the first probe. |
| healthChecks.livenessProbe.periodSeconds | int | `15` | periodSeconds specifies the interval at which the liveness probe will be repeated. |
| healthChecks.livenessProbe.probesPort | int | `19376` | probesPort is liveness probes port. |
| healthChecks.livenessProbe.timeoutSeconds | int | `5` | timeoutSeconds number of seconds after which the probe times out. |
| healthChecks.readinessProbe | object | `{"initialDelaySeconds":30,"periodSeconds":15,"probesPort":19376,"timeoutSeconds":5}` | readinessProbe is a mechanism used to determine whether a container within a Pod is ready to serve traffic. |
| healthChecks.readinessProbe.initialDelaySeconds | int | `30` | initialDelaySeconds tells the kubelet that it should wait X seconds before performing the first probe. |
| healthChecks.readinessProbe.periodSeconds | int | `15` | periodSeconds specifies the interval at which the readiness probe will be repeated. |
| healthChecks.readinessProbe.timeoutSeconds | int | `5` | timeoutSeconds is the number of seconds after which the probe times out. |
| image | object | `{"pullPolicy":"IfNotPresent","registry":"docker.io","repository":"falcosecurity/falco-exporter","tag":"0.8.3"}` | image is the configuration for the exporter image. |
| image.pullPolicy | string | `"IfNotPresent"` | pullPolicy is the policy used to determine when a node should attempt to pull the container image. |
| image.registry | string | `"docker.io"` | registry is the image registry to pull from. |
| image.repository | string | `"falcosecurity/falco-exporter"` | repository is the image repository to pull from. |
| image.tag | string | `"0.8.3"` | tag is image tag to pull. |
| imagePullSecrets | list | `[]` | pullSecrets a list of secrets containing credentials used when pulling from private/secure registries. |
| nameOverride | string | `""` | nameOverride is the new name used to override the release name used for exporter's components. |
| nodeSelector | object | `{}` | nodeSelector specifies a set of key-value pairs that must match labels assigned to nodes for the Pod to be eligible for scheduling on that node |
| podSecurityContext | object | `{}` | podSecurityPolicy holds the security policy settings for the pod. |
| podSecurityPolicy | object | `{"annotations":{},"create":false,"name":""}` | podSecurityPolicy holds the security policy settings for the pod. |
| podSecurityPolicy.annotations | object | `{}` | annotations to add to the PSP, Role and RoleBinding |
| podSecurityPolicy.create | bool | `false` | create specifies whether a PSP, Role and RoleBinding should be created |
| podSecurityPolicy.name | string | `""` | name of the PSP, Role and RoleBinding to use. If not set and create is true, a name is generated using the fullname template |
| priorityClassName | string | `""` | priorityClassName specifies the name of the PriorityClass for the pods. |
| prometheusRules.alerts.additionalAlerts | object | `{}` | |
| prometheusRules.alerts.alert.enabled | bool | `true` | |
| prometheusRules.alerts.alert.for | string | `"5m"` | |
| prometheusRules.alerts.alert.rate_interval | string | `"5m"` | |
| prometheusRules.alerts.alert.threshold | int | `0` | |
| prometheusRules.alerts.critical.enabled | bool | `true` | |
| prometheusRules.alerts.critical.for | string | `"15m"` | |
| prometheusRules.alerts.critical.rate_interval | string | `"5m"` | |
| prometheusRules.alerts.critical.threshold | int | `0` | |
| prometheusRules.alerts.emergency.enabled | bool | `true` | |
| prometheusRules.alerts.emergency.for | string | `"1m"` | |
| prometheusRules.alerts.emergency.rate_interval | string | `"1m"` | |
| prometheusRules.alerts.emergency.threshold | int | `0` | |
| prometheusRules.alerts.error.enabled | bool | `true` | |
| prometheusRules.alerts.error.for | string | `"15m"` | |
| prometheusRules.alerts.error.rate_interval | string | `"5m"` | |
| prometheusRules.alerts.error.threshold | int | `0` | |
| prometheusRules.alerts.warning.enabled | bool | `true` | |
| prometheusRules.alerts.warning.for | string | `"15m"` | |
| prometheusRules.alerts.warning.rate_interval | string | `"5m"` | |
| prometheusRules.alerts.warning.threshold | int | `0` | |
| prometheusRules.enabled | bool | `false` | enabled specifies whether the prometheus rules should be deployed. |
| resources | object | `{}` | resources defines the computing resources (CPU and memory) that are allocated to the containers running within the Pod. |
| scc.create | bool | `true` | |
| securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"privileged":false,"readOnlyRootFilesystem":true,"seccompProfile":{"type":"RuntimeDefault"}}` | securityContext holds the security context for the daemonset. |
| securityContext.capabilities | object | `{"drop":["ALL"]}` | capabilities to be assigned to the daemonset. |
| service | object | `{"annotations":{"prometheus.io/port":"9376","prometheus.io/scrape":"true"},"clusterIP":"None","labels":{},"mTLS":{"enabled":false},"port":9376,"targetPort":9376,"type":"ClusterIP"}` | service exposes the exporter service to be accessed from within the cluster. |
| service.annotations | object | `{"prometheus.io/port":"9376","prometheus.io/scrape":"true"}` | annotations set of annotations to be applied to the service. |
| service.clusterIP | string | `"None"` | clusterIP set to none. It's headless service. |
| service.labels | object | `{}` | labels set of labels to be applied to the service. |
| service.mTLS | object | `{"enabled":false}` | mTLS mutual TLS for HTTP metrics server. |
| service.mTLS.enabled | bool | `false` | enabled specifies whether the mTLS should be enabled. |
| service.port | int | `9376` | port is the port on which the Service will listen. |
| service.targetPort | int | `9376` | targetPort is the port on which the Pod is listening. |
| service.type | string | `"ClusterIP"` | type denotes the service type. Setting it to "ClusterIP" we ensure that are accessible from within the cluster. |
| serviceAccount | object | `{"annotations":{},"create":true,"name":""}` | serviceAccount is the configuration for the service account. |
| serviceAccount.name | string | `""` | name is the name of the service account to use. If not set and create is true, a name is generated using the fullname template. If set and create is false, an already existing serviceAccount must be provided. |
| serviceMonitor | object | `{"additionalLabels":{},"additionalProperties":{},"enabled":false,"interval":"","scrapeTimeout":""}` | serviceMonitor holds the configuration for the ServiceMonitor CRD. A ServiceMonitor is a custom resource definition (CRD) used to configure how Prometheus should discover and scrape metrics from the exporter service. |
| serviceMonitor.additionalLabels | object | `{}` | additionalLabels specifies labels to be added on the Service Monitor. |
| serviceMonitor.additionalProperties | object | `{}` | additionalProperties allows setting additional properties on the endpoint such as relabelings, metricRelabelings etc. |
| serviceMonitor.enabled | bool | `false` | enable the deployment of a Service Monitor for the Prometheus Operator. |
| serviceMonitor.interval | string | `""` | interval specifies the time interval at which Prometheus should scrape metrics from the service. |
| serviceMonitor.scrapeTimeout | string | `""` | scrapeTimeout determines the maximum time Prometheus should wait for a target to respond to a scrape request. If the target does not respond within the specified timeout, Prometheus considers the scrape as failed for that target. |
| tolerations | list | `[{"effect":"NoSchedule","key":"node-role.kubernetes.io/master"},{"effect":"NoSchedule","key":"node-role.kubernetes.io/control-plane"}]` | tolerations are applied to pods and allow them to be scheduled on nodes with matching taints. |

View File

@ -1,22 +0,0 @@
The falco-exporter project is currently being DEPRECATED.
Contributions are not accepted, and the repository will be fully archived in the future.
Starting from Falco version 0.38, Falco can expose Prometheus metrics directly,
eliminating the need for a separate exporter.
For further details, please refer to the https://falco.org/docs/metrics/.
Get the falco-exporter metrics URL by running these commands:
{{- if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "falco-exporter.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo {{- if .Values.service.mTLS.enabled }} https{{- else }} http{{- end }}://$NODE_IP:$NODE_PORT/metrics
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of by running 'kubectl get svc -w {{ template "falco-exporter.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "falco-exporter.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo {{- if .Values.service.mTLS.enabled }} https{{- else }} http{{- end }}://$SERVICE_IP:{{ .Values.service.port }}/metrics
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "falco-exporter.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit {{- if .Values.service.mTLS.enabled }} https{{- else }} http{{- end }}://127.0.0.1:{{ .Values.service.targetPort }}/metrics to use your application"
kubectl port-forward --namespace {{ .Release.Namespace }} $POD_NAME {{ .Values.service.targetPort }}
{{- end }}
echo {{- if .Values.service.mTLS.enabled }} "You'll need a valid client certificate and its corresponding key for Mutual TLS handshake" {{- end }}

View File

@ -1,98 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "falco-exporter.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "falco-exporter.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "falco-exporter.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "falco-exporter.labels" -}}
{{ include "falco-exporter.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
{{- if not .Values.skipHelm }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{- if not .Values.skipHelm }}
helm.sh/chart: {{ include "falco-exporter.chart" . }}
{{- end }}
{{- end -}}
{{/*
Selector labels
*/}}
{{- define "falco-exporter.selectorLabels" -}}
app.kubernetes.io/name: {{ include "falco-exporter.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "falco-exporter.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "falco-exporter.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Create the name of the PSP to use
*/}}
{{- define "falco-exporter.podSecurityPolicyName" -}}
{{- if .Values.podSecurityPolicy.create -}}
{{ default (include "falco-exporter.fullname" .) .Values.podSecurityPolicy.name }}
{{- else -}}
{{ default "default" .Values.podSecurityPolicy.name }}
{{- end -}}
{{- end -}}
{{/*
Extract the unixSocket's directory path
*/}}
{{- define "falco-exporter.unixSocketDir" -}}
{{- if .Values.falco.grpcUnixSocketPath -}}
{{- .Values.falco.grpcUnixSocketPath | trimPrefix "unix://" | dir -}}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for rbac.
*/}}
{{- define "rbac.apiVersion" -}}
{{- if .Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1" }}
{{- print "rbac.authorization.k8s.io/v1" -}}
{{- else -}}
{{- print "rbac.authorization.k8s.io/v1beta1" -}}
{{- end -}}
{{- end -}}

View File

@ -1,132 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: {{ include "falco-exporter.fullname" . }}
labels:
{{- include "falco-exporter.labels" . | nindent 4 }}
namespace: {{ .Release.Namespace }}
spec:
selector:
matchLabels:
{{- include "falco-exporter.selectorLabels" . | nindent 6 }}
updateStrategy:
{{ toYaml .Values.daemonset.updateStrategy | indent 4 }}
template:
metadata:
labels:
{{- include "falco-exporter.selectorLabels" . | nindent 8 }}
{{- if .Values.daemonset.podLabels }}
{{ toYaml .Values.daemonset.podLabels | nindent 8 }}
{{- end }}
{{- if .Values.daemonset.annotations }}
annotations:
{{ toYaml .Values.daemonset.annotations | nindent 8 }}
{{- end }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: "{{ .Values.priorityClassName }}"
{{- end }}
serviceAccountName: {{ include "falco-exporter.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
args:
- /usr/bin/falco-exporter
{{- if .Values.falco.grpcUnixSocketPath }}
- --client-socket={{ .Values.falco.grpcUnixSocketPath }}
{{- else }}
- --client-hostname={{ .Values.falco.grpcHostname }}
- --client-port={{ .Values.falco.grpcPort }}
{{- end }}
- --timeout={{ .Values.falco.grpcTimeout }}
- --listen-address=0.0.0.0:{{ .Values.service.port }}
{{- if .Values.service.mTLS.enabled }}
- --server-ca=/etc/falco/server-certs/ca.crt
- --server-cert=/etc/falco/server-certs/server.crt
- --server-key=/etc/falco/server-certs/server.key
{{- end }}
ports:
- name: metrics
containerPort: {{ .Values.service.targetPort }}
protocol: TCP
livenessProbe:
initialDelaySeconds: {{ .Values.healthChecks.livenessProbe.initialDelaySeconds }}
timeoutSeconds: {{ .Values.healthChecks.livenessProbe.timeoutSeconds }}
periodSeconds: {{ .Values.healthChecks.livenessProbe.periodSeconds }}
httpGet:
path: /liveness
port: {{ .Values.healthChecks.livenessProbe.probesPort }}
readinessProbe:
initialDelaySeconds: {{ .Values.healthChecks.readinessProbe.initialDelaySeconds }}
timeoutSeconds: {{ .Values.healthChecks.readinessProbe.timeoutSeconds }}
periodSeconds: {{ .Values.healthChecks.readinessProbe.periodSeconds }}
httpGet:
path: /readiness
port: {{ .Values.healthChecks.readinessProbe.probesPort }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
volumeMounts:
{{- if .Values.falco.grpcUnixSocketPath }}
- mountPath: {{ include "falco-exporter.unixSocketDir" . }}
name: falco-socket-dir
readOnly: true
{{- else }}
- mountPath: /etc/falco/certs
name: certs-volume
readOnly: true
{{- end }}
{{- if .Values.service.mTLS.enabled }}
- mountPath: /etc/falco/server-certs
name: server-certs-volume
readOnly: true
{{- end }}
volumes:
{{- if .Values.falco.grpcUnixSocketPath }}
- name: falco-socket-dir
hostPath:
path: {{ include "falco-exporter.unixSocketDir" . }}
{{- else }}
- name: certs-volume
secret:
secretName: {{ include "falco-exporter.fullname" . }}-certs
items:
- key: client.key
path: client.key
- key: client.crt
path: client.crt
- key: ca.crt
path: ca.crt
{{- end }}
{{- if .Values.service.mTLS.enabled }}
- name: server-certs-volume
secret:
secretName: {{ include "falco-exporter.fullname" . }}-server-certs
items:
- key: server.key
path: server.key
- key: server.crt
path: server.crt
- key: ca.crt
path: ca.crt
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@ -1,648 +0,0 @@
{{- if .Values.grafanaDashboard.enabled }}
apiVersion: v1
data:
grafana-falco.json: |-
{
"__inputs": [
],
"__requires": [
{
"type": "grafana",
"id": "grafana",
"name": "Grafana",
"version": "7.0.3"
},
{
"type": "panel",
"id": "graph",
"name": "Graph",
"version": ""
},
{
"type": "datasource",
"id": "prometheus",
"name": "Prometheus",
"version": "1.0.0"
},
{
"type": "panel",
"id": "table",
"name": "Table",
"version": ""
}
],
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "datasource",
"uid": "grafana"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"target": {
"limit": 100,
"matchAny": false,
"tags": [],
"type": "dashboard"
},
"type": "dashboard"
}
]
},
"description": "",
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 1,
"id": null,
"links": [],
"liveNow": false,
"panels": [
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic",
"seriesBy": "last"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "smooth",
"lineStyle": {
"fill": "solid"
},
"lineWidth": 2,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "none"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 0
},
"id": 90,
"options": {
"legend": {
"calcs": [],
"displayMode": "table",
"placement": "right",
"showLegend": true
},
"tooltip": {
"mode": "multi",
"sort": "asc"
}
},
"pluginVersion": "8.3.3",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"editorMode": "code",
"expr": "sum(rate(falco_events[$__rate_interval])) by (rule)",
"hide": false,
"instant": false,
"legendFormat": "__auto",
"range": true,
"refId": "A"
}
],
"title": "Events rate by rule",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic",
"seriesBy": "last"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "smooth",
"lineStyle": {
"fill": "solid"
},
"lineWidth": 2,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "none"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 0
},
"id": 72,
"options": {
"legend": {
"calcs": [],
"displayMode": "table",
"placement": "right",
"showLegend": true
},
"tooltip": {
"mode": "multi",
"sort": "asc"
}
},
"pluginVersion": "8.3.3",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"editorMode": "code",
"expr": "sum(rate(falco_events[$__rate_interval])) by (priority)",
"hide": false,
"instant": false,
"legendFormat": "__auto",
"range": true,
"refId": "A"
}
],
"title": "Events rate by priority",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic",
"seriesBy": "last"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "smooth",
"lineStyle": {
"fill": "solid"
},
"lineWidth": 2,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "none"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 8
},
"id": 89,
"options": {
"legend": {
"calcs": [],
"displayMode": "table",
"placement": "right",
"showLegend": true
},
"tooltip": {
"mode": "multi",
"sort": "asc"
}
},
"pluginVersion": "8.3.3",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"editorMode": "code",
"expr": "sum(rate(falco_events[$__rate_interval])) by (tags)",
"hide": false,
"instant": false,
"legendFormat": "__auto",
"range": true,
"refId": "A"
}
],
"title": "Events rate by tags",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic",
"seriesBy": "last"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "smooth",
"lineStyle": {
"fill": "solid"
},
"lineWidth": 2,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "none"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 8
},
"id": 91,
"options": {
"legend": {
"calcs": [],
"displayMode": "table",
"placement": "right",
"showLegend": true
},
"tooltip": {
"mode": "multi",
"sort": "asc"
}
},
"pluginVersion": "8.3.3",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"editorMode": "code",
"expr": "sum(rate(falco_events[$__rate_interval])) by (pod, hostname)",
"hide": false,
"instant": false,
"legendFormat": "{{`{{ pod }} ({{hostname}})`}}",
"range": true,
"refId": "A"
}
],
"title": "Events rate by pod, hostname",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "$datasource"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"custom": {
"align": "auto",
"cellOptions": {
"type": "color-text"
},
"filterable": true,
"inspect": false,
"minWidth": 50
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "text",
"value": null
},
{
"color": "#EAB839",
"value": 100
},
{
"color": "red",
"value": 1000
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 12,
"w": 24,
"x": 0,
"y": 16
},
"id": 94,
"options": {
"cellHeight": "sm",
"footer": {
"countRows": false,
"enablePagination": true,
"fields": "",
"reducer": [
"sum"
],
"show": false
},
"showHeader": true,
"sortBy": [
{
"desc": true,
"displayName": "Count"
}
]
},
"pluginVersion": "10.4.1",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"editorMode": "code",
"exemplar": false,
"expr": "falco_events",
"format": "table",
"instant": true,
"legendFormat": "__auto",
"range": false,
"refId": "A"
}
],
"title": "Events Total",
"transformations": [
{
"id": "organize",
"options": {
"excludeByName": {
"Time": true,
"__name__": true,
"container": true,
"endpoint": true,
"instance": true,
"job": true,
"k8s_ns_name": true,
"k8s_pod_name": true,
"service": true
},
"includeByName": {},
"indexByName": {},
"renameByName": {
"Value": "Count"
}
}
}
],
"type": "table"
}
],
"refresh": "30s",
"schemaVersion": 39,
"tags": [
"security",
"falco"
],
"templating": {
"list": [
{
"current": {
"selected": false,
"text": "Prometheus",
"value": "prometheus"
},
"hide": 0,
"includeAll": false,
"multi": false,
"name": "datasource",
"options": [],
"query": "prometheus",
"queryValue": "",
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"type": "datasource"
},
{
"current": {
"isNone": true,
"selected": false,
"text": "None",
"value": ""
},
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"definition": "label_values(kube_node_info,cluster)",
"hide": 0,
"includeAll": false,
"multi": false,
"name": "cluster",
"options": [],
"query": {
"qryType": 1,
"query": "label_values(kube_node_info,cluster)",
"refId": "PrometheusVariableQueryEditor-VariableQuery"
},
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 1,
"type": "query"
}
]
},
"time": {
"from": "now-1h",
"to": "now"
},
"timepicker": {},
"timezone": "",
"title": "Falco Events",
"uid": "FvUFlfuZz",
"version": 2,
"weekStart": ""
}
kind: ConfigMap
metadata:
labels:
grafana_dashboard: "1"
{{- if .Values.grafanaDashboard.folder }}
annotations:
k8s-sidecar-target-directory: /tmp/dashboards/{{ .Values.grafanaDashboard.folder }}
{{ .Values.grafanaDashboard.folderAnnotation }}: {{ .Values.grafanaDashboard.folder }}
{{- end }}
name: grafana-falco
{{- if .Values.grafanaDashboard.namespace }}
namespace: {{ .Values.grafanaDashboard.namespace }}
{{- else }}
namespace: {{ .Release.Namespace }}
{{- end}}
{{- end -}}

View File

@ -1,28 +0,0 @@
{{- if and .Values.podSecurityPolicy.create (.Capabilities.APIVersions.Has "policy/v1beta1") }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ include "falco-exporter.podSecurityPolicyName" . }}
labels:
{{- include "falco-exporter.labels" . | nindent 4 }}
{{- with .Values.podSecurityPolicy.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
allowPrivilegeEscalation: false
allowedHostPaths:
- pathPrefix: "/run/falco"
readOnly: true
fsGroup:
rule: RunAsAny
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- 'hostPath'
- 'secret'
{{- end -}}

View File

@ -1,81 +0,0 @@
{{- if and .Values.prometheusRules.enabled .Values.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: {{ include "falco-exporter.fullname" . }}
{{- if .Values.prometheusRules.namespace }}
namespace: {{ .Values.prometheusRules.namespace }}
{{- end }}
labels:
{{- include "falco-exporter.labels" . | nindent 4 }}
{{- if .Values.prometheusRules.additionalLabels }}
{{- toYaml .Values.prometheusRules.additionalLabels | nindent 4 }}
{{- end }}
spec:
groups:
- name: falco-exporter
rules:
{{- if .Values.prometheusRules.enabled }}
- alert: FalcoExporterAbsent
expr: absent(up{job="{{- include "falco-exporter.fullname" . }}"})
for: 10m
annotations:
summary: Falco Exporter has dissapeared from Prometheus service discovery.
description: No metrics are being scraped from falco. No events will trigger any alerts.
labels:
severity: critical
{{- end }}
{{- if .Values.prometheusRules.alerts.warning.enabled }}
- alert: FalcoWarningEventsRateHigh
annotations:
summary: Falco is experiencing high rate of warning events
description: A high rate of warning events are being detected by Falco
expr: rate(falco_events{priority="4"}[{{ .Values.prometheusRules.alerts.warning.rate_interval }}]) > {{ .Values.prometheusRules.alerts.warning.threshold }}
for: {{ .Values.prometheusRules.alerts.warning.for }}
labels:
severity: warning
{{- end }}
{{- if .Values.prometheusRules.alerts.error.enabled }}
- alert: FalcoErrorEventsRateHigh
annotations:
summary: Falco is experiencing high rate of error events
description: A high rate of error events are being detected by Falco
expr: rate(falco_events{priority="3"}[{{ .Values.prometheusRules.alerts.error.rate_interval }}]) > {{ .Values.prometheusRules.alerts.error.threshold }}
for: {{ .Values.prometheusRules.alerts.error.for }}
labels:
severity: warning
{{- end }}
{{- if .Values.prometheusRules.alerts.critical.enabled }}
- alert: FalcoCriticalEventsRateHigh
annotations:
summary: Falco is experiencing high rate of critical events
description: A high rate of critical events are being detected by Falco
expr: rate(falco_events{priority="2"}[{{ .Values.prometheusRules.alerts.critical.rate_interval }}]) > {{ .Values.prometheusRules.alerts.critical.threshold }}
for: {{ .Values.prometheusRules.alerts.critical.for }}
labels:
severity: critical
{{- end }}
{{- if .Values.prometheusRules.alerts.alert.enabled }}
- alert: FalcoAlertEventsRateHigh
annotations:
summary: Falco is experiencing high rate of alert events
description: A high rate of alert events are being detected by Falco
expr: rate(falco_events{priority="1"}[{{ .Values.prometheusRules.alerts.alert.rate_interval }}]) > {{ .Values.prometheusRules.alerts.alert.threshold }}
for: {{ .Values.prometheusRules.alerts.alert.for }}
labels:
severity: critical
{{- end }}
{{- if .Values.prometheusRules.alerts.emergency.enabled }}
- alert: FalcoEmergencyEventsRateHigh
annotations:
summary: Falco is experiencing high rate of emergency events
description: A high rate of emergency events are being detected by Falco
expr: rate(falco_events{priority="0"}[{{ .Values.prometheusRules.alerts.emergency.rate_interval }}]) > {{ .Values.prometheusRules.alerts.emergency.threshold }}
for: {{ .Values.prometheusRules.alerts.emergency.for }}
labels:
severity: critical
{{- end }}
{{- with .Values.prometheusRules.additionalAlerts }}
{{ . | nindent 4 }}
{{- end }}
{{- end }}

View File

@ -1,22 +0,0 @@
{{- if .Values.podSecurityPolicy.create -}}
kind: Role
apiVersion: {{ template "rbac.apiVersion" . }}
metadata:
name: {{ include "falco-exporter.podSecurityPolicyName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "falco-exporter.labels" . | nindent 4 }}
{{- with .Values.podSecurityPolicy.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
rules:
- apiGroups:
- policy
resources:
- podsecuritypolicies
resourceNames:
- {{ include "falco-exporter.podSecurityPolicyName" . }}
verbs:
- use
{{- end -}}

View File

@ -1,20 +0,0 @@
{{- if .Values.podSecurityPolicy.create -}}
kind: RoleBinding
apiVersion: {{ template "rbac.apiVersion" . }}
metadata:
name: {{ include "falco-exporter.podSecurityPolicyName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "falco-exporter.labels" . | nindent 4 }}
{{- with .Values.podSecurityPolicy.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
subjects:
- kind: ServiceAccount
name: {{ include "falco-exporter.serviceAccountName" . }}
roleRef:
kind: Role
name: {{ include "falco-exporter.podSecurityPolicyName" . }}
apiGroup: rbac.authorization.k8s.io
{{- end -}}

View File

@ -1,24 +0,0 @@
{{- if .Values.certs }}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "falco-exporter.fullname" . }}-certs
namespace: {{ .Release.Namespace }}
labels:
{{- include "falco-exporter.labels" . | nindent 4 }}
type: Opaque
data:
{{- if .Values.certs }}
{{- if and .Values.certs.ca .Values.certs.ca.crt }}
ca.crt: {{ .Values.certs.ca.crt | b64enc | quote }}
{{- end }}
{{- if .Values.certs.client }}
{{- if .Values.certs.client.key }}
client.key: {{ .Values.certs.client.key | b64enc | quote }}
{{- end }}
{{- if .Values.certs.client.crt }}
client.crt: {{ .Values.certs.client.crt | b64enc | quote }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -1,41 +0,0 @@
{{- if and .Values.scc.create (.Capabilities.APIVersions.Has "security.openshift.io/v1") }}
apiVersion: security.openshift.io/v1
kind: SecurityContextConstraints
metadata:
annotations:
kubernetes.io/description: |
This provides the minimum requirements Falco-exporter to run in Openshift.
name: {{ template "falco-exporter.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "falco-exporter.labels" . | nindent 4 }}
allowHostDirVolumePlugin: true
allowHostIPC: false
allowHostNetwork: false
allowHostPID: false
allowHostPorts: false
allowPrivilegeEscalation: false
allowPrivilegedContainer: false
allowedCapabilities: []
allowedUnsafeSysctls: []
defaultAddCapabilities: []
fsGroup:
type: RunAsAny
groups: []
priority: 0
readOnlyRootFilesystem: false
requiredDropCapabilities: []
runAsUser:
type: RunAsAny
seLinuxContext:
type: RunAsAny
seccompProfiles:
- '*'
supplementalGroups:
type: RunAsAny
users:
- system:serviceaccount:{{ .Release.Namespace }}:{{ include "falco-exporter.serviceAccountName" . }}
volumes:
- hostPath
- secret
{{- end }}

View File

@ -1,14 +0,0 @@
{{- if .Values.service.mTLS.enabled }}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "falco-exporter.fullname" . }}-server-certs
namespace: {{ .Release.Namespace }}
labels:
{{- include "falco-exporter.labels" . | nindent 4 }}
type: Opaque
data:
server.crt: {{ .Values.service.mTLS.server.crt | b64enc | quote }}
server.key: {{ .Values.service.mTLS.server.key | b64enc | quote }}
ca.crt: {{ .Values.service.mTLS.ca.crt | b64enc | quote }}
{{- end }}

View File

@ -1,42 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "falco-exporter.fullname" . }}
{{- if .Values.service.annotations }}
annotations:
{{ toYaml .Values.service.annotations | indent 4 }}
{{- end }}
labels:
{{- include "falco-exporter.labels" . | nindent 4 }}
{{- if .Values.service.labels }}
{{ toYaml .Values.service.labels | indent 4 }}
{{- end }}
namespace: {{ .Release.Namespace }}
spec:
{{- if .Values.service.clusterIP }}
clusterIP: {{ .Values.service.clusterIP }}
{{- end }}
{{- if .Values.service.externalIPs }}
externalIPs:
{{ toYaml .Values.service.externalIPs | indent 4 }}
{{- end }}
{{- if .Values.service.loadBalancerIP }}
loadBalancerIP: {{ .Values.service.loadBalancerIP }}
{{- end }}
{{- if .Values.service.loadBalancerSourceRanges }}
loadBalancerSourceRanges:
{{- range $cidr := .Values.service.loadBalancerSourceRanges }}
- {{ $cidr }}
{{- end }}
{{- end }}
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
{{- if ( and (eq .Values.service.type "NodePort" ) (not (empty .Values.service.nodePort)) ) }}
nodePort: {{ .Values.service.nodePort }}
{{- end }}
targetPort: {{ .Values.service.targetPort }}
protocol: TCP
name: metrics
selector:
{{- include "falco-exporter.selectorLabels" . | nindent 4 }}

View File

@ -1,13 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "falco-exporter.serviceAccountName" . }}
labels:
{{- include "falco-exporter.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
namespace: {{ .Release.Namespace }}
{{- end -}}

View File

@ -1,27 +0,0 @@
{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) .Values.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ include "falco-exporter.fullname" . }}
labels:
{{- include "falco-exporter.labels" . | nindent 4 }}
{{- range $key, $value := .Values.serviceMonitor.additionalLabels }}
{{ $key }}: {{ $value | quote }}
{{- end }}
namespace: {{ .Release.Namespace }}
spec:
endpoints:
- port: metrics
{{- if .Values.serviceMonitor.interval }}
interval: {{ .Values.serviceMonitor.interval }}
{{- end }}
{{- if .Values.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ .Values.serviceMonitor.scrapeTimeout }}
{{- end }}
{{- with .Values.serviceMonitor.additionalProperties }}
{{- toYaml . | nindent 4 }}
{{- end }}
selector:
matchLabels:
{{- include "falco-exporter.selectorLabels" . | nindent 6 }}
{{- end }}

View File

@ -1,15 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: "{{ include "falco-exporter.fullname" . }}-test-connection"
labels:
{{- include "falco-exporter.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": test-success
spec:
containers:
- name: wget
image: busybox
command: ['wget']
args: ['{{ include "falco-exporter.fullname" . }}:{{ .Values.service.port }}/metrics']
restartPolicy: Never

View File

@ -1,222 +0,0 @@
# Default values for falco-exporter.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# -- service exposes the exporter service to be accessed from within the cluster.
service:
# -- type denotes the service type. Setting it to "ClusterIP" we ensure that are accessible
# from within the cluster.
type: ClusterIP
# -- clusterIP set to none. It's headless service.
clusterIP: None
# -- port is the port on which the Service will listen.
port: 9376
# -- targetPort is the port on which the Pod is listening.
targetPort: 9376
# -- labels set of labels to be applied to the service.
labels: {}
# -- annotations set of annotations to be applied to the service.
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9376"
# -- mTLS mutual TLS for HTTP metrics server.
mTLS:
# -- enabled specifies whether the mTLS should be enabled.
enabled: false
# -- healthChecks contains the configuration for liveness and readiness probes.
healthChecks:
# -- livenessProbe is a diagnostic mechanism used to determine weather a container within a Pod is still running and healthy.
livenessProbe:
# -- probesPort is liveness probes port.
probesPort: 19376
# -- initialDelaySeconds tells the kubelet that it should wait X seconds before performing the first probe.
initialDelaySeconds: 60
# -- timeoutSeconds number of seconds after which the probe times out.
timeoutSeconds: 5
# -- periodSeconds specifies the interval at which the liveness probe will be repeated.
periodSeconds: 15
# -- readinessProbe is a mechanism used to determine whether a container within a Pod is ready to serve traffic.
readinessProbe:
# probesPort is readiness probes port
probesPort: 19376
# -- initialDelaySeconds tells the kubelet that it should wait X seconds before performing the first probe.
initialDelaySeconds: 30
# -- timeoutSeconds is the number of seconds after which the probe times out.
timeoutSeconds: 5
# -- periodSeconds specifies the interval at which the readiness probe will be repeated.
periodSeconds: 15
# -- image is the configuration for the exporter image.
image:
# -- registry is the image registry to pull from.
registry: docker.io
# -- repository is the image repository to pull from.
repository: falcosecurity/falco-exporter
# -- tag is image tag to pull.
tag: "0.8.3"
# -- pullPolicy is the policy used to determine when a node should attempt to pull the container image.
pullPolicy: IfNotPresent
# -- pullSecrets a list of secrets containing credentials used when pulling from private/secure registries.
imagePullSecrets: []
# -- nameOverride is the new name used to override the release name used for exporter's components.
nameOverride: ""
# -- fullNameOverride same as nameOverride but for the full name.
fullnameOverride: ""
# -- priorityClassName specifies the name of the PriorityClass for the pods.
priorityClassName: ""
# -- falco the configuration to connect falco.
falco:
# -- grpcUnixSocketPath path to the falco's grpc unix socket.
grpcUnixSocketPath: "unix:///run/falco/falco.sock"
# -- grpcTimeout timout value for grpc connection.
grpcTimeout: 2m
# -- serviceAccount is the configuration for the service account.
serviceAccount:
# create specifies whether a service account should be created.
create: true
# annotations to add to the service account
annotations: {}
# -- name is the name of the service account to use.
# If not set and create is true, a name is generated using the fullname template.
# If set and create is false, an already existing serviceAccount must be provided.
name: ""
# -- podSecurityPolicy holds the security policy settings for the pod.
podSecurityPolicy:
# -- create specifies whether a PSP, Role and RoleBinding should be created
create: false
# -- annotations to add to the PSP, Role and RoleBinding
annotations: {}
# -- name of the PSP, Role and RoleBinding to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
# -- podSecurityPolicy holds the security policy settings for the pod.
podSecurityContext:
{}
# fsGroup: 2000
# -- daemonset holds the configuration for the daemonset.
daemonset:
# updateStrategy perform rolling updates by default in the DaemonSet agent
# ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/
updateStrategy:
# type of the strategy. Can also customize maxUnavailable or minReadySeconds based on your needs.
type: RollingUpdate
# -- annotations to add to the DaemonSet pods.
annotations: {}
# -- podLabels labels to add to the pods.
podLabels: {}
# -- securityContext holds the security context for the daemonset.
securityContext:
# -- capabilities to be assigned to the daemonset.
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
privileged: false
seccompProfile:
type: RuntimeDefault
# -- resources defines the computing resources (CPU and memory) that are allocated to the containers running within the Pod.
resources:
{}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
# -- nodeSelector specifies a set of key-value pairs that must match labels assigned to nodes
# for the Pod to be eligible for scheduling on that node
nodeSelector: {}
# -- tolerations are applied to pods and allow them to be scheduled on nodes with matching taints.
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
# -- affinity allows pod placement based on node characteristics, or any other custom labels assigned to nodes.
affinity: {}
# -- serviceMonitor holds the configuration for the ServiceMonitor CRD.
# A ServiceMonitor is a custom resource definition (CRD) used to configure how Prometheus should
# discover and scrape metrics from the exporter service.
serviceMonitor:
# -- enable the deployment of a Service Monitor for the Prometheus Operator.
enabled: false
# -- additionalLabels specifies labels to be added on the Service Monitor.
additionalLabels: {}
# -- interval specifies the time interval at which Prometheus should scrape metrics from the service.
interval: ""
# -- scrapeTimeout determines the maximum time Prometheus should wait for a target to respond to a scrape request.
# If the target does not respond within the specified timeout, Prometheus considers the scrape as failed for
# that target.
scrapeTimeout: ""
# -- aditionalProperties allows setting additional properties on the endpoint such as relabelings, metricRelabelings etc.
additionalProperties: {}
# -- grafanaDashboard contains the configuration related to grafana dashboards.
grafanaDashboard:
# -- enabled specifies whether the dashboard should be deployed.
enabled: false
# -- folder creates and set folderAnnotation to specify where the dashboard is stored in grafana.
folder: ""
# -- folderAnnotation sets the annotation's name used by folderAnnotation in grafana's helm-chart.
folderAnnotation: "grafana_dashboard_folder"
# -- namespace specifies the namespace for the configmap.
namespace: default
# -- prometheusDatasourceName name of the data source.
prometheusDatasourceName: Prometheus
scc:
# true here enabled creation of Security Context Constraints in Openshift
create: true
# prometheusRules holds the configuration for alerting on priority events.
prometheusRules:
# -- enabled specifies whether the prometheus rules should be deployed.
enabled: false
alerts:
warning:
enabled: true
rate_interval: "5m"
threshold: 0
for: "15m"
error:
enabled: true
rate_interval: "5m"
threshold: 0
for: "15m"
critical:
enabled: true
rate_interval: "5m"
threshold: 0
for: "15m"
alert:
enabled: true
rate_interval: "5m"
threshold: 0
for: "5m"
emergency:
enabled: true
rate_interval: "1m"
threshold: 0
for: "1m"
additionalAlerts: {}

View File

@ -3,6 +3,33 @@
This file documents all notable changes to Falco Helm Chart. The release
numbering uses [semantic versioning](http://semver.org).
## v6.2.2
* Bump container plugin to 0.3.5
* Bump k8smeta plugin to 0.3.1
## v6.2.1
* Bump container plugin to 0.3.3
## v6.2.0
* Switch to `collectors.containerEngine` configuration by default
* Update `collectors.containerEngine.engines` default values
* Fix containerd socket path configuration
* Address "container.name shows container.id" issue
* Address "Missing k8s.pod name, container.name, other metadata with k3s" issue
* Bump container plugin to 0.3.2
## v6.1.0
* feat(falco): Add possibility to custom falco pods hostname
## v6.0.2
* Bump Falco to 0.41.3
* Bump container plugin to 0.3.1
## v6.0.1
* Bump Falco to 0.41.2

View File

@ -1,7 +1,7 @@
apiVersion: v2
name: falco
version: 6.0.1
appVersion: "0.41.2"
version: 6.2.2
appVersion: "0.41.3"
description: Falco
keywords:
- monitoring

View File

@ -583,7 +583,7 @@ If you use a Proxy in your cluster, the requests between `Falco` and `Falcosidek
## Configuration
The following table lists the main configurable parameters of the falco chart v6.0.1 and their default values. See [values.yaml](./values.yaml) for full list.
The following table lists the main configurable parameters of the falco chart v6.2.2 and their default values. See [values.yaml](./values.yaml) for full list.
## Values
@ -597,27 +597,28 @@ The following table lists the main configurable parameters of the falco chart v6
| certs.existingSecret | string | `""` | Existing secret containing the following key, crt and ca as well as the bundle pem. |
| certs.server.crt | string | `""` | Certificate used by gRPC and webserver. |
| certs.server.key | string | `""` | Key used by gRPC and webserver. |
| collectors.containerEngine | object | `{"enabled":false,"engines":{"bpm":{"enabled":true},"containerd":{"enabled":true,"sockets":["/run/containerd/containerd.sock"]},"cri":{"enabled":true,"sockets":["/run/crio/crio.sock"]},"docker":{"enabled":true,"sockets":["/var/run/docker.sock"]},"libvirt_lxc":{"enabled":true},"lxc":{"enabled":true},"podman":{"enabled":true,"sockets":["/run/podman/podman.sock"]}},"hooks":["create"],"labelMaxLen":100,"pluginRef":"ghcr.io/falcosecurity/plugins/plugin/container:0.3.0","withSize":false}` | This collector is the new container engine collector that replaces the old docker, containerd, crio and podman collectors. It is designed to collect metadata from various container engines and provide a unified interface through the container plugin. When enabled, it will deploy the container plugin and use it to collect metadata from the container engines. Keep in mind that the old collectors (docker, containerd, crio, podman) will use the container plugin to collect metadata under the hood. |
| collectors.containerEngine.enabled | bool | `false` | Enable Container Engine support. |
| collectors.containerEngine | object | `{"enabled":true,"engines":{"bpm":{"enabled":true},"containerd":{"enabled":true,"sockets":["/run/host-containerd/containerd.sock"]},"cri":{"enabled":true,"sockets":["/run/containerd/containerd.sock","/run/crio/crio.sock","/run/k3s/containerd/containerd.sock","/run/host-containerd/containerd.sock"]},"docker":{"enabled":true,"sockets":["/var/run/docker.sock"]},"libvirt_lxc":{"enabled":true},"lxc":{"enabled":true},"podman":{"enabled":true,"sockets":["/run/podman/podman.sock"]}},"hooks":["create"],"labelMaxLen":100,"pluginRef":"ghcr.io/falcosecurity/plugins/plugin/container:0.3.5","withSize":false}` | This collector is the new container engine collector that replaces the old docker, containerd, crio and podman collectors. It is designed to collect metadata from various container engines and provide a unified interface through the container plugin. When enabled, it will deploy the container plugin and use it to collect metadata from the container engines. Keep in mind that the old collectors (docker, containerd, crio, podman) will use the container plugin to collect metadata under the hood. |
| collectors.containerEngine.enabled | bool | `true` | Enable Container Engine support. |
| collectors.containerEngine.engines | object | `{"bpm":{"enabled":true},"containerd":{"enabled":true,"sockets":["/run/host-containerd/containerd.sock"]},"cri":{"enabled":true,"sockets":["/run/containerd/containerd.sock","/run/crio/crio.sock","/run/k3s/containerd/containerd.sock","/run/host-containerd/containerd.sock"]},"docker":{"enabled":true,"sockets":["/var/run/docker.sock"]},"libvirt_lxc":{"enabled":true},"lxc":{"enabled":true},"podman":{"enabled":true,"sockets":["/run/podman/podman.sock"]}}` | engines specify the container engines that will be used to collect metadata. See https://github.com/falcosecurity/plugins/blob/main/plugins/container/README.md#configuration |
| collectors.containerEngine.hooks | list | `["create"]` | hooks specify the hooks that will be used to collect metadata from the container engine. The available hooks are: create, start. |
| collectors.containerEngine.labelMaxLen | int | `100` | labelMaxLen is the maximum length of the labels that can be used in the container plugin. container labels larger than this value won't be collected. |
| collectors.containerEngine.pluginRef | string | `"ghcr.io/falcosecurity/plugins/plugin/container:0.3.0"` | pluginRef is the OCI reference for the container plugin. It could be a full reference such as "ghcr.io/falcosecurity/plugins/plugin/container:0.3.0". Or just name + tag: container:0.3.0. |
| collectors.containerEngine.pluginRef | string | `"ghcr.io/falcosecurity/plugins/plugin/container:0.3.5"` | pluginRef is the OCI reference for the container plugin. It could be a full reference such as "ghcr.io/falcosecurity/plugins/plugin/container:0.3.5". Or just name + tag: container:0.3.5. |
| collectors.containerEngine.withSize | bool | `false` | withSize specifies whether to enable container size inspection, which is inherently slow. |
| collectors.containerd | object | `{"enabled":true,"socket":"/run/containerd/containerd.sock"}` | This collector is deprecated and will be removed in the future. Please use the containerEngine collector instead. |
| collectors.containerd.enabled | bool | `true` | Enable ContainerD support. |
| collectors.containerd.socket | string | `"/run/containerd/containerd.sock"` | The path of the ContainerD socket. |
| collectors.crio | object | `{"enabled":true,"socket":"/run/crio/crio.sock"}` | This collector is deprecated and will be removed in the future. Please use the containerEngine collector instead. |
| collectors.crio.enabled | bool | `true` | Enable CRI-O support. |
| collectors.containerd | object | `{"enabled":false,"socket":"/run/host-containerd/containerd.sock"}` | This collector is deprecated and will be removed in the future. Please use the containerEngine collector instead. |
| collectors.containerd.enabled | bool | `false` | Enable ContainerD support. |
| collectors.containerd.socket | string | `"/run/host-containerd/containerd.sock"` | The path of the ContainerD socket. |
| collectors.crio | object | `{"enabled":false,"socket":"/run/crio/crio.sock"}` | This collector is deprecated and will be removed in the future. Please use the containerEngine collector instead. |
| collectors.crio.enabled | bool | `false` | Enable CRI-O support. |
| collectors.crio.socket | string | `"/run/crio/crio.sock"` | The path of the CRI-O socket. |
| collectors.docker | object | `{"enabled":true,"socket":"/var/run/docker.sock"}` | This collector is deprecated and will be removed in the future. Please use the containerEngine collector instead. |
| collectors.docker.enabled | bool | `true` | Enable Docker support. |
| collectors.docker | object | `{"enabled":false,"socket":"/var/run/docker.sock"}` | This collector is deprecated and will be removed in the future. Please use the containerEngine collector instead. |
| collectors.docker.enabled | bool | `false` | Enable Docker support. |
| collectors.docker.socket | string | `"/var/run/docker.sock"` | The path of the Docker daemon socket. |
| collectors.enabled | bool | `true` | Enable/disable all the metadata collectors. |
| collectors.kubernetes | object | `{"collectorHostname":"","collectorPort":"","enabled":false,"hostProc":"/host","pluginRef":"ghcr.io/falcosecurity/plugins/plugin/k8smeta:0.3.0","verbosity":"info"}` | kubernetes holds the configuration for the kubernetes collector. Starting from version 0.37.0 of Falco, the legacy kubernetes client has been removed. A new standalone component named k8s-metacollector and a Falco plugin have been developed to solve the issues that were present in the old implementation. More info here: https://github.com/falcosecurity/falco/issues/2973 |
| collectors.kubernetes | object | `{"collectorHostname":"","collectorPort":"","enabled":false,"hostProc":"/host","pluginRef":"ghcr.io/falcosecurity/plugins/plugin/k8smeta:0.3.1","verbosity":"info"}` | kubernetes holds the configuration for the kubernetes collector. Starting from version 0.37.0 of Falco, the legacy kubernetes client has been removed. A new standalone component named k8s-metacollector and a Falco plugin have been developed to solve the issues that were present in the old implementation. More info here: https://github.com/falcosecurity/falco/issues/2973 |
| collectors.kubernetes.collectorHostname | string | `""` | collectorHostname is the address of the k8s-metacollector. When not specified it will be set to match k8s-metacollector service. e.x: falco-k8smetacollecto.falco.svc. If for any reason you need to override it, make sure to set here the address of the k8s-metacollector. It is used by the k8smeta plugin to connect to the k8s-metacollector. |
| collectors.kubernetes.collectorPort | string | `""` | collectorPort designates the port on which the k8s-metacollector gRPC service listens. If not specified the value of the port named `broker-grpc` in k8s-metacollector.service.ports is used. The default values is 45000. It is used by the k8smeta plugin to connect to the k8s-metacollector. |
| collectors.kubernetes.enabled | bool | `false` | enabled specifies whether the Kubernetes metadata should be collected using the k8smeta plugin and the k8s-metacollector component. It will deploy the k8s-metacollector external component that fetches Kubernetes metadata and pushes them to Falco instances. For more info see: https://github.com/falcosecurity/k8s-metacollector https://github.com/falcosecurity/charts/tree/master/charts/k8s-metacollector When this option is disabled, Falco falls back to the container annotations to grab the metadata. In such a case, only the ID, name, namespace, labels of the pod will be available. |
| collectors.kubernetes.pluginRef | string | `"ghcr.io/falcosecurity/plugins/plugin/k8smeta:0.3.0"` | pluginRef is the OCI reference for the k8smeta plugin. It could be a full reference such as: "ghcr.io/falcosecurity/plugins/plugin/k8smeta:0.1.0". Or just name + tag: k8smeta:0.1.0. |
| collectors.kubernetes.pluginRef | string | `"ghcr.io/falcosecurity/plugins/plugin/k8smeta:0.3.1"` | pluginRef is the OCI reference for the k8smeta plugin. It could be a full reference such as: "ghcr.io/falcosecurity/plugins/plugin/k8smeta:0.1.0". Or just name + tag: k8smeta:0.1.0. |
| containerSecurityContext | object | `{}` | Set securityContext for the Falco container.For more info see the "falco.securityContext" helper in "pod-template.tpl" |
| controller.annotations | object | `{}` | |
| controller.daemonset.updateStrategy.type | string | `"RollingUpdate"` | Perform rolling updates by default in the DaemonSet agent ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/ |
@ -798,6 +799,7 @@ The following table lists the main configurable parameters of the falco chart v6
| namespaceOverride | string | `""` | Override the deployment namespace |
| nodeSelector | object | `{}` | Selectors used to deploy Falco on a given node/nodes. |
| podAnnotations | object | `{}` | Add additional pod annotations |
| podHostname | string | `nil` | Override hostname in falco pod |
| podLabels | object | `{}` | Add additional pod labels |
| podPriorityClassName | string | `nil` | Set pod priorityClassName |
| podSecurityContext | object | `{}` | Set securityContext for the pods These security settings are overriden by the ones specified for the specific containers when there is overlap. |

View File

@ -440,7 +440,7 @@ This helper is used to add the container plugin to the falco configuration.
{{ define "falco.containerPlugin" -}}
{{ if and .Values.driver.enabled .Values.collectors.enabled -}}
{{ if and (or .Values.collectors.docker.enabled .Values.collectors.crio.enabled .Values.collectors.containerd.enabled) .Values.collectors.containerEngine.enabled -}}
{{ fail "You can not enable one of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time. Please use the containerEngine configuration since the old configurations are deprecated." }}
{{ fail "You can not enable any of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time. Please use the containerEngine configuration since the old configurations are deprecated." }}
{{ else if or .Values.collectors.docker.enabled .Values.collectors.crio.enabled .Values.collectors.containerd.enabled .Values.collectors.containerEngine.enabled -}}
{{ if or .Values.collectors.docker.enabled .Values.collectors.crio.enabled .Values.collectors.containerd.enabled -}}
{{ $_ := set .Values.collectors.containerEngine.engines.docker "enabled" .Values.collectors.docker.enabled -}}
@ -482,7 +482,7 @@ This helper is used to add container plugin volumes to the falco pod.
{{- define "falco.containerPluginVolumes" -}}
{{- if and .Values.driver.enabled .Values.collectors.enabled -}}
{{- if and (or .Values.collectors.docker.enabled .Values.collectors.crio.enabled .Values.collectors.containerd.enabled) .Values.collectors.containerEngine.enabled -}}
{{ fail "You can not enable one of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time. Please use the containerEngine configuration since the old configurations are deprecated." }}
{{ fail "You can not enable any of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time. Please use the containerEngine configuration since the old configurations are deprecated." }}
{{- end -}}
{{ $volumes := list -}}
{{- if .Values.collectors.docker.enabled -}}
@ -495,10 +495,19 @@ This helper is used to add container plugin volumes to the falco pod.
{{ $volumes = append $volumes (dict "name" "containerd-socket" "hostPath" (dict "path" .Values.collectors.containerd.socket)) -}}
{{- end -}}
{{- if .Values.collectors.containerEngine.enabled -}}
{{- range $key, $val := .Values.collectors.containerEngine.engines -}}
{{- if and $val.enabled -}}
{{- $seenPaths := dict -}}
{{- $idx := 0 -}}
{{- $engineOrder := list "docker" "podman" "containerd" "cri" "lxc" "libvirt_lxc" "bpm" -}}
{{- range $engineName := $engineOrder -}}
{{- $val := index $.Values.collectors.containerEngine.engines $engineName -}}
{{- if and $val $val.enabled -}}
{{- range $index, $socket := $val.sockets -}}
{{ $volumes = append $volumes (dict "name" (printf "%s-socket-%d" $key $index) "hostPath" (dict "path" $socket)) -}}
{{- $mountPath := print "/host" $socket -}}
{{- if not (hasKey $seenPaths $mountPath) -}}
{{ $volumes = append $volumes (dict "name" (printf "container-engine-socket-%d" $idx) "hostPath" (dict "path" $socket)) -}}
{{- $idx = add $idx 1 -}}
{{- $_ := set $seenPaths $mountPath true -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}
@ -515,7 +524,7 @@ This helper is used to add container plugin volumeMounts to the falco pod.
{{- define "falco.containerPluginVolumeMounts" -}}
{{- if and .Values.driver.enabled .Values.collectors.enabled -}}
{{- if and (or .Values.collectors.docker.enabled .Values.collectors.crio.enabled .Values.collectors.containerd.enabled) .Values.collectors.containerEngine.enabled -}}
{{ fail "You can not enable one of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time. Please use the containerEngine configuration since the old configurations are deprecated." }}
{{ fail "You can not enable any of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time. Please use the containerEngine configuration since the old configurations are deprecated." }}
{{- end -}}
{{ $volumeMounts := list -}}
{{- if .Values.collectors.docker.enabled -}}
@ -528,16 +537,25 @@ This helper is used to add container plugin volumeMounts to the falco pod.
{{ $volumeMounts = append $volumeMounts (dict "name" "containerd-socket" "mountPath" (print "/host" .Values.collectors.containerd.socket)) -}}
{{- end -}}
{{- if .Values.collectors.containerEngine.enabled -}}
{{- range $key, $val := .Values.collectors.containerEngine.engines -}}
{{- if and $val.enabled -}}
{{- $seenPaths := dict -}}
{{- $idx := 0 -}}
{{- $engineOrder := list "docker" "podman" "containerd" "cri" "lxc" "libvirt_lxc" "bpm" -}}
{{- range $engineName := $engineOrder -}}
{{- $val := index $.Values.collectors.containerEngine.engines $engineName -}}
{{- if and $val $val.enabled -}}
{{- range $index, $socket := $val.sockets -}}
{{ $volumeMounts = append $volumeMounts (dict "name" (printf "%s-socket-%d" $key $index) "mountPath" (print "/host" $socket)) -}}
{{- $mountPath := print "/host" $socket -}}
{{- if not (hasKey $seenPaths $mountPath) -}}
{{ $volumeMounts = append $volumeMounts (dict "name" (printf "container-engine-socket-%d" $idx) "mountPath" $mountPath) -}}
{{- $idx = add $idx 1 -}}
{{- $_ := set $seenPaths $mountPath true -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- if gt (len $volumeMounts) 0 -}}
{{ toYaml $volumeMounts }}
{{ toYaml ($volumeMounts) }}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}

View File

@ -27,6 +27,9 @@ metadata:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if .Values.falco.podHostname }}
hostname: {{ .Values.falco.podHostname }}
{{- end }}
serviceAccountName: {{ include "falco.serviceAccountName" . }}
{{- with .Values.podSecurityContext }}
securityContext:

View File

@ -2,18 +2,12 @@ package containerPlugin
var volumeNames = []string{
"docker-socket",
"docker-socket-0",
"docker-socket-1",
"docker-socket-2",
"containerd-socket",
"containerd-socket-0",
"containerd-socket-1",
"containerd-socket-2",
"crio-socket",
"cri-socket-0",
"cri-socket-1",
"cri-socket-2",
"podman-socket-0",
"podman-socket-1",
"podman-socket-2",
"container-engine-socket-0",
"container-engine-socket-1",
"container-engine-socket-2",
"container-engine-socket-3",
"container-engine-socket-4",
"container-engine-socket-5",
}

View File

@ -29,20 +29,10 @@ func TestContainerPluginConfiguration(t *testing.T) {
nil,
func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
// Get init config.
initConfig, ok := plugin["init_config"]
require.True(t, ok)
require.Len(t, initConfig, 4, "checking number of config entries in the init section")
initConfigMap := initConfig.(map[string]interface{})
// Check the default values.
labelMaxLen := initConfigMap["label_max_len"]
require.Equal(t, float64(100), labelMaxLen.(float64), "checking default value for label_max_len")
withSize := initConfigMap["with_size"]
require.False(t, withSize.(bool), "checking default value for with_size")
hooks := initConfigMap["hooks"].([]interface{})
require.Len(t, hooks, 1, "checking number of hooks")
require.True(t, slices.Contains(hooks, "create"), "checking if create hook is present")
// Check engines configurations.
engines, ok := initConfigMap["engines"].(map[string]interface{})
@ -58,24 +48,25 @@ func TestContainerPluginConfiguration(t *testing.T) {
require.True(t, engineConfig.Docker.Enabled)
require.Equal(t, []string{"/var/run/docker.sock"}, engineConfig.Docker.Sockets)
require.False(t, engineConfig.Podman.Enabled)
require.True(t, engineConfig.Podman.Enabled)
require.Equal(t, []string{"/run/podman/podman.sock"}, engineConfig.Podman.Sockets)
require.True(t, engineConfig.Containerd.Enabled)
require.Equal(t, []string{"/run/containerd/containerd.sock"}, engineConfig.Containerd.Sockets)
require.Equal(t, []string{"/run/host-containerd/containerd.sock"}, engineConfig.Containerd.Sockets)
require.True(t, engineConfig.CRI.Enabled)
require.Equal(t, []string{"/run/crio/crio.sock"}, engineConfig.CRI.Sockets)
require.Equal(t, []string{"/run/containerd/containerd.sock", "/run/crio/crio.sock", "/run/k3s/containerd/containerd.sock", "/run/host-containerd/containerd.sock"}, engineConfig.CRI.Sockets)
require.False(t, engineConfig.LXC.Enabled)
require.False(t, engineConfig.LibvirtLXC.Enabled)
require.False(t, engineConfig.BPM.Enabled)
require.True(t, engineConfig.LXC.Enabled)
require.True(t, engineConfig.LibvirtLXC.Enabled)
require.True(t, engineConfig.BPM.Enabled)
},
},
{
name: "changeDockerSocket",
values: map[string]string{
"collectors.docker.socket": "/custom/docker.sock",
"collectors.containerEngine.engines.docker.enabled": "true",
"collectors.containerEngine.engines.docker.sockets[0]": "/custom/docker.sock",
},
expected: func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
@ -97,9 +88,10 @@ func TestContainerPluginConfiguration(t *testing.T) {
},
},
{
name: "changeCrioSocket",
name: "changeCriSocket",
values: map[string]string{
"collectors.crio.socket": "/custom/crio.sock",
"collectors.containerEngine.engines.cri.enabled": "true",
"collectors.containerEngine.engines.cri.sockets[0]": "/custom/cri.sock",
},
expected: func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
@ -117,13 +109,13 @@ func TestContainerPluginConfiguration(t *testing.T) {
require.NoError(t, err)
require.True(t, engineConfig.CRI.Enabled)
require.Equal(t, []string{"/custom/crio.sock"}, engineConfig.CRI.Sockets)
require.Equal(t, []string{"/custom/cri.sock"}, engineConfig.CRI.Sockets)
},
},
{
name: "disableDockerSocket",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerEngine.engines.docker.enabled": "false",
},
expected: func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
@ -144,9 +136,9 @@ func TestContainerPluginConfiguration(t *testing.T) {
},
},
{
name: "disableCrioSocket",
name: "disableCriSocket",
values: map[string]string{
"collectors.crio.enabled": "false",
"collectors.containerEngine.engines.cri.enabled": "false",
},
expected: func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
@ -169,7 +161,8 @@ func TestContainerPluginConfiguration(t *testing.T) {
{
name: "changeContainerdSocket",
values: map[string]string{
"collectors.containerd.socket": "/custom/containerd.sock",
"collectors.containerEngine.engines.containerd.enabled": "true",
"collectors.containerEngine.engines.containerd.sockets[0]": "/custom/containerd.sock",
},
expected: func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
@ -193,7 +186,7 @@ func TestContainerPluginConfiguration(t *testing.T) {
{
name: "disableContainerdSocket",
values: map[string]string{
"collectors.containerd.enabled": "false",
"collectors.containerEngine.engines.containerd.enabled": "false",
},
expected: func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
@ -214,13 +207,8 @@ func TestContainerPluginConfiguration(t *testing.T) {
},
},
{
name: "defaultContainerEngineConfig",
values: map[string]string{
"collectors.containerEngine.enabled": "true",
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "false",
},
name: "defaultContainerEngineConfig",
values: map[string]string{},
expected: func(t *testing.T, config any) {
plugin := config.(map[string]interface{})
initConfig, ok := plugin["init_config"]
@ -249,10 +237,10 @@ func TestContainerPluginConfiguration(t *testing.T) {
require.Equal(t, []string{"/run/podman/podman.sock"}, engineConfig.Podman.Sockets)
require.True(t, engineConfig.Containerd.Enabled)
require.Equal(t, []string{"/run/containerd/containerd.sock"}, engineConfig.Containerd.Sockets)
require.Equal(t, []string{"/run/host-containerd/containerd.sock"}, engineConfig.Containerd.Sockets)
require.True(t, engineConfig.CRI.Enabled)
require.Equal(t, []string{"/run/crio/crio.sock"}, engineConfig.CRI.Sockets)
require.Equal(t, []string{"/run/containerd/containerd.sock", "/run/crio/crio.sock", "/run/k3s/containerd/containerd.sock", "/run/host-containerd/containerd.sock"}, engineConfig.CRI.Sockets)
require.True(t, engineConfig.LXC.Enabled)
require.True(t, engineConfig.LibvirtLXC.Enabled)
@ -638,17 +626,17 @@ func TestInvalidCollectorConfiguration(t *testing.T) {
"collectors.crio.enabled": "false",
"collectors.containerEngine.enabled": "true",
},
expectedErr: "You can not enable one of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time. Please use the containerEngine configuration since the old configurations are deprecated.",
expectedErr: "You can not enable any of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time. Please use the containerEngine configuration since the old configurations are deprecated.",
},
{
name: "containerdAndContainerEngine",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectoars.containerd.enabled": "true",
"collectors.containerd.enabled": "true",
"collectors.crio.enabled": "false",
"collectors.containerEngine.enabled": "true",
},
expectedErr: "You can not enable one of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time. Please use the containerEngine configuration since the old configurations are deprecated.",
expectedErr: "You can not enable any of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time. Please use the containerEngine configuration since the old configurations are deprecated.",
},
{
name: "crioAndContainerEngine",
@ -658,7 +646,7 @@ func TestInvalidCollectorConfiguration(t *testing.T) {
"collectors.crio.enabled": "true",
"collectors.containerEngine.enabled": "true",
},
expectedErr: "You can not enable one of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time. Please use the containerEngine configuration since the old configurations are deprecated.",
expectedErr: "You can not enable any of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time. Please use the containerEngine configuration since the old configurations are deprecated.",
},
}
@ -697,7 +685,7 @@ func TestFalcoctlRefs(t *testing.T) {
refs := artifactConfig["install"].(map[string]interface{})["refs"].([]interface{})
require.Len(t, refs, 2)
require.True(t, slices.Contains(refs, "falco-rules:4"))
require.True(t, slices.Contains(refs, "ghcr.io/falcosecurity/plugins/plugin/container:0.3.0"))
require.True(t, slices.Contains(refs, "ghcr.io/falcosecurity/plugins/plugin/container:0.3.5"))
}
refShouldNotBeSet := func(t *testing.T, config any) {
@ -713,7 +701,7 @@ func TestFalcoctlRefs(t *testing.T) {
refs := artifactConfig["install"].(map[string]interface{})["refs"].([]interface{})
require.Len(t, refs, 1)
require.True(t, slices.Contains(refs, "falco-rules:4"))
require.False(t, slices.Contains(refs, "ghcr.io/falcosecurity/plugins/plugin/container:0.3.0"))
require.False(t, slices.Contains(refs, "ghcr.io/falcosecurity/plugins/plugin/container:0.3.5"))
}
testCases := []struct {

View File

@ -26,128 +26,148 @@ func TestContainerPluginVolumeMounts(t *testing.T) {
name: "defaultValues",
values: nil,
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
require.Len(t, volumeMounts, 3)
require.Equal(t, "docker-socket", volumeMounts[0].Name)
require.Len(t, volumeMounts, 6)
require.Equal(t, "container-engine-socket-0", volumeMounts[0].Name)
require.Equal(t, "/host/var/run/docker.sock", volumeMounts[0].MountPath)
require.Equal(t, "crio-socket", volumeMounts[1].Name)
require.Equal(t, "/host/run/crio/crio.sock", volumeMounts[1].MountPath)
require.Equal(t, "containerd-socket", volumeMounts[2].Name)
require.Equal(t, "/host/run/containerd/containerd.sock", volumeMounts[2].MountPath)
require.Equal(t, "container-engine-socket-1", volumeMounts[1].Name)
require.Equal(t, "/host/run/podman/podman.sock", volumeMounts[1].MountPath)
require.Equal(t, "container-engine-socket-2", volumeMounts[2].Name)
require.Equal(t, "/host/run/host-containerd/containerd.sock", volumeMounts[2].MountPath)
require.Equal(t, "container-engine-socket-3", volumeMounts[3].Name)
require.Equal(t, "/host/run/containerd/containerd.sock", volumeMounts[3].MountPath)
require.Equal(t, "container-engine-socket-4", volumeMounts[4].Name)
require.Equal(t, "/host/run/crio/crio.sock", volumeMounts[4].MountPath)
require.Equal(t, "container-engine-socket-5", volumeMounts[5].Name)
require.Equal(t, "/host/run/k3s/containerd/containerd.sock", volumeMounts[5].MountPath)
},
},
{
name: "defaultDockerVolumeMount",
values: map[string]string{
"collectors.docker.enabled": "true",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "false",
"collectors.containerEngine.engines.docker.enabled": "true",
"collectors.containerEngine.engines.containerd.enabled": "false",
"collectors.containerEngine.engines.cri.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
require.Len(t, volumeMounts, 1)
require.Equal(t, "docker-socket", volumeMounts[0].Name)
require.Equal(t, "container-engine-socket-0", volumeMounts[0].Name)
require.Equal(t, "/host/var/run/docker.sock", volumeMounts[0].MountPath)
},
},
{
name: "customDockerSocket",
values: map[string]string{
"collectors.docker.enabled": "true",
"collectors.docker.socket": "/custom/docker.sock",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "false",
"collectors.containerEngine.engines.docker.enabled": "true",
"collectors.containerEngine.engines.docker.sockets[0]": "/custom/docker.sock",
"collectors.containerEngine.engines.containerd.enabled": "false",
"collectors.containerEngine.engines.cri.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
require.Len(t, volumeMounts, 1)
require.Equal(t, "docker-socket", volumeMounts[0].Name)
require.Equal(t, "container-engine-socket-0", volumeMounts[0].Name)
require.Equal(t, "/host/custom/docker.sock", volumeMounts[0].MountPath)
},
},
{
name: "defaultCrioVolumeMount",
name: "defaultCriVolumeMount",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "true",
"collectors.containerEngine.engines.docker.enabled": "false",
"collectors.containerEngine.engines.containerd.enabled": "false",
"collectors.containerEngine.engines.cri.enabled": "true",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
require.Len(t, volumeMounts, 1)
require.Equal(t, "crio-socket", volumeMounts[0].Name)
require.Equal(t, "/host/run/crio/crio.sock", volumeMounts[0].MountPath)
require.Len(t, volumeMounts, 4)
require.Equal(t, "container-engine-socket-0", volumeMounts[0].Name)
require.Equal(t, "/host/run/containerd/containerd.sock", volumeMounts[0].MountPath)
require.Equal(t, "container-engine-socket-1", volumeMounts[1].Name)
require.Equal(t, "/host/run/crio/crio.sock", volumeMounts[1].MountPath)
require.Equal(t, "container-engine-socket-2", volumeMounts[2].Name)
require.Equal(t, "/host/run/k3s/containerd/containerd.sock", volumeMounts[2].MountPath)
require.Equal(t, "container-engine-socket-3", volumeMounts[3].Name)
require.Equal(t, "/host/run/host-containerd/containerd.sock", volumeMounts[3].MountPath)
},
},
{
name: "customCrioSocket",
name: "customCriSocket",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "true",
"collectors.crio.socket": "/custom/crio.sock",
"collectors.containerEngine.engines.cri.enabled": "true",
"collectors.containerEngine.engines.cri.sockets[0]": "/custom/crio.sock",
"collectors.containerEngine.engines.containerd.enabled": "false",
"collectors.containerEngine.engines.docker.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
require.Len(t, volumeMounts, 1)
require.Equal(t, "crio-socket", volumeMounts[0].Name)
require.Equal(t, "container-engine-socket-0", volumeMounts[0].Name)
require.Equal(t, "/host/custom/crio.sock", volumeMounts[0].MountPath)
},
},
{
name: "defaultContainerdVolumeMount",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "true",
"collectors.crio.enabled": "false",
"collectors.containerEngine.engines.docker.enabled": "false",
"collectors.containerEngine.engines.containerd.enabled": "true",
"collectors.containerEngine.engines.cri.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
require.Len(t, volumeMounts, 1)
require.Equal(t, "containerd-socket", volumeMounts[0].Name)
require.Equal(t, "/host/run/containerd/containerd.sock", volumeMounts[0].MountPath)
require.Equal(t, "container-engine-socket-0", volumeMounts[0].Name)
require.Equal(t, "/host/run/host-containerd/containerd.sock", volumeMounts[0].MountPath)
},
},
{
name: "customContainerdSocket",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "true",
"collectors.containerd.socket": "/custom/containerd.sock",
"collectors.crio.enabled": "false",
"collectors.containerEngine.engines.containerd.enabled": "true",
"collectors.containerEngine.engines.containerd.sockets[0]": "/custom/containerd.sock",
"collectors.containerEngine.engines.cri.enabled": "false",
"collectors.containerEngine.engines.docker.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
require.Len(t, volumeMounts, 1)
require.Equal(t, "containerd-socket", volumeMounts[0].Name)
require.Equal(t, "container-engine-socket-0", volumeMounts[0].Name)
require.Equal(t, "/host/custom/containerd.sock", volumeMounts[0].MountPath)
},
},
{
name: "ContainerEnginesDefaultValues",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "false",
"collectors.containerEngine.enabled": "true",
},
name: "ContainerEnginesDefaultValues",
values: map[string]string{},
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
require.Len(t, volumeMounts, 4)
require.Len(t, volumeMounts, 6)
dockerV := findVolumeMount("docker-socket-0", volumeMounts)
require.NotNil(t, dockerV)
require.Equal(t, "/host/var/run/docker.sock", dockerV.MountPath)
podmanV := findVolumeMount("podman-socket-0", volumeMounts)
require.NotNil(t, podmanV)
require.Equal(t, "/host/run/podman/podman.sock", podmanV.MountPath)
containerdV := findVolumeMount("containerd-socket-0", volumeMounts)
require.NotNil(t, containerdV)
require.Equal(t, "/host/run/containerd/containerd.sock", containerdV.MountPath)
crioV := findVolumeMount("cri-socket-0", volumeMounts)
require.NotNil(t, crioV)
require.Equal(t, "/host/run/crio/crio.sock", crioV.MountPath)
// dockerV := findVolumeMount("docker-socket-0", volumeMounts)
// require.NotNil(t, dockerV)
// require.Equal(t, "/host/var/run/docker.sock", dockerV.MountPath)
// podmanV := findVolumeMount("podman-socket-0", volumeMounts)
// require.NotNil(t, podmanV)
// require.Equal(t, "/host/run/podman/podman.sock", podmanV.MountPath)
// containerdV := findVolumeMount("containerd-socket-0", volumeMounts)
// require.NotNil(t, containerdV)
// require.Equal(t, "/host/run/host-containerd/containerd.sock", containerdV.MountPath)
// crioV0 := findVolumeMount("cri-socket-0", volumeMounts)
// require.NotNil(t, crioV0)
// require.Equal(t, "/host/run/containerd/containerd.sock", crioV0.MountPath)
// crioV1 := findVolumeMount("cri-socket-1", volumeMounts)
// require.NotNil(t, crioV1)
// require.Equal(t, "/host/run/crio/crio.sock", crioV1.MountPath)
// crioV2 := findVolumeMount("cri-socket-2", volumeMounts)
// require.NotNil(t, crioV2)
// require.Equal(t, "/host/run/k3s/containerd/containerd.sock", crioV2.MountPath)
},
},
{
name: "ContainerEnginesDockerWithMultipleSockets",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "false",
"collectors.containerEngine.enabled": "true",
"collectors.containerEngine.engines.docker.enabled": "true",
"collectors.containerEngine.engines.docker.sockets[0]": "/var/run/docker.sock",
"collectors.containerEngine.engines.docker.sockets[1]": "/custom/docker.sock",
@ -158,11 +178,11 @@ func TestContainerPluginVolumeMounts(t *testing.T) {
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
require.Len(t, volumeMounts, 2)
dockerV0 := findVolumeMount("docker-socket-0", volumeMounts)
dockerV0 := findVolumeMount("container-engine-socket-0", volumeMounts)
require.NotNil(t, dockerV0)
require.Equal(t, "/host/var/run/docker.sock", dockerV0.MountPath)
dockerV1 := findVolumeMount("docker-socket-1", volumeMounts)
dockerV1 := findVolumeMount("container-engine-socket-1", volumeMounts)
require.NotNil(t, dockerV1)
require.Equal(t, "/host/custom/docker.sock", dockerV1.MountPath)
},
@ -170,10 +190,6 @@ func TestContainerPluginVolumeMounts(t *testing.T) {
{
name: "ContainerEnginesCrioWithMultipleSockets",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "false",
"collectors.containerEngine.enabled": "true",
"collectors.containerEngine.engines.docker.enabled": "false",
"collectors.containerEngine.engines.containerd.enabled": "false",
"collectors.containerEngine.engines.cri.enabled": "true",
@ -184,11 +200,11 @@ func TestContainerPluginVolumeMounts(t *testing.T) {
expected: func(t *testing.T, volumeMounts []corev1.VolumeMount) {
require.Len(t, volumeMounts, 2)
crioV0 := findVolumeMount("cri-socket-0", volumeMounts)
crioV0 := findVolumeMount("container-engine-socket-0", volumeMounts)
require.NotNil(t, crioV0)
require.Equal(t, "/host/run/crio/crio.sock", crioV0.MountPath)
crioV1 := findVolumeMount("cri-socket-1", volumeMounts)
crioV1 := findVolumeMount("container-engine-socket-1", volumeMounts)
require.NotNil(t, crioV1)
require.Equal(t, "/host/custom/crio.sock", crioV1.MountPath)
},
@ -263,7 +279,7 @@ func TestInvalidVolumeMountConfiguration(t *testing.T) {
"collectors.docker.enabled": "true",
"collectors.containerEngine.enabled": "true",
},
expectedErr: "You can not enable one of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time",
expectedErr: "You can not enable any of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time",
},
}

View File

@ -27,93 +27,111 @@ func TestContainerPluginVolumes(t *testing.T) {
name: "defaultValues",
values: nil,
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 3)
require.Equal(t, "docker-socket", volumes[0].Name)
require.Len(t, volumes, 6)
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
require.Equal(t, "/var/run/docker.sock", volumes[0].HostPath.Path)
require.Equal(t, "crio-socket", volumes[1].Name)
require.Equal(t, "/run/crio/crio.sock", volumes[1].HostPath.Path)
require.Equal(t, "containerd-socket", volumes[2].Name)
require.Equal(t, "/run/containerd/containerd.sock", volumes[2].HostPath.Path)
require.Equal(t, "container-engine-socket-1", volumes[1].Name)
require.Equal(t, "/run/podman/podman.sock", volumes[1].HostPath.Path)
require.Equal(t, "container-engine-socket-2", volumes[2].Name)
require.Equal(t, "/run/host-containerd/containerd.sock", volumes[2].HostPath.Path)
require.Equal(t, "container-engine-socket-3", volumes[3].Name)
require.Equal(t, "/run/containerd/containerd.sock", volumes[3].HostPath.Path)
require.Equal(t, "container-engine-socket-4", volumes[4].Name)
require.Equal(t, "/run/crio/crio.sock", volumes[4].HostPath.Path)
require.Equal(t, "container-engine-socket-5", volumes[5].Name)
require.Equal(t, "/run/k3s/containerd/containerd.sock", volumes[5].HostPath.Path)
},
},
{
name: "defaultDockerVolume",
values: map[string]string{
"collectors.docker.enabled": "true",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "false",
"collectors.containerEngine.engines.docker.enabled": "true",
"collectors.containerEngine.engines.containerd.enabled": "false",
"collectors.containerEngine.engines.cri.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 1)
require.Equal(t, "docker-socket", volumes[0].Name)
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
require.Equal(t, "/var/run/docker.sock", volumes[0].HostPath.Path)
},
},
{
name: "customDockerSocket",
values: map[string]string{
"collectors.docker.enabled": "true",
"collectors.docker.socket": "/custom/docker.sock",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "false",
"collectors.containerEngine.engines.docker.enabled": "true",
"collectors.containerEngine.engines.docker.sockets[0]": "/custom/docker.sock",
"collectors.containerEngine.engines.containerd.enabled": "false",
"collectors.containerEngine.engines.cri.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 1)
require.Equal(t, "docker-socket", volumes[0].Name)
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
require.Equal(t, "/custom/docker.sock", volumes[0].HostPath.Path)
},
},
{
name: "defaultCrioVolume",
name: "defaultCriVolume",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "true",
"collectors.containerEngine.engines.docker.enabled": "false",
"collectors.containerEngine.engines.containerd.enabled": "false",
"collectors.containerEngine.engines.cri.enabled": "true",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 1)
require.Equal(t, "crio-socket", volumes[0].Name)
require.Equal(t, "/run/crio/crio.sock", volumes[0].HostPath.Path)
require.Len(t, volumes, 4)
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
require.Equal(t, "/run/containerd/containerd.sock", volumes[0].HostPath.Path)
require.Equal(t, "container-engine-socket-1", volumes[1].Name)
require.Equal(t, "/run/crio/crio.sock", volumes[1].HostPath.Path)
require.Equal(t, "container-engine-socket-2", volumes[2].Name)
require.Equal(t, "/run/k3s/containerd/containerd.sock", volumes[2].HostPath.Path)
require.Equal(t, "container-engine-socket-3", volumes[3].Name)
require.Equal(t, "/run/host-containerd/containerd.sock", volumes[3].HostPath.Path)
},
},
{
name: "customCrioSocket",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "false",
"collectors.crio.enabled": "true",
"collectors.crio.socket": "/custom/crio.sock",
"collectors.containerEngine.engines.docker.enabled": "false",
"collectors.containerEngine.engines.cri.enabled": "true",
"collectors.containerEngine.engines.cri.sockets[0]": "/custom/crio.sock",
"collectors.containerEngine.engines.containerd.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 1)
require.Equal(t, "crio-socket", volumes[0].Name)
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
require.Equal(t, "/custom/crio.sock", volumes[0].HostPath.Path)
},
},
{
name: "defaultContainerdVolume",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "true",
"collectors.crio.enabled": "false",
"collectors.containerEngine.engines.docker.enabled": "false",
"collectors.containerEngine.engines.containerd.enabled": "true",
"collectors.containerEngine.engines.cri.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 1)
require.Equal(t, "containerd-socket", volumes[0].Name)
require.Equal(t, "/run/containerd/containerd.sock", volumes[0].HostPath.Path)
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
require.Equal(t, "/run/host-containerd/containerd.sock", volumes[0].HostPath.Path)
},
},
{
name: "customContainerdSocket",
values: map[string]string{
"collectors.docker.enabled": "false",
"collectors.containerd.enabled": "true",
"collectors.containerd.socket": "/custom/containerd.sock",
"collectors.crio.enabled": "false",
"collectors.containerEngine.engines.docker.enabled": "false",
"collectors.containerEngine.engines.containerd.enabled": "true",
"collectors.containerEngine.engines.containerd.sockets[0]": "/custom/containerd.sock",
"collectors.containerEngine.engines.cri.enabled": "false",
"collectors.containerEngine.engines.podman.enabled": "false",
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 1)
require.Equal(t, "containerd-socket", volumes[0].Name)
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
require.Equal(t, "/custom/containerd.sock", volumes[0].HostPath.Path)
},
},
@ -127,20 +145,19 @@ func TestContainerPluginVolumes(t *testing.T) {
"collectors.containerEngine.enabled": "true",
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 4)
dockerV := findVolume("docker-socket-0", volumes)
require.NotNil(t, dockerV)
require.Equal(t, "/var/run/docker.sock", dockerV.HostPath.Path)
podmanV := findVolume("podman-socket-0", volumes)
require.NotNil(t, podmanV)
require.Equal(t, "/run/podman/podman.sock", podmanV.HostPath.Path)
containerdV := findVolume("containerd-socket-0", volumes)
require.NotNil(t, containerdV)
require.Equal(t, "/run/containerd/containerd.sock", containerdV.HostPath.Path)
crioV := findVolume("cri-socket-0", volumes)
require.NotNil(t, crioV)
require.Equal(t, "/run/crio/crio.sock", crioV.HostPath.Path)
require.Len(t, volumes, 6)
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
require.Equal(t, "/var/run/docker.sock", volumes[0].HostPath.Path)
require.Equal(t, "container-engine-socket-1", volumes[1].Name)
require.Equal(t, "/run/podman/podman.sock", volumes[1].HostPath.Path)
require.Equal(t, "container-engine-socket-2", volumes[2].Name)
require.Equal(t, "/run/host-containerd/containerd.sock", volumes[2].HostPath.Path)
require.Equal(t, "container-engine-socket-3", volumes[3].Name)
require.Equal(t, "/run/containerd/containerd.sock", volumes[3].HostPath.Path)
require.Equal(t, "container-engine-socket-4", volumes[4].Name)
require.Equal(t, "/run/crio/crio.sock", volumes[4].HostPath.Path)
require.Equal(t, "container-engine-socket-5", volumes[5].Name)
require.Equal(t, "/run/k3s/containerd/containerd.sock", volumes[5].HostPath.Path)
},
},
{
@ -159,14 +176,10 @@ func TestContainerPluginVolumes(t *testing.T) {
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 2)
dockerV0 := findVolume("docker-socket-0", volumes)
require.NotNil(t, dockerV0)
require.Equal(t, "/var/run/docker.sock", dockerV0.HostPath.Path)
dockerV1 := findVolume("docker-socket-1", volumes)
require.NotNil(t, dockerV1)
require.Equal(t, "/custom/docker.sock", dockerV1.HostPath.Path)
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
require.Equal(t, "/var/run/docker.sock", volumes[0].HostPath.Path)
require.Equal(t, "container-engine-socket-1", volumes[1].Name)
require.Equal(t, "/custom/docker.sock", volumes[1].HostPath.Path)
},
},
{
@ -185,14 +198,10 @@ func TestContainerPluginVolumes(t *testing.T) {
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 2)
crioV0 := findVolume("cri-socket-0", volumes)
require.NotNil(t, crioV0)
require.Equal(t, "/run/crio/crio.sock", crioV0.HostPath.Path)
crioV1 := findVolume("cri-socket-1", volumes)
require.NotNil(t, crioV1)
require.Equal(t, "/custom/crio.sock", crioV1.HostPath.Path)
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
require.Equal(t, "/run/crio/crio.sock", volumes[0].HostPath.Path)
require.Equal(t, "container-engine-socket-1", volumes[1].Name)
require.Equal(t, "/custom/crio.sock", volumes[1].HostPath.Path)
},
},
{
@ -211,14 +220,10 @@ func TestContainerPluginVolumes(t *testing.T) {
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 2)
podmanV0 := findVolume("podman-socket-0", volumes)
require.NotNil(t, podmanV0)
require.Equal(t, "/run/podman/podman.sock", podmanV0.HostPath.Path)
podmanV1 := findVolume("podman-socket-1", volumes)
require.NotNil(t, podmanV1)
require.Equal(t, "/custom/podman.sock", podmanV1.HostPath.Path)
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
require.Equal(t, "/run/podman/podman.sock", volumes[0].HostPath.Path)
require.Equal(t, "container-engine-socket-1", volumes[1].Name)
require.Equal(t, "/custom/podman.sock", volumes[1].HostPath.Path)
},
},
{
@ -237,14 +242,10 @@ func TestContainerPluginVolumes(t *testing.T) {
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 2)
containerdV0 := findVolume("containerd-socket-0", volumes)
require.NotNil(t, containerdV0)
require.Equal(t, "/run/containerd/containerd.sock", containerdV0.HostPath.Path)
containerdV1 := findVolume("containerd-socket-1", volumes)
require.NotNil(t, containerdV1)
require.Equal(t, "/custom/containerd.sock", containerdV1.HostPath.Path)
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
require.Equal(t, "/run/containerd/containerd.sock", volumes[0].HostPath.Path)
require.Equal(t, "container-engine-socket-1", volumes[1].Name)
require.Equal(t, "/custom/containerd.sock", volumes[1].HostPath.Path)
},
},
{
@ -264,22 +265,14 @@ func TestContainerPluginVolumes(t *testing.T) {
},
expected: func(t *testing.T, volumes []corev1.Volume) {
require.Len(t, volumes, 4)
dockerV0 := findVolume("docker-socket-0", volumes)
require.NotNil(t, dockerV0)
require.Equal(t, "/custom/docker/socket.sock", dockerV0.HostPath.Path)
containerdV0 := findVolume("containerd-socket-0", volumes)
require.NotNil(t, containerdV0)
require.Equal(t, "/run/containerd/containerd.sock", containerdV0.HostPath.Path)
crioV0 := findVolume("cri-socket-0", volumes)
require.NotNil(t, crioV0)
require.Equal(t, "/var/custom/crio.sock", crioV0.HostPath.Path)
podmanV0 := findVolume("podman-socket-0", volumes)
require.NotNil(t, podmanV0)
require.Equal(t, "/run/podman/podman.sock", podmanV0.HostPath.Path)
require.Equal(t, "container-engine-socket-0", volumes[0].Name)
require.Equal(t, "/custom/docker/socket.sock", volumes[0].HostPath.Path)
require.Equal(t, "container-engine-socket-1", volumes[1].Name)
require.Equal(t, "/run/podman/podman.sock", volumes[1].HostPath.Path)
require.Equal(t, "container-engine-socket-2", volumes[2].Name)
require.Equal(t, "/run/host-containerd/containerd.sock", volumes[2].HostPath.Path)
require.Equal(t, "container-engine-socket-3", volumes[3].Name)
require.Equal(t, "/var/custom/crio.sock", volumes[3].HostPath.Path)
},
},
{
@ -349,7 +342,7 @@ func TestInvalidVolumeConfiguration(t *testing.T) {
"collectors.docker.enabled": "true",
"collectors.containerEngine.enabled": "true",
},
expectedErr: "You can not enable one of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time",
expectedErr: "You can not enable any of the [docker, containerd, crio] collectors configuration and the containerEngine configuration at the same time",
},
}

View File

@ -18,12 +18,13 @@ package k8smetaPlugin
import (
"encoding/json"
"fmt"
"github.com/falcosecurity/charts/charts/falco/tests/unit"
"path/filepath"
"regexp"
"strings"
"testing"
"github.com/falcosecurity/charts/charts/falco/tests/unit"
"slices"
"github.com/gruntwork-io/terratest/modules/helm"
@ -589,8 +590,8 @@ func TestFalcoctlRefs(t *testing.T) {
refs := artifactConfig["install"].(map[string]interface{})["refs"].([]interface{})
require.Len(t, refs, 3)
require.True(t, slices.Contains(refs, "falco-rules:4"))
require.True(t, slices.Contains(refs, "ghcr.io/falcosecurity/plugins/plugin/k8smeta:0.3.0"))
require.True(t, slices.Contains(refs, "ghcr.io/falcosecurity/plugins/plugin/container:0.3.0"))
require.True(t, slices.Contains(refs, "ghcr.io/falcosecurity/plugins/plugin/k8smeta:0.3.1"))
require.True(t, slices.Contains(refs, "ghcr.io/falcosecurity/plugins/plugin/container:0.3.5"))
}
testCases := []struct {

View File

@ -365,21 +365,21 @@ collectors:
# -- This collector is deprecated and will be removed in the future. Please use the containerEngine collector instead.
docker:
# -- Enable Docker support.
enabled: true
enabled: false
# -- The path of the Docker daemon socket.
socket: /var/run/docker.sock
# -- This collector is deprecated and will be removed in the future. Please use the containerEngine collector instead.
containerd:
# -- Enable ContainerD support.
enabled: true
enabled: false
# -- The path of the ContainerD socket.
socket: /run/containerd/containerd.sock
socket: /run/host-containerd/containerd.sock
# -- This collector is deprecated and will be removed in the future. Please use the containerEngine collector instead.
crio:
# -- Enable CRI-O support.
enabled: true
enabled: false
# -- The path of the CRI-O socket.
socket: /run/crio/crio.sock
@ -389,10 +389,10 @@ collectors:
# Keep in mind that the old collectors (docker, containerd, crio, podman) will use the container plugin to collect metadata under the hood.
containerEngine:
# -- Enable Container Engine support.
enabled: false
enabled: true
# -- pluginRef is the OCI reference for the container plugin. It could be a full reference such as
# "ghcr.io/falcosecurity/plugins/plugin/container:0.3.0". Or just name + tag: container:0.3.0.
pluginRef: "ghcr.io/falcosecurity/plugins/plugin/container:0.3.0"
# "ghcr.io/falcosecurity/plugins/plugin/container:0.3.5". Or just name + tag: container:0.3.5.
pluginRef: "ghcr.io/falcosecurity/plugins/plugin/container:0.3.5"
# -- labelMaxLen is the maximum length of the labels that can be used in the container plugin.
# container labels larger than this value won't be collected.
labelMaxLen: 100
@ -401,6 +401,8 @@ collectors:
# -- hooks specify the hooks that will be used to collect metadata from the container engine.
# The available hooks are: create, start.
hooks: ["create"]
# -- engines specify the container engines that will be used to collect metadata.
# See https://github.com/falcosecurity/plugins/blob/main/plugins/container/README.md#configuration
engines:
docker:
enabled: true
@ -410,10 +412,16 @@ collectors:
sockets: ["/run/podman/podman.sock"]
containerd:
enabled: true
sockets: ["/run/containerd/containerd.sock"]
sockets: ["/run/host-containerd/containerd.sock"]
cri:
enabled: true
sockets: ["/run/crio/crio.sock"]
sockets:
[
"/run/containerd/containerd.sock",
"/run/crio/crio.sock",
"/run/k3s/containerd/containerd.sock",
"/run/host-containerd/containerd.sock",
]
lxc:
enabled: true
libvirt_lxc:
@ -435,7 +443,7 @@ collectors:
enabled: false
# --pluginRef is the OCI reference for the k8smeta plugin. It could be a full reference such as:
# "ghcr.io/falcosecurity/plugins/plugin/k8smeta:0.1.0". Or just name + tag: k8smeta:0.1.0.
pluginRef: "ghcr.io/falcosecurity/plugins/plugin/k8smeta:0.3.0"
pluginRef: "ghcr.io/falcosecurity/plugins/plugin/k8smeta:0.3.1"
# -- collectorHostname is the address of the k8s-metacollector. When not specified it will be set to match
# k8s-metacollector service. e.x: falco-k8smetacollecto.falco.svc. If for any reason you need to override
# it, make sure to set here the address of the k8s-metacollector.
@ -466,6 +474,9 @@ extra:
# -- Additional initContainers for Falco pods.
initContainers: []
# -- Override hostname in falco pod
podHostname:
# -- certificates used by webserver and grpc server.
# paste certificate content or use helm with --set-file
# or use existing secret containing key, crt, ca as well as pem bundle