Compare commits

..

No commits in common. "master" and "2.29.0-rc.5" have entirely different histories.

112 changed files with 1898 additions and 4749 deletions

View File

@ -5,17 +5,15 @@ updates:
directory: "/"
schedule:
interval: "weekly"
groups:
gomod:
update-types:
- "patch"
labels:
- "area/dependency"
- "release-note-none"
- "ok-to-test"
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"
groups:
actions:
update-types:
- "minor"
- "patch"
labels:
- "area/dependency"
- "release-note-none"
- "ok-to-test"

View File

@ -9,23 +9,23 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
- uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
with:
go-version: '1.23'
go-version: '1.22'
check-latest: true
cache: true
- name: Set up QEMU
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3.0.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
uses: docker/setup-buildx-action@d70bba72b1f3fd22344832f00baa16ece964efeb # v3.3.0
- uses: anchore/sbom-action/download-syft@7b36ad622f042cab6f59a75c2ac24ccb256e9b45 # v0.20.4
- uses: anchore/sbom-action/download-syft@e8d2a6937ecead383dfe75190d104edd1f9c5751 # v0.16.0
- uses: goreleaser/goreleaser-action@9c156ee8a17a598857849441385a2041ef570552 # v6.3.0
- uses: goreleaser/goreleaser-action@286f3b13b1b49da4ac219696163fb8c1c93e1200 # v6.0.0
with:
install-only: true

View File

@ -14,14 +14,14 @@ jobs:
name: lint
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
with:
go-version: '1.23'
go-version: '1.22'
cache: false
check-latest: true
- name: golangci-lint
uses: golangci/golangci-lint-action@2226d7cb06a077cd73e56eedd38eecad18e5d837 # v6.5.0
uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v6.0.1
with:
version: v1.61
version: v1.57
args: --timeout=5m

View File

@ -14,25 +14,25 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
- uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
with:
go-version: '1.23'
go-version: '1.22'
check-latest: true
cache: true
- name: Set up QEMU
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3.0.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
uses: docker/setup-buildx-action@d70bba72b1f3fd22344832f00baa16ece964efeb # v3.3.0
- uses: sigstore/cosign-installer@d58896d6a1865668819e1d91763c7751a165e159 # v3.9.2
- uses: sigstore/cosign-installer@59acb6260d9c0ba8f4a2f9d9b48431a222b68e20 # v3.5.0
- uses: anchore/sbom-action/download-syft@7b36ad622f042cab6f59a75c2ac24ccb256e9b45 # v0.20.4
- uses: anchore/sbom-action/download-syft@e8d2a6937ecead383dfe75190d104edd1f9c5751 # v0.16.0
- uses: goreleaser/goreleaser-action@9c156ee8a17a598857849441385a2041ef570552 # v6.3.0
- uses: goreleaser/goreleaser-action@286f3b13b1b49da4ac219696163fb8c1c93e1200 # v6.0.0
with:
install-only: true
@ -47,7 +47,7 @@ jobs:
# Push images to DockerHUB
- name: Login to Docker Hub
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3.2.0
with:
username: ${{ secrets.DOCKERHUB_USER }}
password: ${{ secrets.DOCKERHUB_SECRET }}
@ -63,7 +63,7 @@ jobs:
# Push images to AWS Public ECR
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@b47578312673ae6fa5b5096b330d9fbac3d116df # v4.2.1
uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2
with:
role-to-assume: arn:aws:iam::292999226676:role/github_actions-falcosidekick-ecr
aws-region: us-east-1

View File

@ -18,33 +18,33 @@ jobs:
tag_name: ${{ steps.tag.outputs.tag_name }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
- uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
with:
go-version: '1.23'
go-version: '1.22'
check-latest: true
- name: Set up QEMU
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3.0.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
uses: docker/setup-buildx-action@d70bba72b1f3fd22344832f00baa16ece964efeb # v3.3.0
- uses: sigstore/cosign-installer@d58896d6a1865668819e1d91763c7751a165e159 # v3.9.2
- uses: sigstore/cosign-installer@59acb6260d9c0ba8f4a2f9d9b48431a222b68e20 # v3.5.0
- uses: anchore/sbom-action/download-syft@7b36ad622f042cab6f59a75c2ac24ccb256e9b45 # v0.20.4
- uses: anchore/sbom-action/download-syft@e8d2a6937ecead383dfe75190d104edd1f9c5751 # v0.16.0
# Push images to DockerHUB
- name: Login to Docker Hub
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3.2.0
with:
username: ${{ secrets.DOCKERHUB_USER }}
password: ${{ secrets.DOCKERHUB_SECRET }}
# Push images to AWS Public ECR
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@b47578312673ae6fa5b5096b330d9fbac3d116df # v4.2.1
uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2
with:
role-to-assume: arn:aws:iam::292999226676:role/github_actions-falcosidekick-ecr
aws-region: us-east-1
@ -68,10 +68,10 @@ jobs:
- name: Run GoReleaser
id: run-goreleaser
uses: goreleaser/goreleaser-action@9c156ee8a17a598857849441385a2041ef570552 # v6.3.0
uses: goreleaser/goreleaser-action@286f3b13b1b49da4ac219696163fb8c1c93e1200 # v6.0.0
with:
version: latest
args: release --clean --timeout 120m --parallelism 1
args: release --clean --timeout 120m
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
LDFLAGS: ${{ env.GO_FLAGS }}
@ -92,7 +92,7 @@ jobs:
actions: read # To read the workflow path.
id-token: write # To sign the provenance.
contents: write # To add assets to a release.
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.0.0
with:
base64-subjects: "${{ needs.release.outputs.hashes }}"
upload-assets: true

View File

@ -14,10 +14,10 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
with:
go-version: '1.23'
go-version: '1.22'
check-latest: true
cache: true
- name: Run Go tests

View File

@ -1,5 +1,5 @@
run:
timeout: 5m
deadline: 5m
issues:
exclude-files:
- "zz_generated.*\\.go$"
@ -18,3 +18,7 @@ linters:
- unused
# Run with --fast=false for more extensive checks
fast: true
include:
- EXC0002 # include "missing comments" issues from golint
max-issues-per-linter: 0
max-same-issues: 0

View File

@ -10,7 +10,7 @@ env:
- COSIGN_YES=true
snapshot:
version_template: 'latest'
name_template: 'latest'
checksum:
name_template: 'checksums.txt'

View File

@ -1,52 +1,5 @@
# Changelog
## 2.31.1 - 2025-02-04
#### Fix
- Fix error while closing the writer for `GCPStorage` ([PR#1116](https://github.com/falcosecurity/falcosidekick/pull/1116) thanks to [@chanukya-yekollu-exa](https://github.com/chanukya-yekollu-exa))
## 2.31.0 - 2025-02-03
#### New
- New output: **OTLP Logs** ([PR#1109](https://github.com/falcosecurity/falcosidekick/pull/1109))
#### Enhancement
- Add the namespace and the pod name as labels by default in `Loki` payload ([PR#1087](https://github.com/falcosecurity/falcosidekick/pull/1087) thanks to [@afreyermuth98](https://github.com/afreyermuth98))
- Allow to set the format for the `Loki` payload to JSON ([PR#1091](https://github.com/falcosecurity/falcosidekick/pull/1091))
- Allow to set a template for the subjects for `NATS`/`STAN` outputs ([PR#1099](https://github.com/falcosecurity/falcosidekick/pull/1099))
- Improve the logger with a generic and extensible method ([PR#1102](https://github.com/falcosecurity/falcosidekick/pull/1102))
#### Fix
- Remove forgotten debug line ([PR#1088](https://github.com/falcosecurity/falcosidekick/pull/1088))
- Fix missing templated fields as labels in `Loki` payload ([PR#1091](https://github.com/falcosecurity/falcosidekick/pull/1091))
- Fix creation error of `ClusterPolicyReports` ([PR#1100](https://github.com/falcosecurity/falcosidekick/pull/1100))
- Fix missing custom headers for HTTP requests for `Loki` ([PR#1107](https://github.com/falcosecurity/falcosidekick/pull/1107) thanks to [@lsroe](https://github.com/lsroe))
- Fix wrong key format for `Prometheus` format ([PR#1110](https://github.com/falcosecurity/falcosidekick/pull/1110) thanks to [@rubensf](https://github.com/rubensf))
## 2.30.0 - 2024-11-28
#### New
- New output: **Webex** ([PR#979](https://github.com/falcosecurity/falcosidekick/pull/979) thanks to [@k0rventen](https://github.com/k0rventen))
- New output: **OTLP Metrics** ([PR#1012](https://github.com/falcosecurity/falcosidekick/pull/1012) thanks to [@ekoops](https://github.com/ekoops))
- New output: **Datadog Logs** ([PR#1052](https://github.com/falcosecurity/falcosidekick/pull/1052) thanks to [@yohboy](https://github.com/yohboy))
#### Enhancement
- Reuse of the http client for 3-4x increase of the throughput ([PR#962](https://github.com/falcosecurity/falcosidekick/pull/962) thanks to [@aleksmaus](https://github.com/aleksmaus))
- Improve outputs throughput handling ([PR#966](https://github.com/falcosecurity/falcosidekick/pull/966) thanks to [@aleksmaus](https://github.com/aleksmaus))
- Batching and gzip compression for the `Elasticsearch` output ([PR#967](https://github.com/falcosecurity/falcosidekick/pull/967) thanks to [@aleksmaus](https://github.com/aleksmaus))
- Use the same convention for the Prometheus metrics than Falco ([PR#995](https://github.com/falcosecurity/falcosidekick/pull/995))
- Add `APIKey` for `Elasticsearch` output ([PR#980](https://github.com/falcosecurity/falcosidekick/pull/980) thanks to [@aleksmaus](https://github.com/aleksmaus))
- Add `Pipeline` configuration for `Elasticsearch` output ([PR#981](https://github.com/falcosecurity/falcosidekick/pull/981) thanks to [@aleksmaus](https://github.com/aleksmaus))
- Add `MessageThreadID` configuration in `Telegram` output ([PR#1008](https://github.com/falcosecurity/falcosidekick/pull/1008) thanks to [@vashian](https://github.com/vashian))
- Support multi-architecture in build ([PR#1024](https://github.com/falcosecurity/falcosidekick/pull/1024) thanks to [@nickytd](https://github.com/nickytd))
- Add `falco` as source for the `Datadog Events` ([PR#1043](https://github.com/falcosecurity/falcosidekick/pull/1043) thanks to [@maxd-wttj](https://github.com/maxd-wttj))
- Support `AlertManager` output in HA mode ([PR#1051](https://github.com/falcosecurity/falcosidekick/pull/1051))
#### Fix
- Fix `PolicyReports` created in the same namespace than previous event ([PR#978](https://github.com/falcosecurity/falcosidekick/pull/978))
- Fix missing `customFields/extraFields` in the `Elasticsearch` payload ([PR#1033](https://github.com/falcosecurity/falcosidekick/pull/1033))
- Fix incorrect key name for `CloudEvent` spec attribute ([PR#1051](https://github.com/falcosecurity/falcosidekick/pull/1051))
> [!WARNING]
> Breaking change: The Prometheus metrics have different names from this release, it might break the queries for the dashboards and alerts.
## 2.29.0 - 2024-07-01
#### New
- New output: **Dynatrace** ([PR#575](https://github.com/falcosecurity/falcosidekick/pull/575) thanks to [@blu3r4y](https://github.com/blu3r4y))

View File

@ -1,7 +1,7 @@
ARG BASE_IMAGE=alpine:3.19
# Final Docker image
FROM ${BASE_IMAGE} AS final-stage
LABEL MAINTAINER="Thomas Labarussias <issif+falcosidekick@gadz.org>"
LABEL MAINTAINER "Thomas Labarussias <issif+falcosidekick@gadz.org>"
RUN apk add --update --no-cache ca-certificates gcompat

View File

@ -12,7 +12,7 @@ RUN make falcosidekick
# Final Docker image
FROM ${BASE_IMAGE} AS final-stage
LABEL MAINTAINER="Thomas Labarussias <issif+falcosidekick@gadz.org>"
LABEL MAINTAINER "Thomas Labarussias <issif+falcosidekick@gadz.org>"
RUN apk add --update --no-cache ca-certificates

View File

@ -2,6 +2,7 @@
SHELL=/bin/bash -o pipefail
.DEFAULT_GOAL:=help
GOPATH := $(shell go env GOPATH)
GOARCH := $(shell go env GOARCH)
GOOS := $(shell go env GOOS)
@ -54,15 +55,15 @@ IMAGE_TAG := falcosecurity/falcosidekick:latest
.PHONY: falcosidekick
falcosidekick:
$(GO) mod download
GOOS=$(GOOS) GOARCH=$(GOARCH) $(GO) build -trimpath -ldflags "$(LDFLAGS)" -gcflags all=-trimpath=/src -asmflags all=-trimpath=/src -a -installsuffix cgo -o $@ .
$(GO) build -trimpath -ldflags "$(LDFLAGS)" -gcflags all=-trimpath=/src -asmflags all=-trimpath=/src -a -installsuffix cgo -o $@ .
.PHONY: falcosidekick-linux
falcosidekick-linux:
.PHONY: falcosidekick-linux-amd64
falcosidekick-linux-amd64:
$(GO) mod download
GOOS=linux GOARCH=$(GOARCH) $(GO) build -ldflags "$(LDFLAGS)" -gcflags all=-trimpath=/src -asmflags all=-trimpath=/src -a -installsuffix cgo -o falcosidekick .
GOOS=linux GOARCH=amd64 $(GO) build -gcflags all=-trimpath=/src -asmflags all=-trimpath=/src -a -installsuffix cgo -o falcosidekick .
.PHONY: build-image
build-image: falcosidekick-linux
build-image: falcosidekick-linux-amd64
$(DOCKER) build -t $(IMAGE_TAG) .
.PHONY: push-image

View File

@ -78,7 +78,6 @@ Follow the links to get the configuration of each output.
- [**Rocketchat**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/rocketchat.md)
- [**Mattermost**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/mattermost.md)
- [**Teams**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/teams.md)
- [**Webex**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/webex.md)
- [**Discord**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/discord.md)
- [**Google Chat**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/googlechat.md)
- [**Zoho Cliq**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/cliq.md)
@ -95,7 +94,6 @@ Follow the links to get the configuration of each output.
- [**Spyderbat**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/spyderbat.md)
- [**TimescaleDB**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/timescaledb.md)
- [**Dynatrace**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/dynatrace.md)
- [**OTEL Metrics**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/otlp_metrics.md) (for both events and monitoring of `falcosidekick`)
### Alerting
@ -115,8 +113,6 @@ Follow the links to get the configuration of each output.
- [**OpenObserve**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/openobserve.md)
- [**SumoLogic**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/sumologic.md)
- [**Quickwit**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/quickwit.md)
- [**Datadog Logs**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/datadog_logs.md)
- [**Logstash**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/logstash.md)
### Object Storage
@ -332,9 +328,6 @@ customfields: # custom fields are added to falco events, if the value starts wit
# Ckey: "CValue"
templatedfields: # templated fields are added to falco events and metrics, it uses Go template + output_fields values
# Dkey: '{{ or (index . "k8s.ns.labels.foo") "bar" }}'
customtags: # custom tags are added to the falco events, if the value starts with % the relative env var is used
# - tagA
# - tagB
# bracketreplacer: "_" # if not empty, replace the brackets in keys of Output Fields
outputFieldFormat: "<timestamp>: <priority> <output> <custom_fields> <templated_fields>" # if not empty, allow to change the format of the output field. (default: "<timestamp>: <priority> <output>")
mutualtlsfilespath: "/etc/certs" # folder which will be used to store client.crt, client.key and ca.crt files for mutual tls for outputs, will be deprecated in the future (default: "/etc/certs")

918
config.go

File diff suppressed because it is too large Load Diff

View File

@ -8,9 +8,6 @@ customfields: # custom fields are added to falco events and metrics, if the valu
templatedfields: # templated fields are added to falco events and metrics, it uses Go template + output_fields values
# Dkey: '{{ or (index . "k8s.ns.labels.foo") "bar" }}'
# bracketreplacer: "_" # if not empty, the brackets in keys of Output Fields are replaced
customtags: # custom tags are added to the falco events, if the value starts with % the relative env var is used
- tagA
- tagB
outputFieldFormat: "<timestamp>: <priority> <output> <custom_fields> <templated_fields>" # if not empty, allow to change the format of the output field. (default: "<timestamp>: <priority> <output>")
mutualtlsfilespath: "/etc/certs" # folder which will be used to store client.crt, client.key and ca.crt files for mutual tls for outputs, will be deprecated in the future (default: "/etc/certs")
mutualtlsclient: # takes priority over mutualtlsfilespath if not empty
@ -68,23 +65,13 @@ teams:
outputformat: "all" # all (default), text, facts
minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
webex:
# webhookurl: "" # Webex WebhookURL, if not empty, Teams Webex is enabled
# minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
datadog:
# apikey: "" # Datadog API Key, if not empty, Datadog output is enabled
# host: "" # Datadog host. Override if you are on the Datadog EU site. Defaults to american site with "https://api.datadoghq.com"
# minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
datadoglogs:
# apikey: "" # Datadog API Key, if not empty, Datadog Logs output is enabled
# host: "" # Datadog host. Override if you are on the Datadog EU site. Defaults to american site with "https://http-intake.logs.datadoghq.com/"
# minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
# service: "" # The name of the application or service generating the log events.
alertmanager:
# hostport: "" # Comma separated list of http://{domain or ip}:{port} that will all receive the payload, if not empty, Alertmanager output is enabled
# hostport: "" # http://{domain or ip}:{port}, if not empty, Alertmanager output is enabled
# minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
# mutualtls: false # if true, checkcert flag will be ignored (server cert will always be checked)
# checkcert: true # check if ssl certificate of the output is valid (default: true)
@ -102,12 +89,10 @@ elasticsearch:
# hostport: "" # http://{domain or ip}:{port}, if not empty, Elasticsearch output is enabled
# index: "falco" # index (default: falco)
# type: "_doc"
# pipeline: "" # optional ingest pipeline name
# minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
# suffix: "daily" # date suffix for index rotation : daily (default), monthly, annually, none
# mutualtls: false # if true, checkcert flag will be ignored (server cert will always be checked)
# checkcert: true # check if ssl certificate of the output is valid (default: true)
# apikey: "" # use this APIKey to authenticate to Elasticsearch if the APIKey is not empty (default: "")
# username: "" # use this username to authenticate to Elasticsearch if the username is not empty (default: "")
# password: "" # use this password to authenticate to Elasticsearch if the password is not empty (default: "")
# flattenfields: false # replace . by _ to avoid mapping conflicts, force to true if createindextemplate==true (default: false)
@ -116,12 +101,6 @@ elasticsearch:
# numberofreplicas: 3 # number of replicas set by the index template (default: 3)
# customHeaders: # Custom headers to add in POST, useful for Authentication
# key: value
# enablecompression: false # if true enables gzip compression for http requests (default: false)
# batching: # batching configuration, improves throughput dramatically utilizing _bulk Elasticsearch API
# enabled: true # if true enables batching
# batchsize: 5242880 # batch size in bytes (default: 5 MB)
# flushinterval: 1s # batch flush interval (default: 1s)
# maxconcurrentrequests: 1 # max number of concurrent http requests (default: 1)
quickwit:
# hostport: "" # http(s)://{domain or ip}:{port}, if not empty, Quickwit output is enabled
@ -156,7 +135,6 @@ loki:
# mutualtls: false # if true, checkcert flag will be ignored (server cert will always be checked)
# checkcert: true # check if ssl certificate of the output is valid (default: true)
# tenant: "" # Add the Tenant header
# format: "text" # Format for the log entry value: json, text (default)
# endpoint: "/loki/api/v1/push" # The endpoint URL path, default is "/loki/api/v1/push" more info : https://grafana.com/docs/loki/latest/api/#post-apiprompush
# extralabels: "" # comma separated list of fields to use as labels additionally to rule, source, priority, tags and custom_fields
# customHeaders: # Custom headers to add in POST, useful for Authentication
@ -164,7 +142,6 @@ loki:
nats:
# hostport: "" # nats://{domain or ip}:{port}, if not empty, NATS output is enabled
# subjecttemplate: "falco.<priority>.<rule>" # template for the subject, tokens <priority> and <rule> will be automatically replaced (default: falco.<priority>.<rule>)
# minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
# mutualtls: false # if true, checkcert flag will be ignored (server cert will always be checked)
# checkcert: true # check if ssl certificate of the output is valid (default: true)
@ -173,7 +150,6 @@ stan:
# hostport: "" # nats://{domain or ip}:{port}, if not empty, STAN output is enabled
# clusterid: "" # Cluster name, if not empty, STAN output is enabled
# clientid: "" # Client ID, if not empty, STAN output is enabled
# subjecttemplate: "falco.<priority>.<rule>" # template for the subject, tokens <priority> and <rule> will be automatically replaced (default: falco.<priority>.<rule>)
# minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
# mutualtls: false # if true, checkcert flag will be ignored (server cert will always be checked)
# checkcert: true # check if ssl certificate of the output is valid (default: true)
@ -540,7 +516,7 @@ sumologic:
otlp:
traces:
# endpoint: "" # OTLP endpoint in the form of http(s)://{domain or ip}:4318(/v1/traces), if not empty, OTLP Traces output is enabled
# endpoint: "" # OTLP endpoint in the form of http://{domain or ip}:4318/v1/traces
# protocol: "" # OTLP protocol http/json, http/protobuf, grpc (default: "" which uses SDK default: http/json)
# timeout: "" # OTLP timeout: timeout value in milliseconds (default: "" which uses SDK default: 10000)
# headers: "" # OTLP headers: list of headers to apply to all outgoing traces in the form of "some-key=some-value,other-key=other-value" (default: "")
@ -551,41 +527,8 @@ otlp:
# OTEL_EXPORTER_OTLP_TIMEOUT: 10000
# minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
# checkcert: true # Set if you want to skip TLS certificate validation (default: true)
logs:
# endpoint: "" # OTLP endpoint in the form of http(s)://{domain or ip}:4318(/v1/logs), if not empty, OTLP Logs output is enabled
# protocol: "" # OTLP protocol http/json, http/protobuf, grpc (default: "" which uses SDK default: http/json)
# timeout: "" # OTLP timeout: timeout value in milliseconds (default: "" which uses SDK default: 10000)
# headers: "" # OTLP headers: list of headers to apply to all outgoing traces in the form of "some-key=some-value,other-key=other-value" (default: "")
# extraenvvars: # Extra env vars (override the other settings)
# OTEL_EXPORTER_OTLP_TRACES_TIMEOUT: 10000
# OTEL_EXPORTER_OTLP_TIMEOUT: 10000
# minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
# checkcert: true # Set if you want to skip TLS certificate validation (default: true)
metrics:
# endpoint: "" # OTLP endpoint, typically in the form http(s)://{domain or ip}:4318(/v1/metrics), if not empty, OTLP Metrics output is enabled
# protocol: "" # OTLP transport protocol to be used for metrics data; it can be "grpc" or "http/protobuf" (default: "grpc")
# timeout: "" # OTLP timeout for outgoing metrics in milliseconds (default: "" which uses SDK default: 10000)
# headers: "" # List of headers to apply to all outgoing metrics in the form of "some-key=some-value,other-key=other-value" (default: "")
# extraenvvars: # Extra env vars (override the other settings) (default: "")
# OTEL_EXPORTER_OTLP_METRICS_TIMEOUT: 10000
# OTEL_EXPORTER_OTLP_TIMEOUT: 10000
# minimumpriority: "" # Minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default: "")
# checkcert: true # Set to false if you want to skip TLS certificate validation (only with https) (default: true)
# extraattributes: "" # Comma-separated list of fields to use as labels additionally to source, priority, rule, hostname, tags, k8s_ns_name, k8s_pod_name and custom_fields
talon:
# address: "" # Falco talon address, if not empty, Falco Talon output is enabled
# checkcert: false # check if ssl certificate of the output is valid (default: true)
# minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
logstash:
# address: "" # Logstash address, if not empty, Logstash output is enabled
# port: 5044 # Logstash port number (default: 5044)
# tls: false # communicate over tls; requires Logstash version 8+ to work
# mutualtls: false # or authenticate to the output with TLS; if true, checkcert flag will be ignored (server cert will always be checked) (default: false)
# checkcert: true # Check if ssl certificate of the output is valid (default: true)
# certfile: "" # Use this certificate file instead of the client certificate when using mutual TLS (default: "")
# keyfile: "" # Use this key file instead of the client certificate when using mutual TLS (default: "")
# cacertfile: "" # Use this CA certificate file instead of the client certificate when using mutual TLS (default: "")
# minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default: "debug")
# tags: ["falco"] # An additional list of tags that will be added to those produced by Falco (default: [])
# minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)

View File

@ -13,9 +13,9 @@
## Configuration
| Setting | Env var | Default value | Description |
| Setting | Env var | Default value | Description |
| --------------------------------------- | --------------------------------------- | -------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `alertmanager.hostport` | `ALERTMANAGER_HOSTPORT` | | Comma separated list of http://{domain or ip}:{port} that will all receive the payload, if not empty, Alertmanager output is **enabled** |
| `alertmanager.hostport` | `ALERTMANAGER_HOSTPORT` | | http://{domain or ip}:{port}, if not empty, Alertmanager output is **enabled** |
| `alertmanager.mutualtls` | `ALERTMANAGER_MUTUALTLS` | `false` | Authenticate to the output with TLS, if true, checkcert flag will be ignored (server cert will always be checked) |
| `alertmanager.checkcert` | `ALERTMANAGER_CHECKCERT` | `true` | check if ssl certificate of the output is valid |
| `alertmanager.endpoint` | `ALERTMANAGER_ENDPOINT` | `/api/v1/alerts` | Alertmanager endpoint for posting alerts `/api/v1/alerts` or `/api/v2/alerts` |

View File

@ -38,4 +38,4 @@ Filter the events in the UI with `sources: falco`.
## Screenshots
![datadog example](images/datadog.png)
![datadog example](mages/datadog.png)

View File

@ -1,43 +0,0 @@
# Datadog Logs
- **Category**: Logs
- **Website**: https://www.datadoghq.com/
## Table of content
- [Datadog Logs](#datadog-logs)
- [Table of content](#table-of-content)
- [Configuration](#configuration)
- [Example of config.yaml](#example-of-configyaml)
- [Additional info](#additional-info)
- [Screenshots](#screenshots)
## Configuration
| Setting | Env var | Default value | Description |
|-------------------------------|-----------------------------| -------------------------- | --------------------------------------------------------------------------------------------------------------------------------- |
| `datadoglogs.apikey` | `DATADOGLOGS_APIKEY` | | Datadog API Key, if not empty, Datadog Logs output is **enabled** |
| `datadoglogs.host` | `DATADOGLOGS_HOST` | `https://http-intake.logs.datadoghq.com/` | Datadog host. Override if you are on the Datadog EU site |
| `datadoglogs.minimumpriority` | `DATADOGLOGS_MINIMUMPRIORITY` | `""` (= `debug`) | Minimum priority of event for using this output, order is `emergency,alert,critical,error,warning,notice,informational,debug or ""` |
| `datadoglogs.service` | `DATADOGLOGS_SERVICE` | `""` | The name of the application or service generating the log events. |
> [!NOTE]
> The Env var values override the settings from yaml file.
## Example of config.yaml
```yaml
datadoglogs:
apikey: "" # Datadog API Key, if not empty, Datadog Logs output is enabled
# host: "" # Datadog host. Override if you are on the Datadog EU site. Defaults to american site with "https://http-intake.logs.datadoghq.com/"
# minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
# service: "" # The name of the application or service generating the log events.
```
## Additional info
Filter the logs in the UI with `sources: falco`.
## Screenshots
![datadog example](images/datadog_logs.png)

View File

@ -13,40 +13,26 @@
## Configuration
| Setting | Env var | Default value | Description |
| ------------------------------------- | -------------------------------------- | ---------------- | ----------------------------------------------------------------------------------------------------------------------------------- |
| `elasticsearch.hostport` | `ELASTICSEARCH_HOSTPORT` | | http://{domain or ip}:{port}, if not empty, Elasticsearch output is **enabled** |
| `elasticsearch.index` | `ELASTICSEARCH_INDEX` | `falco` | Index |
| `elasticsearch.type` | `ELASTICSEARCH_TYPE` | `_doc` | Index |
| `elasticsearch.pipeline` | `ELASTICSEARCH_PIPELINE` | | Optional ingest pipeline name. Documentation: https://www.elastic.co/guide/en/elasticsearch/reference/current/ingest.html |
| `elasticsearch.suffix` | `ELASTICSEARCH_SUFFIX` | `daily` | Date suffix for index rotation : `daily`, `monthly`, `annually`, `none` |
| `elasticsearch.apikey` | `ELASTICSEARCH_APIKEY` | | Use this APIKey to authenticate to Elasticsearch |
| `elasticsearch.username` | `ELASTICSEARCH_USERNAME` | | Use this username to authenticate to Elasticsearch |
| `elasticsearch.password` | `ELASTICSEARCH_PASSWORD` | | Use this password to authenticate to Elasticsearch |
| `elasticsearch.flattenfields` | `ELASTICSEARCH_FLATTENFIELDS` | `false` | Replace . by _ to avoid mapping conflicts, force to true if `createindextemplate=true` |
| `elasticsearch.createindextemplate` | `ELASTICSEARCH_CREATEINDEXTEMPLATE` | `false` | Create an index template |
| `elasticsearch.numberofshards` | `ELASTICSEARCH_NUMBEROFSHARDS` | `3` | Number of shards set by the index template |
| `elasticsearch.numberofreplicas` | `ELASTICSEARCH_NUMBEROFREPLICAS` | `3` | Number of replicas set by the index template |
| `elasticsearch.customheaders` | `ELASTICSEARCH_CUSTOMHEADERS` | | Custom headers to add in POST, useful for Authentication |
| `elasticsearch.mutualtls` | `ELASTICSEARCH_MUTUALTLS` | `false` | Authenticate to the output with TLS, if true, checkcert flag will be ignored (server cert will always be checked) |
| `elasticsearch.checkcert` | `ELASTICSEARCH_CHECKCERT` | `true` | Check if ssl certificate of the output is valid |
| `elasticsearch.minimumpriority` | `ELASTICSEARCH_MINIMUMPRIORITY` | `""` (= `debug`) | Minimum priority of event for using this output, order is `emergency,alert,critical,error,warning,notice,informational,debug or ""` |
| `elasticsearch.maxconcurrentrequests` | `ELASTICSEARCH_MAXCONCURRENTREQUESTS` | `1` | Max number of concurrent requests |
| `elasticsearch.enablecompression` | `ELASTICSEARCH_ENABLECOMPRESSION` | `false` | Enables gzip compression |
| `elasticsearch.batching.enabled` | `ELASTICSEARCH_BATCHING_ENABLED` | `false` | Enables batching (utilizing Elasticsearch bulk API) |
| `elasticsearch.batching.batchsize` | `ELASTICSEARCH_BATCHING_BATCHSIZE` | `5242880` | Batch size in bytes, default 5MB |
| `elasticsearch.batching.flushinterval`| `ELASTICSEARCH_BATCHING_FLUSHINTERVAL` | `1s` | Batch flush interval, use valid Go duration string |
| Setting | Env var | Default value | Description |
| ----------------------------------- | ----------------------------------- | ---------------- | ----------------------------------------------------------------------------------------------------------------------------------- |
| `elasticsearch.hosport` | `ELASTICSEARCH_HOSTPORT` | | http://{domain or ip}:{port}, if not empty, Elasticsearch output is **enabled** |
| `elasticsearch.index` | `ELASTICSEARCH_INDEX` | `falco` | Index |
| `elasticsearch.type` | `ELASTICSEARCH_TYPE` | `_doc` | Index |
| `elasticsearch.suffix` | `ELASTICSEARCH_SUFFIX` | `daily` | Date suffix for index rotation : `daily`, `monthly`, `annually`, `none` |
| `elasticsearch.username` | `ELASTICSEARCH_USERNAME` | | Use this username to authenticate to Elasticsearch |
| `elasticsearch.password` | `ELASTICSEARCH_PASSWORD` | | Use this password to authenticate to Elasticsearch |
| `elasticsearch.flattenfields` | `ELASTICSEARCH_FLATTENFIELDS` | `false` | Replace . by _ to avoid mapping conflicts, force to true if `createindextemplate=true` |
| `elasticsearch.createindextemplate` | `ELASTICSEARCH_CREATEINDEXTEMPLATE` | `false` | Create an index template |
| `elasticsearch.numberofshards` | `ELASTICSEARCH_NUMBEROFSHARDS` | `3` | Number of shards set by the index template |
| `elasticsearch.numberofreplicas` | `ELASTICSEARCH_REPLICAS` | `3` | Number of replicas set by the index template |
| `elasticsearch.customheaders` | `ELASTICSEARCH_CUSTOMHEADERS` | | Custom headers to add in POST, useful for Authentication |
| `elasticsearch.mutualtls` | `ELASTICSEARCH_MUTUALTLS` | `false` | Authenticate to the output with TLS, if true, checkcert flag will be ignored (server cert will always be checked) |
| `elasticsearch.checkcert` | `ELASTICSEARCH_CHECKCERT` | `true` | Check if ssl certificate of the output is valid |
| `elasticsearch.minimumpriority` | `ELASTICSEARCH_MINIMUMPRIORITY` | `""` (= `debug`) | Minimum priority of event for using this output, order is `emergency,alert,critical,error,warning,notice,informational,debug or ""` |
> [!NOTE]
The Env var values override the settings from yaml file.
> [!NOTE]
Increasing the default number of concurrent requests is a good way to increase throughput of the http outputs. This also increases the potential number of open connections. Choose wisely.
> [!NOTE]
Enabling batching for Elasticsearch is invaluable when the expected number of falco alerts is in the hundreds or thousands per second. The batching of data can be fine-tuned for your specific use case. The batch request is sent to Elasticsearch when the pending data size reaches `batchsize` or upon the `flushinterval`.
Enabling gzip compression increases throughput even further.
> [!WARNING]
By enabling the creation of the index template with `elasticsearch.createindextemplate=true`, the output fields of the Falco events will be flatten to avoid any mapping conflict.
@ -65,12 +51,6 @@ elasticsearch:
# mutualtls: false # if true, checkcert flag will be ignored (server cert will always be checked)
# checkcert: true # check if ssl certificate of the output is valid (default: true)
# minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
# enablecompression: # if true enables gzip compression for http requests (default: false)
# batching: # batching configuration, improves throughput dramatically utilizing _bulk Elasticsearch API
# enabled: true # if true enables batching
# batchsize: 5242880 # batch size in bytes (default: 5 MB)
    # flushinterval: 1s # batch flush interval (default: 1s)
# maxconcurrentrequests: # max number of concurrent http requests (default: 1)
```
## Screenshots

Binary file not shown.

Before

Width:  |  Height:  |  Size: 375 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 81 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 162 KiB

View File

@ -1,51 +0,0 @@
# Logstash
- **Category**: Logs
- **Website**: https://github.com/elastic/logstash
## Table of content
- [Logstash](#logstash)
- [Table of content](#table-of-content)
- [Configuration](#configuration)
- [Example of config.yaml](#example-of-configyaml)
- [Additional info](#additional-info)
- [Screenshots](#screenshots)
## Configuration
| Setting | Env var | Default value | Description |
| -------------------------- | -------------------------- | ---------------- | ----------------------------------------------------------------------------------------------------------------------------------- |
| `logstash.address` | `LOGSTASH_ADDRESS` | | Logstash address, if not empty, Logstash output is **enabled** |
| `logstash.port` | `LOGSTASH_PORT` | 5044 | Logstash port number |
| `logstash.tls` | `LOGSTASH_TLS` | false | Use TLS connection (true/false) |
| `logstash.mutualtls` | `LOGSTASH_MUTUALTLS` | false | Authenticate to the output with TLS; if true, checkcert flag will be ignored (server cert will always be checked) |
| `logstash.checkcert` | `LOGSTASH_CHECKCERT` | true | Check if ssl certificate of the output is valid |
| `logstash.certfile` | `LOGSTASH_CERTFILE` | | Use this certificate file instead of the client certificate when using mutual TLS |
| `logstash.keyfile` | `LOGSTASH_KEYFILE` | | Use this key file instead of the client certificate when using mutual TLS |
| `logstash.cacertfile` | `LOGSTASH_CACERTFILE` | | Use this CA certificate file instead of the client certificate when using mutual TLS |
| `logstash.minimumpriority` | `LOGSTASH_MINIMUMPRIORITY` | `""` (= `debug`) | Minimum priority of event for using this output, order is `emergency,alert,critical,error,warning,notice,informational,debug or ""` |
| `logstash.tags` | `LOGSTASH_TAGS` | | An additional list of tags that will be added to those produced by Falco; these tags may help in decision-making while routing logs |
> [!NOTE]
Values stored in environment variables will override the settings from yaml file.
## Example of config.yaml
```yaml
logstash:
address: "" # Logstash address, if not empty, Logstash output is enabled
# port: 5044 # Logstash port number (default: 5044)
# tls: false # communicate over tls; requires Logstash version 8+ to work
# mutualtls: false # or authenticate to the output with TLS; if true, checkcert flag will be ignored (server cert will always be checked) (default: false)
# checkcert: true # Check if ssl certificate of the output is valid (default: true)
# certfile: "" # Use this certificate file instead of the client certificate when using mutual TLS (default: "")
# keyfile: "" # Use this key file instead of the client certificate when using mutual TLS (default: "")
# cacertfile: "" # Use this CA certificate file instead of the client certificate when using mutual TLS (default: "")
# minimumpriority: minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default: "debug")
# tags: ["falco"] # An additional list of tags that will be added to those produced by Falco (default: [])
```
## Additional info
## Screenshots

View File

@ -14,19 +14,18 @@
## Configuration
| Setting | Env var | Default value | Description | | | | |
| ---------------------- | ---------------------- | ------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | ---------------------------- | ---------------------------- | ---------------- | ----------------------------------------------------------------------------------------------------------------------------------- |
| `loki.hostport` | `LOKI_HOSTPORT` | | http://{domain or ip}:{port}, if not empty, Loki output is **enabled** | | | | |
| `loki.user` | `LOKI_USER` | | User for Grafana Logs | | | | |
| `loki.apikey` | `LOKI_APIKEY` | | API Key for Grafana Logs | | | | |
| `loki.tenant` | `LOKI_TENANT` | | Add the tenant header if needed | | | | |
| `loki.format` | `LOKI_FORMAT` | `text` | Format for the log entry value: json, text | | | | |
| `loki.endpoint` | `LOKI_ENDPOINT` | `/loki/api/v1/push` | The endpoint URL path, more info : https://grafana.com/docs/loki/latest/api/#post-apiprompush | | | | |
| `loki.extralabels` | `LOKI_EXTRALABELS` | | comma separated list of fields to use as labels additionally to `rule`, `source`, `priority`, `tags` and `custom_fields` | | | | |
| `loki.customheaders` | `LOKI_CUSTOMHEADERS` | | Custom headers to add in POST, useful for Authentication | | | | |
| `loki.mutualtls` | `LOKI_MUTUALTLS` | `false` | Authenticate to the output with TLS, if true, checkcert flag will be ignored (server cert will always be checked) | | | | |
| `loki.checkcert` | `LOKI_CHECKCERT` | `true` | Check if ssl certificate of the output is valid | `mattermost.minimumpriority` | `MATTERMOST_MINIMUMPRIORITY` | `""` (= `debug`) | Minimum priority of event for using this output, order is `emergency,alert,critical,error,warning,notice,informational,debug or ""` |
| `loki.minimumpriority` | `LOKI_MINIMUMPRIORITY` | `""` (= `debug`) | Minimum priority of event for using this output, order is `emergency,alert,critical,error,warning,notice,informational,debug or ""` | | | | |
| Setting | Env var | Default value | Description |
| ---------------------- | ---------------------- | ----------------- | ----------------------------------------------------------------------------------------------------------------------------------- |
| `loki.hostport` | `LOKI_HOSTPORT` | | http://{domain or ip}:{port}, if not empty, Loki output is **enabled** |
| `loki.user` | `LOKI_USER` | | User for Grafana Logs |
| `loki.apikey` | `LOKI_APIKEY` | | API Key for Grafana Logs |
| `loki.tenant` | `LOKI_TENANT` | | Add the tenant header if needed |
| `loki.endpoint` | `LOKI_ENDPOINT` | `/api/prom/push` | The endpoint URL path, more info : https://grafana.com/docs/loki/latest/api/#post-apiprompush |
| `loki.extralabels` | `LOKI_EXTRALABELS` | | comma separated list of fields to use as labels additionally to `rule`, `source`, `priority`, `tags` and `custom_fields` |
| `loki.customheaders` | `LOKI_CUSTOMHEADERS` | | Custom headers to add in POST, useful for Authentication |
| `loki.mutualtls` | `LOKI_MUTUALTLS` | `false` | Authenticate to the output with TLS, if true, checkcert flag will be ignored (server cert will always be checked) |
| `loki.checkcert` | `LOKI_CHECKCERT` | `true` | Check if ssl certificate of the output is valid |
| `loki.minimumpriority` | `LOKI_MINIMUMPRIORITY` | `""` (= `debug`) | Minimum priority of event for using this output, order is `emergency,alert,critical,error,warning,notice,informational,debug or ""` |
> [!NOTE]
@ -42,8 +41,7 @@ loki:
# minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
# checkcert: true # check if ssl certificate of the output is valid (default: true)
# tenant: "" # Add the tenant header if needed. Enabled if not empty
# format: "text" # Format for the log entry value: json, text (default)
# endpoint: "/loki/api/v1/push" # The endpoint URL path, default is "/loki/api/v1/push" more info : https://grafana.com/docs/loki/latest/api/#post-apiprompush
# endpoint: "/api/prom/push" # The endpoint URL path, default is "/api/prom/push" more info : https://grafana.com/docs/loki/latest/api/#post-apiprompush
# extralabels: "" # comma separated list of fields to use as labels additionally to rule, source, priority, tags and custom_fields
# customHeaders: # Custom headers to add in POST, useful for Authentication
# key: value

View File

@ -2,7 +2,7 @@
- **Category**: Chat/Messaging
- **Website**: https://github.com/mattermost/mattermost
- **Website**: https://rocket.chat
## Table of content
@ -65,4 +65,4 @@ Go templates also support some basic methods for text manipulation which can be
## Screenshots
![mattermost example](images/mattermost.png)
![mattermost example](images/mattermost.png)

View File

@ -8,23 +8,18 @@
- [NATS](#nats)
- [Table of content](#table-of-content)
- [Configuration](#configuration)
- [subjecttemplate: "falco.." # template for the subject, tokens and will be automatically replaced (default: falco..)](#subjecttemplate-falco--template-for-the-subject-tokens--and--will-be-automatically-replaced-default-falco)
- [Example of config.yaml](#example-of-configyaml)
- [Additional info](#additional-info)
- [Screenshots](#screenshots)
## Configuration
# subjecttemplate: "falco.<priority>.<rule>" # template for the subject, tokens <priority> and <rule> will be automatically replaced (default: falco.<priority>.<rule>)
| Setting | Env var | Default value | Description |
| ---------------------- | ---------------------- | ------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- |
| `nats.hostport` | `NATS_HOSTPORT` | | nats://{domain or ip}:{port}, if not empty, NATS output is **enabled** |
| `nats.subjecttemplate` | `NATS_SUBJECTTEMPLATE` | `falco.<priority>.<rule>` | Template for the subject, tokens <priority> and <rule> will be automatically replaced |
| `nats.mutualtls` | `NATS_MUTUALTLS` | `false` | Authenticate to the output with TLS, if true, checkcert flag will be ignored (server cert will always be checked) |
| `nats.checkcert` | `NATS_CHECKCERT` | `true` | Check if ssl certificate of the output is valid |
| `nats.minimumpriority` | `NATS_MINIMUMPRIORITY` | `""` (= `debug`) | Minimum priority of event for using this output, order is `emergency,alert,critical,error,warning,notice,informational,debug or ""` |
| Setting | Env var | Default value | Description |
| ---------------------- | ---------------------- | ---------------- | ----------------------------------------------------------------------------------------------------------------------------------- |
| `nats.hostport` | `NATS_HOSTPORT` | | nats://{domain or ip}:{port}, if not empty, NATS output is **enabled** |
| `nats.mutualtls` | `NATS_MUTUALTLS` | `false` | Authenticate to the output with TLS, if true, checkcert flag will be ignored (server cert will always be checked) |
| `nats.checkcert` | `NATS_CHECKCERT` | `true` | Check if ssl certificate of the output is valid |
| `nats.minimumpriority` | `NATS_MINIMUMPRIORITY` | `""` (= `debug`) | Minimum priority of event for using this output, order is `emergency,alert,critical,error,warning,notice,informational,debug or ""` |
> [!NOTE]
The Env var values override the settings from yaml file.
@ -35,7 +30,6 @@ The Env var values override the settings from yaml file.
nats:
hostport: "" # nats://{domain or ip}:{port}, if not empty, NATS output is enabled
# minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
# subjecttemplate: "falco.<priority>.<rule>" # template for the subject, tokens <priority> and <rule> will be automatically replaced (default: falco.<priority>.<rule>)
# mutualtls: false # if true, checkcert flag will be ignored (server cert will always be checked)
# checkcert: true # check if ssl certificate of the output is valid (default: true)
```

View File

@ -1,51 +0,0 @@
# OTEL Logs
- **Category**: Logs
- **Website**: <https://opentelemetry.io/docs/concepts/signals/logs/>
## Table of content
- [OTEL Logs](#otel-logs)
- [Table of content](#table-of-content)
- [Configuration](#configuration)
- [Example of config.yaml](#example-of-configyaml)
- [Additional info](#additional-info)
## Configuration
| Setting | Env var | Default value | Description |
| --------------------------- | --------------------------- | -------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- |
| `otlp.logs.endpoint` | `OTLP_LOGS_ENDPOINT` | | OTLP endpoint in the form of http://{domain or ip}:4318/v1/logs |
| `otlp.logs.protocol` | `OTLP_LOGS_PROTOCOL` | `http/protobuf` (from SDK) | OTLP Protocol: `http/protobuf`, `grpc` |
| `otlp.logs.timeout` | `OTLP_LOGS_TIMEOUT` | `10000` (from SDK) | Timeout value in milliseconds |
| `otlp.logs.headers` | `OTLP_LOGS_HEADERS` | | List of headers to apply to all outgoing logs in the form of "some-key=some-value,other-key=other-value" |
| `otlp.logs.synced` | `OTLP_LOGS_SYNCED` | `false` | Set to `true` if you want logs to be sent synchronously |
| `otlp.logs.minimumpriority` | `OTLP_LOGS_MINIMUMPRIORITY` | `""` (=`debug`) | minimum priority of event for using this output, order is `emergency,alert,critical,error,warning,notice,informational,debug or ""` |
| `otlp.logs.checkcert` | `OTLP_LOGS_CHECKCERT` | `false` | Set if you want to skip TLS certificate validation |
| `otlp.logs.duration` | `OTLP_LOGS_DURATION` | `1000` | Artificial span duration in milliseconds (as Falco doesn't provide an ending timestamp) |
| `otlp.logs.extraenvvars` | `OTLP_LOGS_EXTRAENVVARS` | | Extra env vars (override the other settings) |
> [!NOTE]
For the extra Env Vars values see [standard `OTEL_*` environment variables](https://opentelemetry.io/docs/specs/otel/configuration/sdk-environment-variables/)
## Example of config.yaml
```yaml
otlp:
logs:
    # endpoint: "" # OTLP endpoint in the form of http(s)://{domain or ip}:4318(/v1/logs), if not empty, OTLP Logs output is enabled
protocol: "" # OTLP protocol: http/protobuf, grpc (default: "" which uses SDK default: "http/protobuf")
# timeout: "" # OTLP timeout: timeout value in milliseconds (default: "" which uses SDK default: 10000)
# headers: "" # OTLP headers: list of headers to apply to all outgoing traces in the form of "some-key=some-value,other-key=other-value" (default: "")
# extraenvvars: # Extra env vars (override the other settings)
# OTEL_EXPORTER_OTLP_TRACES_TIMEOUT: 10000
# OTEL_EXPORTER_OTLP_TIMEOUT: 10000
# minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
# checkcert: true # Set if you want to skip TLS certificate validation (default: true)
```
## Additional info
> [!WARNING]
Because of the way the OTEL SDK is structured, the OTLP outputs don't appear in the metrics (Prometheus, Statsd, ...)
and the error logs just specify `OTEL` as output.

View File

@ -1,208 +0,0 @@
# OTEL Metrics
- **Category**: Metrics/Observability
- **Website**: <https://opentelemetry.io/docs/concepts/signals/metrics/>
## Table of content
- [OTEL Metrics](#otel-metrics)
- [Table of content](#table-of-content)
- [Configuration](#configuration)
- [Example of config.yaml](#example-of-configyaml)
- [Additional info](#additional-info)
- [Running a whole stack with docker-compose](#running-a-whole-stack-with-docker-compose)
- [Requirements](#requirements)
- [Configuration files](#configuration-files)
- [Run it](#run-it)
## Configuration
| Setting | Env var | Default value | Description |
| ------------------------------ | ------------------------------ | -------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------- |
| `otlp.metrics.endpoint` | `OTLP_METRICS_ENDPOINT` | | OTLP endpoint, typically in the form http(s)://{domain or ip}:4318(/v1/metrics) |
| `otlp.metrics.protocol` | `OTLP_METRICS_PROTOCOL` | `http/protobuf` (from SDK) | OTLP Protocol: `http/protobuf`, `grpc` |
| `otlp.metrics.timeout` | `OTLP_METRICS_TIMEOUT` | `10000` (from SDK) | OTLP timeout for outgoing metrics in milliseconds |
| `otlp.metrics.headers` | `OTLP_METRICS_HEADERS` | `""` | List of headers to apply to all outgoing metrics in the form of `some-key=some-value,other-key=other-value` |
| `otlp.metrics.extraenvvars` | `OTLP_METRICS_EXTRAENVVARS` | `""` | Extra env vars (override the other settings) |
| `otlp.metrics.minimumpriority` | `OTLP_METRICS_MINIMUMPRIORITY` | `""` (=`debug`) | Minimum priority of event for using this output, order is `emergency,alert,critical,error,warning,notice,informational,debug or ""` |
| `otlp.metrics.checkcert` | `OTLP_METRICS_CHECKCERT` | `true` | Set to false if you want to skip TLS certificate validation (only with https) |
| `otlp.metrics.extraattributes` | `OTLP_METRICS_EXTRAATTRIBUTES` | `""` | Comma-separated list of fields to use as labels additionally to source, priority, rule, hostname, tags, k8s_ns_name, k8s_pod_name and custom_fields |
> [!NOTE]
For the extra Env Vars values see [standard `OTEL_*` environment variables](https://opentelemetry.io/docs/specs/otel/configuration/sdk-environment-variables/)
> [!WARNING]
If you use `grpc`, the endpoint format must be `http(s)://{domain or ip}:4318`
If you use `http/protobuf`, the endpoint format must be `http(s)://{domain or ip}:4318/v1/traces`
## Example of config.yaml
```yaml
otlp:
metrics:
# endpoint: "" # OTLP endpoint, typically in the form http(s)://{domain or ip}:4318(/v1/metrics), if not empty, OTLP Metrics output is enabled
# protocol: "" # OTLP protocol: http/protobuf, grpc (default: "" which uses SDK default: "http/protobuf")
# timeout: "" # OTLP timeout for outgoing metrics in milliseconds (default: "" which uses SDK default: 10000)
# headers: "" # List of headers to apply to all outgoing metrics in the form of "some-key=some-value,other-key=other-value" (default: "")
# extraenvvars: # Extra env vars (override the other settings) (default: "")
# OTEL_EXPORTER_OTLP_METRICS_TIMEOUT: 10000
# OTEL_EXPORTER_OTLP_TIMEOUT: 10000
# minimumpriority: "" # Minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default: "")
# checkcert: true # Set to false if you want to skip TLS certificate validation (only with https) (default: true)
# extraattributes: "" # Comma-separated list of fields to use as labels additionally to source, priority, rule, hostname, tags, k8s_ns_name, k8s_pod_name and custom_fields
```
## Additional info
> [!NOTE]
This output is used to collect metrics about Falco events and Falcosidekick inputs and outputs in OTLP metrics format.
> [!WARNING]
Because of the way the OTEL SDK is structured, the OTLP outputs don't appear in the metrics (Prometheus, Statsd, ...)
and the error logs just specify `OTEL` as output.
## Running a whole stack with docker-compose
Below `docker-compose` file runs a stack of:
- `falco`
- `falcosidekick`
- `prometheus` as metrics backend
- OTEL collector to collect OTEL metrics from `falcosidekick` and let prometheus scrape them
- `events-generator` to generate arbitrary Falco events
### Requirements
A local Linux kernel capable of running `falco --modern-bpf`, see <https://falco.org/blog/falco-modern-bpf/>.
### Configuration files
You need to create these files:
- `./docker-compose.yaml`: minimal docker-compose configuration
```yaml
---
services:
falco:
image: falcosecurity/falco:0.39.0
privileged: true
volumes:
- /var/run/docker.sock:/host/var/run/docker.sock
- /dev:/host/dev
- /proc:/host/proc:ro
- /boot:/host/boot:ro
- /lib/modules:/host/lib/modules:ro
- /usr:/host/usr:ro
- /etc/falco:/host/etc:ro
command: [
"/usr/bin/falco" ,
"-o", "json_output=true",
"-o", "http_output.enabled=true",
"-o", "http_output.url=http://sidekick:2801", # Set the HTTP output url to Falcosidekick endpoint
"-o", "http_output.insecure=true"
]
sidekick:
image: falcosidekick:latest
ports:
- "2801:2801" # Expose default port towards Falco instance
environment:
- OTLP_METRICS_ENDPOINT=http://otel-collector:4317
- OTLP_METRICS_CHECKCERT=false
otel-collector:
image: otel/opentelemetry-collector-contrib
volumes:
- ./config.yaml:/etc/otelcol-contrib/config.yaml
ports:
- "4317:4317" # Expose OTLP gRPC port
prometheus:
image: prom/prometheus:latest
volumes:
- ./prometheus.yml:/etc/prometheus/prometheus.yml
ports:
- "9090:9090" # Expose port to access Prometheus expression browser
event-generator:
image: falcosecurity/event-generator
command: run
restart: always
trigger:
image: alpine
command: [ # Alternate reads to /etc/shadow with creations of symlinks from it
"sh",
"-c",
"while true; do cat /etc/shadow > /dev/null; sleep 5; ln -s /etc/shadow shadow; rm shadow; sleep 5; done"
]
```
> `./docker-compose.yaml` mentions the `falcosidekick:latest` docker image, that must be locally available before
> bringing up the stack. You can build it from source by cloning the repository and issuing the building commands:
> ```shell
> git clone https://github.com/falcosecurity/falcosidekick.git
> cd falcosidekick
> go build . && docker build . -t falcosidekick:latest
> ```
- `./config.yaml`: minimal OTEL collector configuration
```yaml
---
receivers:
otlp:
protocols:
grpc:
endpoint: "0.0.0.0:4317"
exporters:
prometheus:
endpoint: "0.0.0.0:9090"
service:
pipelines:
metrics:
receivers: [otlp]
processors: []
exporters: [prometheus]
```
- `./prometheus.yml`: minimal prometheus configuration
```yaml
global:
scrape_interval: 5s
scrape_configs:
- job_name: 'otel-collector'
static_configs:
- targets: ['otel-collector:9090']
```
### Run it
To bring up the stack, and see the results on prometheus expression browser:
1. Bring up the stack
```shell
docker compose up
```
2. Navigate to <http://localhost:9090/graph> to start browsing the local prometheus expression browser
3. Navigate to the `Graph` tab and adjust the time interval to be comparable to the stack uptime (e.g.: 15 minutes)
4. To get information regarding the `falcosecurity_falco_rules_matches_total` metric, you can enter a simple query like
`falcosecurity_falco_rules_matches_total` or `sum by (rule) (falcosecurity_falco_rules_matches_total)` and press
`Execute`
5. Explore the obtained results
![Falco metrics view](images/otlp_metrics-prom_view.png)
6. Bring down the stack
```shell
docker compose down
```

View File

@ -11,38 +11,31 @@
- [Example of config.yaml](#example-of-configyaml)
- [Additional info](#additional-info)
- [Running a whole stack with docker-compose](#running-a-whole-stack-with-docker-compose)
- [Requirements](#requirements)
- [Configuration files](#configuration-files)
- [Run it](#run-it)
## Configuration
| Setting | Env var | Default value | Description |
| ----------------------------- | ----------------------------- | -------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- |
| `otlp.traces.endpoint` | `OTLP_TRACES_ENDPOINT` | | OTLP endpoint in the form of http(s)://{domain or ip}:4318(/v1/traces) |
| `otlp.traces.protocol` | `OTLP_TRACES_PROTOCOL` | `http/protobuf` (from SDK) | OTLP Protocol: `http/protobuf`, `grpc` |
| `otlp.traces.timeout` | `OTLP_TRACES_TIMEOUT` | `10000` (from SDK) | Timeout value in milliseconds |
| `otlp.traces.headers` | `OTLP_TRACES_HEADERS` | | List of headers to apply to all outgoing traces in the form of "some-key=some-value,other-key=other-value" |
| `otlp.traces.synced` | `OTLP_TRACES_SYNCED` | `false` | Set to `true` if you want traces to be sent synchronously |
| `otlp.traces.minimumpriority` | `OTLP_TRACES_MINIMUMPRIORITY` | `""` (=`debug`) | minimum priority of event for using this output, order is `emergency,alert,critical,error,warning,notice,informational,debug or ""` |
| `otlp.traces.checkcert` | `OTLP_TRACES_CHECKCERT` | `false` | Set if you want to skip TLS certificate validation |
| `otlp.traces.duration` | `OTLP_TRACES_DURATION` | `1000` | Artificial span duration in milliseconds (as Falco doesn't provide an ending timestamp) |
| `otlp.traces.extraenvvars` | `OTLP_TRACES_EXTRAENVVARS` | | Extra env vars (override the other settings) |
| Setting | Env var | Default value | Description |
| ----------------------------- | ----------------------------- | ------------------ | ----------------------------------------------------------------------------------------------------------------------------------- |
| `otlp.traces.endpoint` | `OTLP_TRACES_ENDPOINT` | | OTLP endpoint in the form of http://{domain or ip}:4318/v1/traces |
| `otlp.traces.protocol` | `OTLP_TRACES_PROTOCOL` | `http` (from SDK) | OTLP Protocol |
| `otlp.traces.timeout` | `OTLP_TRACES_TIMEOUT` | `10000` (from SDK) | Timeout value in milliseconds |
| `otlp.traces.headers` | `OTLP_TRACES_HEADERS` | | List of headers to apply to all outgoing traces in the form of "some-key=some-value,other-key=other-value" |
| `otlp.traces.synced` | `OTLP_TRACES_SYNCED` | `false` | Set to `true` if you want traces to be sent synchronously |
| `otlp.traces.minimumpriority` | `OTLP_TRACES_MINIMUMPRIORITY` | `""` (=`debug`) | minimum priority of event for using this output, order is `emergency,alert,critical,error,warning,notice,informational,debug or ""` |
| `otlp.traces.checkcert` | `OTLP_TRACES_CHECKCERT` | `false` | Set if you want to skip TLS certificate validation |
| `otlp.traces.duration` | `OTLP_TRACES_DURATION` | `1000` | Artificial span duration in milliseconds (as Falco doesn't provide an ending timestamp) |
| `otlp.traces.extraenvvars` | `OTLP_TRACES_EXTRAENVVARS` | | Extra env vars (override the other settings) |
> [!NOTE]
For the extra Env Vars values see [standard `OTEL_*` environment variables](https://opentelemetry.io/docs/specs/otel/configuration/sdk-environment-variables/)
> [!WARNING]
If you use `grpc`, the endpoint format must be `http(s)://{domain or ip}:4318`
If you use `http/protobuf`, the endpoint format must be `http(s)://{domain or ip}:4318/v1/traces`
For the extra Env Vars values see [standard `OTEL_*` environment variables](https://opentelemetry.io/docs/specs/otel/configuration/sdk-environment-variables/):
## Example of config.yaml
```yaml
otlp:
traces:
# endpoint: "" # OTLP endpoint in the form of http(s)://{domain or ip}:4318(/v1/traces), if not empty, OTLP Traces output is enabled
# protocol: "" # OTLP protocol: http/protobuf, grpc (default: "" which uses SDK default: "http/protobuf")
# endpoint: "" # OTLP endpoint in the form of http://{domain or ip}:4318/v1/traces
# protocol: "" # OTLP protocol http/json, http/protobuf, grpc (default: "" which uses SDK default: http/json)
# timeout: "" # OTLP timeout: timeout value in milliseconds (default: "" which uses SDK default: 10000)
# headers: "" # OTLP headers: list of headers to apply to all outgoing traces in the form of "some-key=some-value,other-key=other-value" (default: "")
# synced: false # Set to true if you want traces to be sent synchronously (default: false)
@ -59,10 +52,6 @@ otlp:
> [!NOTE]
The OTLP Traces are only available for the source: `syscalls`.
> [!WARNING]
Because of the way the OTEL SDK is structured, the OTLP outputs don't appear in the metrics (Prometheus, Statsd, ...)
and the error logs just specify `OTEL` as output.
## Running a whole stack with docker-compose
Below `docker-compose` file runs a stack of:

View File

@ -15,7 +15,7 @@
| Setting | Env var | Default value | Description |
| ------------------------------- | ------------------------------- | ---------------- | -------------------------------------------------------------------------------------------------------------------------------------- |
| `quickwit.hostport` | `QUICKWIT_HOSTPORT` | | http://{domain or ip}:{port}, if not empty, Quickwit output is **enabled** |
| `quickwit.hostport` | `QUICKWIT_HOSTPORT` | | http://{domain or ip}:{port}, if not empty, Quickwit output is **enabled** |
| `quickwit.apiendpoint` | `QUICKWIT_APIENDPOINT` | `api/v1` | API endpoint (containing the API version, overridable in case of quickwit behind a reverse proxy with URL rewriting) |
| `quickwit.index` | `QUICKWIT_INDEX` | `falco` | Index |
| `quickwit.version` | `QUICKWIT_VERSION` | `0.7` | Version of quickwit |

View File

@ -14,14 +14,13 @@
## Configuration
| Setting | Env var | Default value | Description |
| ---------------------- | ---------------------- | ------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- |
| `stan.hostport` | `STAN_HOSTPORT` | | stan://{domain or ip}:{port}, if not empty, STAN output is **enabled** |
| `stan.subjecttemplate` | `STAN_SUBJECTTEMPLATE` | `falco.<priority>.<rule>` | Template for the subject, tokens <priority> and <rule> will be automatically replaced |
| `stan.clusterid` | `STAN_CLUSTERID` | | Cluster name (mandatory) |
| `stan.clientid` | `STAN_CLIENTID` | | Client ID (mandatory) |
| `stan.checkcert` | `STAN_CHECKCERT` | `true` | Check if ssl certificate of the output is valid |
| `stan.minimumpriority` | `STAN_MINIMUMPRIORITY` | `""` (= `debug`) | Minimum priority of event for using this output, order is `emergency,alert,critical,error,warning,notice,informational,debug or ""` |
| Setting | Env var | Default value | Description |
| ---------------------- | ---------------------- | ---------------- | ----------------------------------------------------------------------------------------------------------------------------------- |
| `stan.hostport` | `STAN_HOSTPORT` | | stan://{domain or ip}:{port}, if not empty, STAN output is **enabled** |
| `stan.clusterid` | `STAN_CLUSTERID` | | Cluster name (mandatory) |
| `stan.clientid` | `STAN_CLIENTID` | | Client ID (mandatory) |
| `stan.checkcert` | `STAN_CHECKCERT` | `true` | Check if ssl certificate of the output is valid |
| `stan.minimumpriority` | `STAN_MINIMUMPRIORITY` | `""` (= `debug`) | Minimum priority of event for using this output, order is `emergency,alert,critical,error,warning,notice,informational,debug or ""` |
> [!NOTE]
The Env var values override the settings from yaml file.
@ -33,7 +32,6 @@ stan:
hostport: "" # stan://{domain or ip}:{port}, if not empty, STAN output is enabled
clusterid: "" # Cluster name (mandatory)
clientid: "" # Client ID (mandatory)
# subjecttemplate: "falco.<priority>.<rule>" # template for the subject, tokens <priority> and <rule> will be automatically replaced (default: falco.<priority>.<rule>)
# minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
# mutualtls: false # if true, checkcert flag will be ignored (server cert will always be checked)
# checkcert: true # check if ssl certificate of the output is valid (default: true)

View File

@ -18,7 +18,6 @@
| -------------------------- | -------------------------- | ---------------- | ----------------------------------------------------------------------------------------------------------------------------------- |
| `telegram.chatid` | `TELEGRAM_CHATID` | | Telegram Identifier of the shared chat, if not empty, Telegram is **enabled** |
| `telegram.token` | `TELEGRAM_TOKEN` | | Telegram bot authentication token |
| `telegram.message_thread_id` | `TELEGRAM_MESSAGE_THREAD_ID` | | Telegram individual chats within the group |
| `telegram.minimumpriority` | `TELEGRAM_MINIMUMPRIORITY` | `""` (= `debug`) | Minimum priority of event for using this output, order is `emergency,alert,critical,error,warning,notice,informational,debug or ""` |
> [!NOTE]
@ -30,7 +29,6 @@ The Env var values override the settings from yaml file.
telegram:
chatid: "" # Telegram Identifier of the shared chat, if not empty, Telegram is enabled
token: "" # Telegram bot authentication token
# message_thread_id: "" # Telegram individual chats within the group
# minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
```

View File

@ -1,35 +0,0 @@
# Webex
- **Category**: Chat/Messaging
- **Website**: https://webex.com
## Table of content
- [Webex](#webex)
- [Table of content](#table-of-content)
- [Configuration](#configuration)
- [Example of config.yaml](#example-of-configyaml)
- [Screenshots](#screenshots)
## Configuration
| Setting | Env var | Default value | Description |
| ----------------------- | ----------------------- | --------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- |
| `webex.webhookurl` | `WEBEX_WEBHOOKURL` | | Webex WebhookURL, if not empty, Webex output is **enabled** |
| `webex.minimumpriority` | `WEBEX_MINIMUMPRIORITY` | `""` (= `debug`) | Minimum priority of event for using this output, order is `emergency,alert,critical,error,warning,notice,informational,debug or ""` |
> [!NOTE]
The Env var values override the settings from yaml file.
## Example of config.yaml
```yaml
webex:
webhookurl: "" # Webex WebhookURL, if not empty, Webex output is enabled
# minimumpriority: "debug" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
```
## Screenshots
![webex example](images/webex.png)

237
go.mod
View File

@ -1,211 +1,160 @@
module github.com/falcosecurity/falcosidekick
go 1.23.0
toolchain go1.23.4
go 1.22.0
require (
cloud.google.com/go/functions v1.19.6
cloud.google.com/go/pubsub v1.49.0
cloud.google.com/go/storage v1.56.0
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1
github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.3.2
cloud.google.com/go/functions v1.16.3
cloud.google.com/go/pubsub v1.40.0
cloud.google.com/go/storage v1.42.0
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0
github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.2.1
github.com/DataDog/datadog-go v4.8.3+incompatible
github.com/PagerDuty/go-pagerduty v1.8.0
github.com/alecthomas/kingpin/v2 v2.4.0
github.com/aws/aws-sdk-go-v2 v1.37.1
github.com/aws/aws-sdk-go-v2/config v1.25.3
github.com/aws/aws-sdk-go-v2/credentials v1.16.2
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.4
github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.47.3
github.com/aws/aws-sdk-go-v2/service/kinesis v1.35.4
github.com/aws/aws-sdk-go-v2/service/lambda v1.74.1
github.com/aws/aws-sdk-go-v2/service/s3 v1.83.0
github.com/aws/aws-sdk-go-v2/service/sns v1.17.4
github.com/aws/aws-sdk-go-v2/service/sqs v1.18.3
github.com/aws/aws-sdk-go-v2/service/sts v1.33.19
github.com/aws/aws-sdk-go v1.54.11
github.com/cloudevents/sdk-go/v2 v2.15.2
github.com/eclipse/paho.mqtt.golang v1.5.0
github.com/eclipse/paho.mqtt.golang v1.4.3
github.com/embano1/memlog v0.4.6
github.com/emersion/go-sasl v0.0.0-20241020182733-b788ff22d5a6
github.com/emersion/go-smtp v0.22.0
github.com/google/go-cmp v0.7.0
github.com/emersion/go-sasl v0.0.0-20231106173351-e73c9f7bad43
github.com/emersion/go-smtp v0.21.2
github.com/google/uuid v1.6.0
github.com/googleapis/gax-go/v2 v2.15.0
github.com/jackc/pgx/v5 v5.7.5
github.com/nats-io/nats.go v1.39.1
github.com/googleapis/gax-go/v2 v2.12.5
github.com/jackc/pgx/v5 v5.6.0
github.com/nats-io/nats.go v1.36.0
github.com/nats-io/stan.go v0.10.4
github.com/prometheus/client_golang v1.22.0
github.com/prometheus/client_golang v1.19.1
github.com/rabbitmq/amqp091-go v1.10.0
github.com/redis/go-redis/v9 v9.8.0
github.com/segmentio/kafka-go v0.4.48
github.com/redis/go-redis/v9 v9.5.3
github.com/segmentio/kafka-go v0.4.47
github.com/spf13/viper v1.19.0
github.com/stretchr/testify v1.10.0
github.com/stretchr/testify v1.9.0
github.com/wavefronthq/wavefront-sdk-go v0.15.0
github.com/xitongsys/parquet-go v1.6.2
github.com/xitongsys/parquet-go-source v0.0.0-20241021075129-b732d2ac9c9b
go.opentelemetry.io/contrib/bridges/otelslog v0.10.0
go.opentelemetry.io/otel v1.37.0
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.13.0
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.10.0
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.36.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0
go.opentelemetry.io/otel/metric v1.37.0
go.opentelemetry.io/otel/sdk v1.37.0
go.opentelemetry.io/otel/sdk/log v0.13.0
go.opentelemetry.io/otel/sdk/metric v1.36.0
go.opentelemetry.io/otel/trace v1.37.0
golang.org/x/oauth2 v0.30.0
golang.org/x/sync v0.16.0
golang.org/x/text v0.27.0
google.golang.org/api v0.243.0
google.golang.org/genproto v0.0.0-20250603155806-513f23925822
k8s.io/api v0.32.3
k8s.io/apimachinery v0.32.3
k8s.io/client-go v0.32.3
github.com/xitongsys/parquet-go-source v0.0.0-20240122235623-d6294584ab18
go.opentelemetry.io/otel v1.27.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0
go.opentelemetry.io/otel/sdk v1.27.0
go.opentelemetry.io/otel/trace v1.27.0
golang.org/x/oauth2 v0.21.0
golang.org/x/text v0.16.0
google.golang.org/api v0.186.0
google.golang.org/genproto v0.0.0-20240617180043-68d350f18fd4
k8s.io/api v0.30.2
k8s.io/apimachinery v0.30.2
k8s.io/client-go v0.30.2
sigs.k8s.io/wg-policy-prototypes v0.0.0-20240327135653-0fc2ddc5d3e3
)
require (
cel.dev/expr v0.24.0 // indirect
cloud.google.com/go v0.121.4 // indirect
cloud.google.com/go/auth v0.16.3 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
cloud.google.com/go/compute/metadata v0.7.0 // indirect
cloud.google.com/go/iam v1.5.2 // indirect
cloud.google.com/go/longrunning v0.6.7 // indirect
cloud.google.com/go/monitoring v1.24.2 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect
github.com/Azure/go-amqp v1.4.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 // indirect
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 // indirect
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 // indirect
cloud.google.com/go v0.115.0 // indirect
cloud.google.com/go/auth v0.6.0 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
cloud.google.com/go/compute/metadata v0.3.0 // indirect
cloud.google.com/go/iam v1.1.8 // indirect
cloud.google.com/go/longrunning v0.5.7 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect
github.com/Azure/go-amqp v1.0.5 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect
github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect
github.com/apache/arrow/go/arrow v0.0.0-20211112161151-bc219186db40 // indirect
github.com/apache/thrift v0.21.0 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.1 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.1 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.36 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.17 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.17 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.17.2 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.20.0 // indirect
github.com/aws/smithy-go v1.22.5 // indirect
github.com/apache/thrift v0.20.0 // indirect
github.com/benbjohnson/clock v1.3.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/caio/go-tdigest/v4 v4.0.1 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cenkalti/backoff/v5 v5.0.2 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/emicklei/go-restful/v3 v3.12.1 // indirect
github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect
github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
github.com/emicklei/go-restful/v3 v3.12.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.8.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-jose/go-jose/v4 v4.0.5 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/go-logr/logr v1.4.1 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.21.0 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/gnostic-models v0.6.9 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/s2a-go v0.1.9 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
github.com/gorilla/websocket v1.5.3 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 // indirect
github.com/google/s2a-go v0.1.7 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
github.com/gorilla/websocket v1.5.1 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/imdario/mergo v1.0.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
github.com/jackc/puddle/v2 v2.2.2 // indirect
github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 // indirect
github.com/jackc/puddle/v2 v2.2.1 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/klauspost/compress v1.17.8 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/magiconair/properties v1.8.9 // indirect
github.com/mailru/easyjson v0.9.0 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/nats-io/nats-server/v2 v2.10.27 // indirect
github.com/nats-io/nats-streaming-server v0.24.6 // indirect
github.com/nats-io/nkeys v0.4.10 // indirect
github.com/nats-io/nats-server/v2 v2.9.23 // indirect
github.com/nats-io/nats-streaming-server v0.24.3 // indirect
github.com/nats-io/nkeys v0.4.7 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
github.com/pierrec/lz4/v4 v4.1.22 // indirect
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
github.com/pierrec/lz4/v4 v4.1.21 // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.62.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/sagikazarmark/locafero v0.7.0 // indirect
github.com/prometheus/common v0.53.0 // indirect
github.com/prometheus/procfs v0.14.0 // indirect
github.com/sagikazarmark/locafero v0.4.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.12.0 // indirect
github.com/spf13/cast v1.7.1 // indirect
github.com/spf13/pflag v1.0.6 // indirect
github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect
github.com/spf13/afero v1.11.0 // indirect
github.com/spf13/cast v1.6.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/telkomdev/go-stash v1.0.6
github.com/x448/float16 v0.8.4 // indirect
github.com/xdg-go/pbkdf2 v1.0.0 // indirect
github.com/xdg-go/scram v1.1.2 // indirect
github.com/xdg-go/stringprep v1.0.4 // indirect
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
github.com/zeebo/errs v1.4.0 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/detectors/gcp v1.36.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect
go.opentelemetry.io/otel/log v0.13.0 // indirect
go.opentelemetry.io/proto/otlp v1.7.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.50.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0 // indirect
go.opentelemetry.io/otel/metric v1.27.0 // indirect
go.opentelemetry.io/proto/otlp v1.2.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/crypto v0.40.0 // indirect
golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c // indirect
golang.org/x/net v0.42.0 // indirect
golang.org/x/sys v0.34.0 // indirect
golang.org/x/term v0.33.0 // indirect
golang.org/x/time v0.12.0 // indirect
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250721164621-a45f3dfb1074 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250721164621-a45f3dfb1074 // indirect
google.golang.org/grpc v1.74.2 // indirect
google.golang.org/protobuf v1.36.6 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
golang.org/x/crypto v0.24.0 // indirect
golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f // indirect
golang.org/x/net v0.26.0 // indirect
golang.org/x/sync v0.7.0 // indirect
golang.org/x/sys v0.21.0 // indirect
golang.org/x/term v0.21.0 // indirect
golang.org/x/time v0.5.0 // indirect
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240617180043-68d350f18fd4 // indirect
google.golang.org/grpc v1.64.0 // indirect
google.golang.org/protobuf v1.34.2 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 // indirect
k8s.io/utils v0.0.0-20241210054802-24370beab758 // indirect
sigs.k8s.io/controller-runtime v0.20.1 // indirect
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.5.0 // indirect
k8s.io/klog/v2 v2.120.1 // indirect
k8s.io/kube-openapi v0.0.0-20240423202451-8948a665c108 // indirect
k8s.io/utils v0.0.0-20240423183400-0849a56e8f22 // indirect
sigs.k8s.io/controller-runtime v0.17.3 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
)

543
go.sum

File diff suppressed because it is too large Load Diff

View File

@ -7,20 +7,17 @@ import (
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"sort"
"strconv"
"strings"
"text/template"
"time"
"github.com/falcosecurity/falcosidekick/types"
"github.com/google/uuid"
"go.opentelemetry.io/otel/attribute"
"golang.org/x/text/cases"
"golang.org/x/text/language"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
"github.com/falcosecurity/falcosidekick/types"
)
const (
@ -38,8 +35,6 @@ func mainHandler(w http.ResponseWriter, r *http.Request) {
http.Error(w, "Please send a valid request body", http.StatusBadRequest)
stats.Requests.Add("rejected", 1)
promStats.Inputs.With(map[string]string{"source": "requests", "status": "rejected"}).Inc()
otlpMetrics.Inputs.With(attribute.String("source", "requests"),
attribute.String("status", "rejected")).Inc()
nullClient.CountMetric("inputs.requests.rejected", 1, []string{"error:nobody"})
return
@ -49,8 +44,6 @@ func mainHandler(w http.ResponseWriter, r *http.Request) {
http.Error(w, "Please send with post http method", http.StatusBadRequest)
stats.Requests.Add("rejected", 1)
promStats.Inputs.With(map[string]string{"source": "requests", "status": "rejected"}).Inc()
otlpMetrics.Inputs.With(attribute.String("source", "requests"),
attribute.String("status", "rejected")).Inc()
nullClient.CountMetric("inputs.requests.rejected", 1, []string{"error:nobody"})
return
@ -61,8 +54,6 @@ func mainHandler(w http.ResponseWriter, r *http.Request) {
http.Error(w, "Please send a valid request body", http.StatusBadRequest)
stats.Requests.Add("rejected", 1)
promStats.Inputs.With(map[string]string{"source": "requests", "status": "rejected"}).Inc()
otlpMetrics.Inputs.With(attribute.String("source", "requests"),
attribute.String("status", "rejected")).Inc()
nullClient.CountMetric("inputs.requests.rejected", 1, []string{"error:invalidjson"})
return
@ -71,8 +62,6 @@ func mainHandler(w http.ResponseWriter, r *http.Request) {
nullClient.CountMetric("inputs.requests.accepted", 1, []string{})
stats.Requests.Add("accepted", 1)
promStats.Inputs.With(map[string]string{"source": "requests", "status": "accepted"}).Inc()
otlpMetrics.Inputs.With(attribute.String("source", "requests"),
attribute.String("status", "accepted")).Inc()
forwardEvent(falcopayload)
}
@ -91,7 +80,7 @@ func healthHandler(w http.ResponseWriter, r *http.Request) {
// testHandler sends a test event to all enabled outputs.
func testHandler(w http.ResponseWriter, r *http.Request) {
r.Body = io.NopCloser(bytes.NewReader([]byte(`{"output":"This is a test from falcosidekick","source":"debug","priority":"Debug","hostname":"falcosidekick", "rule":"Test rule","time":"` + time.Now().UTC().Format(time.RFC3339) + `","output_fields":{"proc.name":"falcosidekick","user.name":"falcosidekick"},"tags":["test","example"]}`)))
r.Body = io.NopCloser(bytes.NewReader([]byte(`{"output":"This is a test from falcosidekick","priority":"Debug","hostname": "falcosidekick", "rule":"Test rule", "time":"` + time.Now().UTC().Format(time.RFC3339) + `","output_fields": {"proc.name":"falcosidekick","user.name":"falcosidekick"}, "tags":["test","example"]}`)))
mainHandler(w, r)
}
@ -117,8 +106,6 @@ func newFalcoPayload(payload io.Reader) (types.FalcoPayload, error) {
}
}
falcopayload.Tags = append(falcopayload.Tags, config.Customtags...)
if falcopayload.Rule == "Test rule" {
falcopayload.Source = "internal"
}
@ -149,12 +136,12 @@ func newFalcoPayload(payload io.Reader) (types.FalcoPayload, error) {
for key, value := range config.Templatedfields {
tmpl, err := template.New("").Parse(value)
if err != nil {
utils.Log(utils.ErrorLvl, "", fmt.Sprintf("Parsing error for templated field '%v': %v", key, err))
log.Printf("[ERROR] : Parsing error for templated field '%v': %v\n", key, err)
continue
}
v := new(bytes.Buffer)
if err := tmpl.Execute(v, falcopayload.OutputFields); err != nil {
utils.Log(utils.ErrorLvl, "", fmt.Sprintf("Parsing error for templated field '%v': %v", key, err))
log.Printf("[ERROR] : Parsing error for templated field '%v': %v\n", key, err)
}
templatedFields += key + "=" + v.String() + " "
falcopayload.OutputFields[key] = v.String()
@ -167,14 +154,7 @@ func newFalcoPayload(payload io.Reader) (types.FalcoPayload, error) {
nullClient.CountMetric("falco.accepted", 1, []string{"priority:" + falcopayload.Priority.String()})
stats.Falco.Add(strings.ToLower(falcopayload.Priority.String()), 1)
promLabels := map[string]string{
"rule": falcopayload.Rule,
"priority_raw": strings.ToLower(falcopayload.Priority.String()),
"priority": strconv.Itoa(int(falcopayload.Priority)),
"source": falcopayload.Source,
"k8s_ns_name": kn,
"k8s_pod_name": kp,
}
promLabels := map[string]string{"rule": falcopayload.Rule, "priority": falcopayload.Priority.String(), "source": falcopayload.Source, "k8s_ns_name": kn, "k8s_pod_name": kp}
if falcopayload.Hostname != "" {
promLabels["hostname"] = falcopayload.Hostname
} else {
@ -186,11 +166,6 @@ func newFalcoPayload(payload io.Reader) (types.FalcoPayload, error) {
promLabels[key] = value
}
}
for key := range config.Templatedfields {
if regPromLabels.MatchString(strings.ReplaceAll(key, ".", "_")) {
promLabels[key] = fmt.Sprintf("%v", falcopayload.OutputFields[key])
}
}
for _, i := range config.Prometheus.ExtraLabelsList {
promLabels[strings.ReplaceAll(i, ".", "_")] = ""
for key, value := range falcopayload.OutputFields {
@ -206,43 +181,6 @@ func newFalcoPayload(payload io.Reader) (types.FalcoPayload, error) {
}
promStats.Falco.With(promLabels).Inc()
// Falco OTLP metric
hostname := falcopayload.Hostname
if hostname == "" {
hostname = "unknown"
}
attrs := []attribute.KeyValue{
attribute.String("source", falcopayload.Source),
attribute.String("priority", falcopayload.Priority.String()),
attribute.String("rule", falcopayload.Rule),
attribute.String("hostname", hostname),
attribute.StringSlice("tags", falcopayload.Tags),
}
for key, value := range config.Customfields {
if regOTLPMetricsAttributes.MatchString(key) {
attrs = append(attrs, attribute.String(key, value))
}
}
for _, attr := range config.OTLP.Metrics.ExtraAttributesList {
attrName := strings.ReplaceAll(attr, ".", "_")
attrValue := ""
for key, val := range falcopayload.OutputFields {
if key != attr {
continue
}
if keyName := strings.ReplaceAll(key, ".", "_"); !regOTLPMetricsAttributes.MatchString(keyName) {
continue
}
// Notice: Don't remove the _ for the second return value, otherwise will panic if it can convert the value
// to string
attrValue, _ = val.(string)
break
}
attrs = append(attrs, attribute.String(attrName, attrValue))
}
otlpMetrics.Falco.With(attrs...).Inc()
if config.BracketReplacer != "" {
for i, j := range falcopayload.OutputFields {
if strings.Contains(i, "[") {
@ -264,7 +202,6 @@ func newFalcoPayload(payload io.Reader) (types.FalcoPayload, error) {
n = strings.ReplaceAll(n, "<output>", o)
n = strings.ReplaceAll(n, "<custom_fields>", strings.TrimSuffix(customFields, " "))
n = strings.ReplaceAll(n, "<templated_fields>", strings.TrimSuffix(templatedFields, " "))
n = strings.ReplaceAll(n, "<tags>", strings.Join(falcopayload.Tags, ","))
n = strings.TrimSuffix(n, " ")
n = strings.TrimSuffix(n, "( )")
n = strings.TrimSuffix(n, "()")
@ -275,9 +212,9 @@ func newFalcoPayload(payload io.Reader) (types.FalcoPayload, error) {
if len(falcopayload.String()) > 4096 {
for i, j := range falcopayload.OutputFields {
switch l := j.(type) {
switch j.(type) {
case string:
if len(l) > 512 {
if len(j.(string)) > 512 {
k := j.(string)[:507] + "[...]"
falcopayload.Output = strings.ReplaceAll(falcopayload.Output, j.(string), k)
falcopayload.OutputFields[i] = k
@ -287,7 +224,7 @@ func newFalcoPayload(payload io.Reader) (types.FalcoPayload, error) {
}
if config.Debug {
utils.Log(utils.DebugLvl, "", fmt.Sprintf("Falco's payload : %v", falcopayload.String()))
log.Printf("[DEBUG] : Falco's payload : %v\n", falcopayload.String())
}
return falcopayload, nil
@ -314,26 +251,16 @@ func forwardEvent(falcopayload types.FalcoPayload) {
go teamsClient.TeamsPost(falcopayload)
}
if config.Webex.WebhookURL != "" && (falcopayload.Priority >= types.Priority(config.Webex.MinimumPriority) || falcopayload.Rule == testRule) {
go webexClient.WebexPost(falcopayload)
}
if config.Datadog.APIKey != "" && (falcopayload.Priority >= types.Priority(config.Datadog.MinimumPriority) || falcopayload.Rule == testRule) {
go datadogClient.DatadogPost(falcopayload)
}
if config.DatadogLogs.APIKey != "" && (falcopayload.Priority >= types.Priority(config.DatadogLogs.MinimumPriority) || falcopayload.Rule == testRule) {
go datadogLogsClient.DatadogLogsPost(falcopayload)
}
if config.Discord.WebhookURL != "" && (falcopayload.Priority >= types.Priority(config.Discord.MinimumPriority) || falcopayload.Rule == testRule) {
go discordClient.DiscordPost(falcopayload)
}
if len(config.Alertmanager.HostPort) != 0 && (falcopayload.Priority >= types.Priority(config.Alertmanager.MinimumPriority) || falcopayload.Rule == testRule) {
for _, i := range alertmanagerClients {
go i.AlertmanagerPost(falcopayload)
}
if config.Alertmanager.HostPort != "" && (falcopayload.Priority >= types.Priority(config.Alertmanager.MinimumPriority) || falcopayload.Rule == testRule) {
go alertmanagerClient.AlertmanagerPost(falcopayload)
}
if config.Elasticsearch.HostPort != "" && (falcopayload.Priority >= types.Priority(config.Elasticsearch.MinimumPriority) || falcopayload.Rule == testRule) {
@ -542,18 +469,10 @@ func forwardEvent(falcopayload types.FalcoPayload) {
}
if config.OTLP.Traces.Endpoint != "" && (falcopayload.Priority >= types.Priority(config.OTLP.Traces.MinimumPriority)) && (falcopayload.Source == syscall || falcopayload.Source == syscalls) {
go otlpTracesClient.OTLPTracesPost(falcopayload)
}
if config.OTLP.Logs.Endpoint != "" && (falcopayload.Priority >= types.Priority(config.OTLP.Logs.MinimumPriority)) {
go otlpLogsClient.OTLPLogsPost(falcopayload)
go otlpClient.OTLPTracesPost(falcopayload)
}
if config.Talon.Address != "" && (falcopayload.Priority >= types.Priority(config.Talon.MinimumPriority) || falcopayload.Rule == testRule) {
go talonClient.TalonPost(falcopayload)
}
if config.Logstash.Address != "" && (falcopayload.Priority >= types.Priority(config.Logstash.MinimumPriority) || falcopayload.Rule == testRule) {
go logstashClient.LogstashPost(falcopayload)
}
}

View File

@ -1,132 +0,0 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
package batcher
import (
"bytes"
"encoding/json"
"sync"
"time"
"github.com/falcosecurity/falcosidekick/types"
)
// Sizing and timing defaults used when no options are supplied to New.
const (
defaultBatchSize = 5 * 1024 * 1024 // max batch size in bytes, 5MB by default
defaultFlushInterval = time.Second
)
// CallbackFunc is invoked on every flush with the original payloads and
// their concatenated serialized form.
type CallbackFunc func(falcoPayloads []types.FalcoPayload, serialized []byte)
// OptionFunc mutates a Batcher during construction (functional-options pattern).
type OptionFunc func(b *Batcher)
// MarshalFunc is a callback that allows the user of the batcher to overwrite the default JSON marshalling
type MarshalFunc func(payload types.FalcoPayload) ([]byte, error)
// Batcher A simple generic implementation of Falco payloads batching
// Batching can be configured by the batchSize which is the max size in bytes of the serialized batch, or the flushInterval.
// The callback function is called when the serialized batch reaches batchSize or upon the flushInterval
type Batcher struct {
batchSize int // flush threshold in serialized bytes (see Push)
flushInterval time.Duration // max time a non-empty batch waits before flushing
callbackFn CallbackFunc // receives each completed batch
marshalFn MarshalFunc // serializer applied to every pushed payload
mx sync.Mutex // guards pending, pendingPayloads and curTimer
pending bytes.Buffer // serialized batch under construction
// Keeping the original payloads for errors resolution
pendingPayloads []types.FalcoPayload
curTimer *time.Timer // pending flush timer; re-armed whenever a new batch starts
}
// New builds a Batcher with the package defaults (5MB batches, 1s flush
// interval, JSON marshalling, no-op callback) and then applies the given
// options in order.
func New(opts ...OptionFunc) *Batcher {
	batcher := &Batcher{
		batchSize:     defaultBatchSize,
		flushInterval: defaultFlushInterval,
		callbackFn:    func([]types.FalcoPayload, []byte) {},
		marshalFn:     jsonMarshal,
	}
	for _, apply := range opts {
		apply(batcher)
	}
	return batcher
}
// WithBatchSize overrides the maximum serialized batch size, in bytes.
func WithBatchSize(sz int) OptionFunc {
	return func(b *Batcher) { b.batchSize = sz }
}
// WithFlushInterval overrides how long a non-empty batch may wait
// before being flushed.
func WithFlushInterval(interval time.Duration) OptionFunc {
	return func(b *Batcher) { b.flushInterval = interval }
}
// WithCallback overrides the function that receives each completed batch.
func WithCallback(cb CallbackFunc) OptionFunc {
	return func(b *Batcher) { b.callbackFn = cb }
}
// WithMarshal overrides the default JSON serializer used for each payload.
func WithMarshal(fn MarshalFunc) OptionFunc {
	return func(b *Batcher) { b.marshalFn = fn }
}
// Push serializes the payload and appends it to the current batch.
// The first payload of a fresh batch arms the flush timer; if appending
// would overflow batchSize, the current batch is flushed first and a new
// one is started. Returns the marshalling error, if any.
func (b *Batcher) Push(falcopayload types.FalcoPayload) error {
	b.mx.Lock()
	defer b.mx.Unlock()

	serialized, err := b.marshalFn(falcopayload)
	if err != nil {
		return err
	}
	switch {
	case b.pending.Len() == 0:
		// Fresh batch: start the interval countdown.
		b.scheduleFlushInterval()
	case b.pending.Len()+len(serialized) > b.batchSize:
		// Would overflow: emit the current batch, then start a new one.
		b.flush()
		b.scheduleFlushInterval()
	}
	_, _ = b.pending.Write(serialized)
	b.pendingPayloads = append(b.pendingPayloads, falcopayload)
	return nil
}
// scheduleFlushInterval (re)arms the timer that flushes a batch which has
// not filled up within flushInterval. Caller must hold b.mx.
func (b *Batcher) scheduleFlushInterval() {
	if t := b.curTimer; t != nil {
		t.Stop()
	}
	b.curTimer = time.AfterFunc(b.flushInterval, b.flushOnTimer)
}
// flushOnTimer is the time.AfterFunc callback armed by
// scheduleFlushInterval: it flushes whatever is pending under the lock.
// flush itself is a no-op if a size-triggered flush already emptied the
// batch before the timer fired.
func (b *Batcher) flushOnTimer() {
b.mx.Lock()
defer b.mx.Unlock()
b.flush()
}
// flush hands the completed batch to the callback and resets both the
// serialized buffer and the retained payload slice. No-op on an empty
// batch. Caller must hold b.mx; the callback runs under the lock.
func (b *Batcher) flush() {
	if b.pending.Len() == 0 {
		return
	}
	batch, payloads := b.pending.Bytes(), b.pendingPayloads
	b.pending = bytes.Buffer{}
	b.pendingPayloads = nil
	b.callbackFn(payloads, batch)
}
// jsonMarshal is the default MarshalFunc: plain encoding/json marshalling
// of the payload, used when WithMarshal is not supplied.
func jsonMarshal(payload types.FalcoPayload) ([]byte, error) {
return json.Marshal(payload)
}

View File

@ -1,79 +0,0 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
package batcher
import (
"encoding/json"
"sync"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/uuid"
"github.com/falcosecurity/falcosidekick/types"
)
// TestElasticsearchBatcher pushes testCount payloads through a Batcher with
// a small byte limit, independently computes the expected size-based batch
// boundaries, and verifies the callback received exactly those batches
// (the final, partial batch arriving via the flush-interval timer).
func TestElasticsearchBatcher(t *testing.T) {
	const (
		batchSize     = 1234
		testCount     = 100
		flushInterval = 300 * time.Millisecond
	)
	// Just to emulate a similar payload for testing, not strictly needed
	type eSPayload struct {
		types.FalcoPayload
		Timestamp time.Time `json:"@timestamp"`
	}
	marshalFunc := func(payload types.FalcoPayload) ([]byte, error) {
		return json.Marshal(eSPayload{FalcoPayload: payload, Timestamp: payload.Time})
	}
	var wantBatches, gotBatches [][]byte
	var mx sync.Mutex
	batcher := New(
		WithBatchSize(batchSize),
		// Use the shared flushInterval constant: the previous hard-coded
		// 500ms interval left only a 100ms margin before the 2*300ms sleep
		// below, making the final flush race-prone.
		WithFlushInterval(flushInterval),
		WithMarshal(marshalFunc),
		WithCallback(func(falcoPayloads []types.FalcoPayload, data []byte) {
			mx.Lock()
			defer mx.Unlock()
			gotBatches = append(gotBatches, data)
		}))
	// Replay the batcher's size rule locally to build the expected batches.
	var currentBatch []byte
	for i := 0; i < testCount; i++ {
		payload := types.FalcoPayload{UUID: uuid.Must(uuid.NewV7()).String()}
		data, err := marshalFunc(payload)
		if err != nil {
			t.Fatal(err)
		}
		if len(currentBatch)+len(data) > batchSize {
			wantBatches = append(wantBatches, currentBatch)
			currentBatch = nil
		}
		currentBatch = append(currentBatch, data...)
		err = batcher.Push(payload)
		if err != nil {
			t.Fatal(err)
		}
	}
	wantBatches = append(wantBatches, currentBatch)
	// give it time to flush
	time.Sleep(flushInterval * 2)
	mx.Lock()
	defer mx.Unlock()
	diff := cmp.Diff(wantBatches, gotBatches)
	if diff != "" {
		t.Fatal(diff)
	}
}

View File

@ -1,37 +0,0 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
package utils
import "log"
// Log levels accepted by Log and the prefix string each one maps to.
// Info and Warning prefixes carry trailing padding so the rendered
// prefixes line up column-wise in the output.
const (
InfoLvl string = "info"
InfoPrefix string = "[INFO] "
ErrorLvl string = "error"
ErrorPrefix string = "[ERROR]"
DebugLvl string = "debug"
DebugPrefix string = "[DEBUG]"
WarningLvl string = "warning"
WarningPrefix string = "[WARN] "
FatalLvl string = "fatal"
FatalPrefix string = "[FATAL]"
)
// Log writes msg to the standard logger with a level prefix.
// level selects the prefix ([INFO], [ERROR], [DEBUG], [WARN], [FATAL]);
// an unrecognized level yields an empty prefix. output, when non-empty,
// names the emitting component and is inserted between prefix and message.
func Log(level, output, msg string) {
	var prefix string
	switch level {
	case InfoLvl:
		prefix = InfoPrefix
	case ErrorLvl:
		prefix = ErrorPrefix
	case DebugLvl:
		prefix = DebugPrefix
	case WarningLvl:
		prefix = WarningPrefix
	case FatalLvl:
		// Fix: FatalPrefix was declared but never selected, so fatal
		// messages were previously logged with no prefix at all.
		prefix = FatalPrefix
	}
	if output != "" {
		log.Printf("%v : %v - %v", prefix, output, msg)
	} else {
		log.Printf("%v : %v", prefix, msg)
	}
}

260
main.go
View File

@ -7,6 +7,7 @@ import (
"crypto/tls"
"crypto/x509"
"fmt"
"log"
"net/http"
"net/url"
"os"
@ -16,11 +17,10 @@ import (
"github.com/DataDog/datadog-go/statsd"
"github.com/embano1/memlog"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
"github.com/falcosecurity/falcosidekick/outputs"
otlpmetrics "github.com/falcosecurity/falcosidekick/outputs/otlp_metrics"
"github.com/falcosecurity/falcosidekick/types"
)
@ -32,11 +32,9 @@ var (
rocketchatClient *outputs.Client
mattermostClient *outputs.Client
teamsClient *outputs.Client
webexClient *outputs.Client
datadogClient *outputs.Client
datadogLogsClient *outputs.Client
discordClient *outputs.Client
alertmanagerClients []*outputs.Client
alertmanagerClient *outputs.Client
elasticsearchClient *outputs.Client
quickwitClient *outputs.Client
influxdbClient *outputs.Client
@ -79,27 +77,21 @@ var (
n8nClient *outputs.Client
openObserveClient *outputs.Client
dynatraceClient *outputs.Client
otlpTracesClient *outputs.Client
otlpLogsClient *outputs.Client
otlpClient *outputs.Client
talonClient *outputs.Client
logstashClient *outputs.Client
statsdClient, dogstatsdClient *statsd.Client
config *types.Configuration
stats *types.Statistics
promStats *types.PromStatistics
otlpMetrics *otlpmetrics.OTLPMetrics
initClientArgs *types.InitClientArgs
regPromLabels *regexp.Regexp
regOTLPMetricsAttributes *regexp.Regexp
regOutputFormat *regexp.Regexp
shutDownFuncs []func()
regPromLabels *regexp.Regexp
regOutputFormat *regexp.Regexp
shutDownFuncs []func()
)
func init() {
utils.Log(utils.InfoLvl, "", fmt.Sprintf("Falcosidekick version: %s", GetVersionInfo().GitVersion))
// detect unit testing and skip init.
// see: https://github.com/alecthomas/kingpin/issues/187
testing := (strings.HasSuffix(os.Args[0], ".test") ||
@ -109,21 +101,17 @@ func init() {
}
regPromLabels, _ = regexp.Compile("^[a-zA-Z_:][a-zA-Z0-9_:]*$")
// TODO: replace the following regex if something more appropriate is found
regOTLPMetricsAttributes = regexp.MustCompile("^[a-zA-Z_:][a-zA-Z0-9_:]*$")
regOutputFormat, _ = regexp.Compile(`(?i)[0-9:]+\.[0-9]+: (Debug|Informational|Notice|Warning|Error|Critical|Alert|Emergency) .*`)
config = getConfig()
stats = getInitStats()
promStats = getInitPromStats(config)
otlpMetrics = newOTLPMetrics(config)
nullClient = &outputs.Client{
OutputType: "null",
Config: config,
Stats: stats,
PromStats: promStats,
OTLPMetrics: otlpMetrics,
StatsdClient: statsdClient,
DogstatsdClient: dogstatsdClient,
}
@ -133,7 +121,6 @@ func init() {
Stats: stats,
DogstatsdClient: dogstatsdClient,
PromStats: promStats,
OTLPMetrics: otlpMetrics,
}
if config.Statsd.Forwarder != "" {
@ -160,7 +147,7 @@ func init() {
if config.Slack.WebhookURL != "" {
var err error
slackClient, err = outputs.NewClient("Slack", config.Slack.WebhookURL, config.Slack.CommonConfig, *initClientArgs)
slackClient, err = outputs.NewClient("Slack", config.Slack.WebhookURL, config.Slack.MutualTLS, config.Slack.CheckCert, *initClientArgs)
if err != nil {
config.Slack.WebhookURL = ""
} else {
@ -170,7 +157,7 @@ func init() {
if config.Cliq.WebhookURL != "" {
var err error
cliqClient, err = outputs.NewClient("Cliq", config.Cliq.WebhookURL, config.Cliq.CommonConfig, *initClientArgs)
cliqClient, err = outputs.NewClient("Cliq", config.Cliq.WebhookURL, config.Cliq.MutualTLS, config.Cliq.CheckCert, *initClientArgs)
if err != nil {
config.Cliq.WebhookURL = ""
} else {
@ -180,7 +167,7 @@ func init() {
if config.Rocketchat.WebhookURL != "" {
var err error
rocketchatClient, err = outputs.NewClient("Rocketchat", config.Rocketchat.WebhookURL, config.Rocketchat.CommonConfig, *initClientArgs)
rocketchatClient, err = outputs.NewClient("Rocketchat", config.Rocketchat.WebhookURL, config.Rocketchat.MutualTLS, config.Rocketchat.CheckCert, *initClientArgs)
if err != nil {
config.Rocketchat.WebhookURL = ""
} else {
@ -190,7 +177,7 @@ func init() {
if config.Mattermost.WebhookURL != "" {
var err error
mattermostClient, err = outputs.NewClient("Mattermost", config.Mattermost.WebhookURL, config.Mattermost.CommonConfig, *initClientArgs)
mattermostClient, err = outputs.NewClient("Mattermost", config.Mattermost.WebhookURL, config.Mattermost.MutualTLS, config.Mattermost.CheckCert, *initClientArgs)
if err != nil {
config.Mattermost.WebhookURL = ""
} else {
@ -200,7 +187,7 @@ func init() {
if config.Teams.WebhookURL != "" {
var err error
teamsClient, err = outputs.NewClient("Teams", config.Teams.WebhookURL, config.Teams.CommonConfig, *initClientArgs)
teamsClient, err = outputs.NewClient("Teams", config.Teams.WebhookURL, config.Teams.MutualTLS, config.Teams.CheckCert, *initClientArgs)
if err != nil {
config.Teams.WebhookURL = ""
} else {
@ -208,20 +195,10 @@ func init() {
}
}
if config.Webex.WebhookURL != "" {
var err error
webexClient, err = outputs.NewClient("Webex", config.Webex.WebhookURL, config.Webex.CommonConfig, *initClientArgs)
if err != nil {
config.Webex.WebhookURL = ""
} else {
outputs.EnabledOutputs = append(outputs.EnabledOutputs, "Webex")
}
}
if config.Datadog.APIKey != "" {
var err error
endpointUrl := fmt.Sprintf("%s?api_key=%s", config.Datadog.Host+outputs.DatadogPath, config.Datadog.APIKey)
datadogClient, err = outputs.NewClient("Datadog", endpointUrl, config.Datadog.CommonConfig, *initClientArgs)
datadogClient, err = outputs.NewClient("Datadog", endpointUrl, config.Datadog.MutualTLS, config.Datadog.CheckCert, *initClientArgs)
if err != nil {
config.Datadog.APIKey = ""
} else {
@ -229,20 +206,9 @@ func init() {
}
}
if config.DatadogLogs.APIKey != "" {
var err error
endpointUrl := config.DatadogLogs.Host + outputs.DatadogLogsPath
datadogLogsClient, err = outputs.NewClient("DatadogLogs", endpointUrl, config.DatadogLogs.CommonConfig, *initClientArgs)
if err != nil {
config.DatadogLogs.APIKey = ""
} else {
outputs.EnabledOutputs = append(outputs.EnabledOutputs, "DatadogLogs")
}
}
if config.Discord.WebhookURL != "" {
var err error
discordClient, err = outputs.NewClient("Discord", config.Discord.WebhookURL, config.Discord.CommonConfig, *initClientArgs)
discordClient, err = outputs.NewClient("Discord", config.Discord.WebhookURL, config.Discord.MutualTLS, config.Discord.CheckCert, *initClientArgs)
if err != nil {
config.Discord.WebhookURL = ""
} else {
@ -250,11 +216,12 @@ func init() {
}
}
if len(config.Alertmanager.HostPort) != 0 {
if config.Alertmanager.HostPort != "" {
var err error
alertmanagerClients, err = outputs.NewAlertManagerClient(config.Alertmanager.HostPort, config.Alertmanager.Endpoint, config.Alertmanager.CommonConfig, *initClientArgs)
endpointUrl := fmt.Sprintf("%s%s", config.Alertmanager.HostPort, config.Alertmanager.Endpoint)
alertmanagerClient, err = outputs.NewClient("AlertManager", endpointUrl, config.Alertmanager.MutualTLS, config.Alertmanager.CheckCert, *initClientArgs)
if err != nil {
config.Alertmanager.HostPort = []string{}
config.Alertmanager.HostPort = ""
} else {
outputs.EnabledOutputs = append(outputs.EnabledOutputs, "AlertManager")
}
@ -262,7 +229,8 @@ func init() {
if config.Elasticsearch.HostPort != "" {
var err error
elasticsearchClient, err = outputs.NewElasticsearchClient(*initClientArgs)
endpointUrl := fmt.Sprintf("%s/%s/%s", config.Elasticsearch.HostPort, config.Elasticsearch.Index, config.Elasticsearch.Type)
elasticsearchClient, err = outputs.NewClient("Elasticsearch", endpointUrl, config.Elasticsearch.MutualTLS, config.Elasticsearch.CheckCert, *initClientArgs)
if err != nil {
config.Elasticsearch.HostPort = ""
} else {
@ -283,7 +251,7 @@ func init() {
var err error
endpointUrl := fmt.Sprintf("%s/%s/%s/ingest", config.Quickwit.HostPort, config.Quickwit.ApiEndpoint, config.Quickwit.Index)
quickwitClient, err = outputs.NewClient("Quickwit", endpointUrl, config.Quickwit.CommonConfig, *initClientArgs)
quickwitClient, err = outputs.NewClient("Quickwit", endpointUrl, config.Quickwit.MutualTLS, config.Quickwit.CheckCert, *initClientArgs)
if err == nil && config.Quickwit.AutoCreateIndex {
err = quickwitClient.AutoCreateQuickwitIndex(*initClientArgs)
}
@ -297,7 +265,7 @@ func init() {
if config.Loki.HostPort != "" {
var err error
lokiClient, err = outputs.NewClient("Loki", config.Loki.HostPort+config.Loki.Endpoint, config.Loki.CommonConfig, *initClientArgs)
lokiClient, err = outputs.NewClient("Loki", config.Loki.HostPort+config.Loki.Endpoint, config.Loki.MutualTLS, config.Loki.CheckCert, *initClientArgs)
if err != nil {
config.Loki.HostPort = ""
} else {
@ -307,7 +275,7 @@ func init() {
if config.SumoLogic.ReceiverURL != "" {
var err error
sumologicClient, err = outputs.NewClient("SumoLogic", config.SumoLogic.ReceiverURL, config.SumoLogic.CommonConfig, *initClientArgs)
sumologicClient, err = outputs.NewClient("SumoLogic", config.SumoLogic.ReceiverURL, false, config.SumoLogic.CheckCert, *initClientArgs)
if err != nil {
config.SumoLogic.ReceiverURL = ""
} else {
@ -317,7 +285,7 @@ func init() {
if config.Nats.HostPort != "" {
var err error
natsClient, err = outputs.NewClient("NATS", config.Nats.HostPort, config.Nats.CommonConfig, *initClientArgs)
natsClient, err = outputs.NewClient("NATS", config.Nats.HostPort, config.Nats.MutualTLS, config.Nats.CheckCert, *initClientArgs)
if err != nil {
config.Nats.HostPort = ""
} else {
@ -327,7 +295,7 @@ func init() {
if config.Stan.HostPort != "" && config.Stan.ClusterID != "" && config.Stan.ClientID != "" {
var err error
stanClient, err = outputs.NewClient("STAN", config.Stan.HostPort, config.Stan.CommonConfig, *initClientArgs)
stanClient, err = outputs.NewClient("STAN", config.Stan.HostPort, config.Stan.MutualTLS, config.Stan.CheckCert, *initClientArgs)
if err != nil {
config.Stan.HostPort = ""
config.Stan.ClusterID = ""
@ -352,7 +320,7 @@ func init() {
}
var err error
influxdbClient, err = outputs.NewClient("Influxdb", url, config.Influxdb.CommonConfig, *initClientArgs)
influxdbClient, err = outputs.NewClient("Influxdb", url, config.Influxdb.MutualTLS, config.Influxdb.CheckCert, *initClientArgs)
if err != nil {
config.Influxdb.HostPort = ""
} else {
@ -364,7 +332,7 @@ func init() {
config.AWS.SNS.TopicArn != "" || config.AWS.CloudWatchLogs.LogGroup != "" || config.AWS.S3.Bucket != "" ||
config.AWS.Kinesis.StreamName != "" || (config.AWS.SecurityLake.Bucket != "" && config.AWS.SecurityLake.Region != "" && config.AWS.SecurityLake.AccountID != "") {
var err error
awsClient, err = outputs.NewAWSClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
awsClient, err = outputs.NewAWSClient(config, stats, promStats, statsdClient, dogstatsdClient)
if err != nil {
config.AWS.AccessKeyID = ""
config.AWS.SecretAccessKey = ""
@ -420,7 +388,7 @@ func init() {
if config.SMTP.HostPort != "" && config.SMTP.From != "" && config.SMTP.To != "" {
var err error
smtpClient, err = outputs.NewSMTPClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
smtpClient, err = outputs.NewSMTPClient(config, stats, promStats, statsdClient, dogstatsdClient)
if err != nil {
config.SMTP.HostPort = ""
} else {
@ -434,7 +402,7 @@ func init() {
if strings.ToLower(config.Opsgenie.Region) == "eu" {
url = "https://api.eu.opsgenie.com/v2/alerts"
}
opsgenieClient, err = outputs.NewClient("Opsgenie", url, config.Opsgenie.CommonConfig, *initClientArgs)
opsgenieClient, err = outputs.NewClient("Opsgenie", url, config.Opsgenie.MutualTLS, config.Opsgenie.CheckCert, *initClientArgs)
if err != nil {
config.Opsgenie.APIKey = ""
} else {
@ -444,7 +412,7 @@ func init() {
if config.Webhook.Address != "" {
var err error
webhookClient, err = outputs.NewClient("Webhook", config.Webhook.Address, config.Webhook.CommonConfig, *initClientArgs)
webhookClient, err = outputs.NewClient("Webhook", config.Webhook.Address, config.Webhook.MutualTLS, config.Webhook.CheckCert, *initClientArgs)
if err != nil {
config.Webhook.Address = ""
} else {
@ -454,7 +422,7 @@ func init() {
if config.NodeRed.Address != "" {
var err error
noderedClient, err = outputs.NewClient("NodeRed", config.NodeRed.Address, config.NodeRed.CommonConfig, *initClientArgs)
noderedClient, err = outputs.NewClient("NodeRed", config.NodeRed.Address, false, config.NodeRed.CheckCert, *initClientArgs)
if err != nil {
config.NodeRed.Address = ""
} else {
@ -464,7 +432,7 @@ func init() {
if config.CloudEvents.Address != "" {
var err error
cloudeventsClient, err = outputs.NewClient("CloudEvents", config.CloudEvents.Address, config.CloudEvents.CommonConfig, *initClientArgs)
cloudeventsClient, err = outputs.NewClient("CloudEvents", config.CloudEvents.Address, config.CloudEvents.MutualTLS, config.CloudEvents.CheckCert, *initClientArgs)
if err != nil {
config.CloudEvents.Address = ""
} else {
@ -474,7 +442,7 @@ func init() {
if config.Azure.EventHub.Name != "" {
var err error
azureClient, err = outputs.NewEventHubClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
azureClient, err = outputs.NewEventHubClient(config, stats, promStats, statsdClient, dogstatsdClient)
if err != nil {
config.Azure.EventHub.Name = ""
config.Azure.EventHub.Namespace = ""
@ -487,7 +455,7 @@ func init() {
if (config.GCP.PubSub.ProjectID != "" && config.GCP.PubSub.Topic != "") || config.GCP.Storage.Bucket != "" || config.GCP.CloudFunctions.Name != "" {
var err error
gcpClient, err = outputs.NewGCPClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
gcpClient, err = outputs.NewGCPClient(config, stats, promStats, statsdClient, dogstatsdClient)
if err != nil {
config.GCP.PubSub.ProjectID = ""
config.GCP.PubSub.Topic = ""
@ -510,7 +478,7 @@ func init() {
var err error
var outputName = "GCPCloudRun"
gcpCloudRunClient, err = outputs.NewClient(outputName, config.GCP.CloudRun.Endpoint, types.CommonConfig{}, *initClientArgs)
gcpCloudRunClient, err = outputs.NewClient(outputName, config.GCP.CloudRun.Endpoint, false, false, *initClientArgs)
if err != nil {
config.GCP.CloudRun.Endpoint = ""
@ -521,7 +489,7 @@ func init() {
if config.Googlechat.WebhookURL != "" {
var err error
googleChatClient, err = outputs.NewClient("Googlechat", config.Googlechat.WebhookURL, config.Googlechat.CommonConfig, *initClientArgs)
googleChatClient, err = outputs.NewClient("Googlechat", config.Googlechat.WebhookURL, config.Googlechat.MutualTLS, config.Googlechat.CheckCert, *initClientArgs)
if err != nil {
config.Googlechat.WebhookURL = ""
} else {
@ -531,7 +499,7 @@ func init() {
if config.Kafka.HostPort != "" && config.Kafka.Topic != "" {
var err error
kafkaClient, err = outputs.NewKafkaClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
kafkaClient, err = outputs.NewKafkaClient(config, stats, promStats, statsdClient, dogstatsdClient)
if err != nil {
config.Kafka.HostPort = ""
} else {
@ -541,7 +509,7 @@ func init() {
if config.KafkaRest.Address != "" {
var err error
kafkaRestClient, err = outputs.NewClient("KafkaRest", config.KafkaRest.Address, config.KafkaRest.CommonConfig, *initClientArgs)
kafkaRestClient, err = outputs.NewClient("KafkaRest", config.KafkaRest.Address, config.KafkaRest.MutualTLS, config.KafkaRest.CheckCert, *initClientArgs)
if err != nil {
config.KafkaRest.Address = ""
} else {
@ -554,7 +522,7 @@ func init() {
var url = "https://events.pagerduty.com/v2/enqueue"
var outputName = "Pagerduty"
pagerdutyClient, err = outputs.NewClient(outputName, url, config.Pagerduty.CommonConfig, *initClientArgs)
pagerdutyClient, err = outputs.NewClient(outputName, url, config.Pagerduty.MutualTLS, config.Pagerduty.CheckCert, *initClientArgs)
if err != nil {
config.Pagerduty.RoutingKey = ""
@ -565,9 +533,9 @@ func init() {
if config.Kubeless.Namespace != "" && config.Kubeless.Function != "" {
var err error
kubelessClient, err = outputs.NewKubelessClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
kubelessClient, err = outputs.NewKubelessClient(config, stats, promStats, statsdClient, dogstatsdClient)
if err != nil {
utils.Log(utils.ErrorLvl, kubelessClient.OutputType, err.Error())
log.Printf("[ERROR] : Kubeless - %v\n", err)
config.Kubeless.Namespace = ""
config.Kubeless.Function = ""
} else {
@ -577,7 +545,7 @@ func init() {
if config.WebUI.URL != "" {
var err error
webUIClient, err = outputs.NewClient("WebUI", config.WebUI.URL, config.WebUI.CommonConfig, *initClientArgs)
webUIClient, err = outputs.NewClient("WebUI", config.WebUI.URL, config.WebUI.MutualTLS, config.WebUI.CheckCert, *initClientArgs)
if err != nil {
config.WebUI.URL = ""
} else {
@ -587,7 +555,7 @@ func init() {
if config.PolicyReport.Enabled {
var err error
policyReportClient, err = outputs.NewPolicyReportClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
policyReportClient, err = outputs.NewPolicyReportClient(config, stats, promStats, statsdClient, dogstatsdClient)
if err != nil {
config.PolicyReport.Enabled = false
} else {
@ -597,9 +565,9 @@ func init() {
if config.Openfaas.FunctionName != "" {
var err error
openfaasClient, err = outputs.NewOpenfaasClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
openfaasClient, err = outputs.NewOpenfaasClient(config, stats, promStats, statsdClient, dogstatsdClient)
if err != nil {
utils.Log(utils.ErrorLvl, openfaasClient.OutputType, err.Error())
log.Printf("[ERROR] : OpenFaaS - %v\n", err)
} else {
outputs.EnabledOutputs = append(outputs.EnabledOutputs, "OpenFaaS")
}
@ -607,9 +575,9 @@ func init() {
if config.Tekton.EventListener != "" {
var err error
tektonClient, err = outputs.NewClient("Tekton", config.Tekton.EventListener, config.Tekton.CommonConfig, *initClientArgs)
tektonClient, err = outputs.NewClient("Tekton", config.Tekton.EventListener, config.Tekton.MutualTLS, config.Tekton.CheckCert, *initClientArgs)
if err != nil {
utils.Log(utils.ErrorLvl, tektonClient.OutputType, err.Error())
log.Printf("[ERROR] : Tekton - %v\n", err)
} else {
outputs.EnabledOutputs = append(outputs.EnabledOutputs, "Tekton")
}
@ -617,7 +585,7 @@ func init() {
if config.Rabbitmq.URL != "" && config.Rabbitmq.Queue != "" {
var err error
rabbitmqClient, err = outputs.NewRabbitmqClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
rabbitmqClient, err = outputs.NewRabbitmqClient(config, stats, promStats, statsdClient, dogstatsdClient)
if err != nil {
config.Rabbitmq.URL = ""
} else {
@ -627,9 +595,9 @@ func init() {
if config.Wavefront.EndpointType != "" && config.Wavefront.EndpointHost != "" {
var err error
wavefrontClient, err = outputs.NewWavefrontClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
wavefrontClient, err = outputs.NewWavefrontClient(config, stats, promStats, statsdClient, dogstatsdClient)
if err != nil {
utils.Log(utils.ErrorLvl, wavefrontClient.OutputType, err.Error())
log.Printf("[ERROR] : Wavefront - %v\n", err)
config.Wavefront.EndpointHost = ""
} else {
outputs.EnabledOutputs = append(outputs.EnabledOutputs, "Wavefront")
@ -638,9 +606,9 @@ func init() {
if config.Fission.Function != "" {
var err error
fissionClient, err = outputs.NewFissionClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
fissionClient, err = outputs.NewFissionClient(config, stats, promStats, statsdClient, dogstatsdClient)
if err != nil {
utils.Log(utils.ErrorLvl, fissionClient.OutputType, err.Error())
log.Printf("[ERROR] : Fission - %v\n", err)
} else {
outputs.EnabledOutputs = append(outputs.EnabledOutputs, outputs.Fission)
}
@ -650,7 +618,7 @@ func init() {
var err error
var outputName = "Grafana"
endpointUrl := fmt.Sprintf("%s/api/annotations", config.Grafana.HostPort)
grafanaClient, err = outputs.NewClient(outputName, endpointUrl, config.Grafana.CommonConfig, *initClientArgs)
grafanaClient, err = outputs.NewClient(outputName, endpointUrl, config.Grafana.MutualTLS, config.Grafana.CheckCert, *initClientArgs)
if err != nil {
config.Grafana.HostPort = ""
config.Grafana.APIKey = ""
@ -662,7 +630,7 @@ func init() {
if config.GrafanaOnCall.WebhookURL != "" {
var err error
var outputName = "GrafanaOnCall"
grafanaOnCallClient, err = outputs.NewClient(outputName, config.GrafanaOnCall.WebhookURL, config.GrafanaOnCall.CommonConfig, *initClientArgs)
grafanaOnCallClient, err = outputs.NewClient(outputName, config.GrafanaOnCall.WebhookURL, config.GrafanaOnCall.MutualTLS, config.GrafanaOnCall.CheckCert, *initClientArgs)
if err != nil {
config.GrafanaOnCall.WebhookURL = ""
} else {
@ -672,10 +640,10 @@ func init() {
if config.Yandex.S3.Bucket != "" {
var err error
yandexClient, err = outputs.NewYandexClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
yandexClient, err = outputs.NewYandexClient(config, stats, promStats, statsdClient, dogstatsdClient)
if err != nil {
config.Yandex.S3.Bucket = ""
utils.Log(utils.ErrorLvl, yandexClient.OutputType, err.Error())
log.Printf("[ERROR] : Yandex - %v\n", err)
} else {
if config.Yandex.S3.Bucket != "" {
outputs.EnabledOutputs = append(outputs.EnabledOutputs, "YandexS3")
@ -685,10 +653,10 @@ func init() {
if config.Yandex.DataStreams.StreamName != "" {
var err error
yandexClient, err = outputs.NewYandexClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
yandexClient, err = outputs.NewYandexClient(config, stats, promStats, statsdClient, dogstatsdClient)
if err != nil {
config.Yandex.DataStreams.StreamName = ""
utils.Log(utils.ErrorLvl, yandexClient.OutputType, err.Error())
log.Printf("[ERROR] : Yandex - %v\n", err)
} else {
if config.Yandex.DataStreams.StreamName != "" {
outputs.EnabledOutputs = append(outputs.EnabledOutputs, "YandexDataStreams")
@ -698,10 +666,10 @@ func init() {
if config.Syslog.Host != "" {
var err error
syslogClient, err = outputs.NewSyslogClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
syslogClient, err = outputs.NewSyslogClient(config, stats, promStats, statsdClient, dogstatsdClient)
if err != nil {
config.Syslog.Host = ""
utils.Log(utils.ErrorLvl, syslogClient.OutputType, err.Error())
log.Printf("[ERROR] : Syslog - %v\n", err)
} else {
outputs.EnabledOutputs = append(outputs.EnabledOutputs, "Syslog")
}
@ -709,10 +677,10 @@ func init() {
if config.MQTT.Broker != "" {
var err error
mqttClient, err = outputs.NewMQTTClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
mqttClient, err = outputs.NewMQTTClient(config, stats, promStats, statsdClient, dogstatsdClient)
if err != nil {
config.MQTT.Broker = ""
utils.Log(utils.ErrorLvl, mqttClient.OutputType, err.Error())
log.Printf("[ERROR] : MQTT - %v\n", err)
} else {
outputs.EnabledOutputs = append(outputs.EnabledOutputs, "MQTT")
}
@ -721,7 +689,7 @@ func init() {
if config.Zincsearch.HostPort != "" {
var err error
endpointUrl := fmt.Sprintf("%s/api/%s/_doc", config.Zincsearch.HostPort, config.Zincsearch.Index)
zincsearchClient, err = outputs.NewClient("Zincsearch", endpointUrl, types.CommonConfig{CheckCert: config.Zincsearch.CheckCert}, *initClientArgs)
zincsearchClient, err = outputs.NewClient("Zincsearch", endpointUrl, false, config.Zincsearch.CheckCert, *initClientArgs)
if err != nil {
config.Zincsearch.HostPort = ""
} else {
@ -732,7 +700,7 @@ func init() {
if config.Gotify.HostPort != "" {
var err error
endpointUrl := fmt.Sprintf("%s/message", config.Gotify.HostPort)
gotifyClient, err = outputs.NewClient("Gotify", endpointUrl, types.CommonConfig{CheckCert: config.Gotify.CheckCert}, *initClientArgs)
gotifyClient, err = outputs.NewClient("Gotify", endpointUrl, false, config.Gotify.CheckCert, *initClientArgs)
if err != nil {
config.Gotify.HostPort = ""
} else {
@ -742,10 +710,10 @@ func init() {
if config.Spyderbat.OrgUID != "" {
var err error
spyderbatClient, err = outputs.NewSpyderbatClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
spyderbatClient, err = outputs.NewSpyderbatClient(config, stats, promStats, statsdClient, dogstatsdClient)
if err != nil {
config.Spyderbat.OrgUID = ""
utils.Log(utils.ErrorLvl, spyderbatClient.OutputType, err.Error())
log.Printf("[ERROR] : Spyderbat - %v\n", err)
} else {
outputs.EnabledOutputs = append(outputs.EnabledOutputs, "Spyderbat")
}
@ -753,10 +721,10 @@ func init() {
if config.TimescaleDB.Host != "" {
var err error
timescaleDBClient, err = outputs.NewTimescaleDBClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
timescaleDBClient, err = outputs.NewTimescaleDBClient(config, stats, promStats, statsdClient, dogstatsdClient)
if err != nil {
config.TimescaleDB.Host = ""
utils.Log(utils.ErrorLvl, timescaleDBClient.OutputType, err.Error())
log.Printf("[ERROR] : TimescaleDB - %v\n", err)
} else {
outputs.EnabledOutputs = append(outputs.EnabledOutputs, "TimescaleDB")
}
@ -764,7 +732,7 @@ func init() {
if config.Redis.Address != "" {
var err error
redisClient, err = outputs.NewRedisClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
redisClient, err = outputs.NewRedisClient(config, stats, promStats, statsdClient, dogstatsdClient)
if err != nil {
config.Redis.Address = ""
} else {
@ -772,27 +740,17 @@ func init() {
}
}
if config.Logstash.Address != "" {
var err error
logstashClient, err = outputs.NewLogstashClient(config, stats, promStats, statsdClient, dogstatsdClient)
if err != nil {
config.Logstash.Address = ""
} else {
outputs.EnabledOutputs = append(outputs.EnabledOutputs, "Logstash")
}
}
if config.Telegram.ChatID != "" && config.Telegram.Token != "" {
var err error
var urlFormat = "https://api.telegram.org/bot%s/sendMessage"
telegramClient, err = outputs.NewClient("Telegram", fmt.Sprintf(urlFormat, config.Telegram.Token), types.CommonConfig{CheckCert: config.Telegram.CheckCert}, *initClientArgs)
telegramClient, err = outputs.NewClient("Telegram", fmt.Sprintf(urlFormat, config.Telegram.Token), false, config.Telegram.CheckCert, *initClientArgs)
if err != nil {
config.Telegram.ChatID = ""
config.Telegram.Token = ""
utils.Log(utils.ErrorLvl, telegramClient.OutputType, err.Error())
log.Printf("[ERROR] : Telegram - %v\n", err)
} else {
outputs.EnabledOutputs = append(outputs.EnabledOutputs, "Telegram")
}
@ -800,7 +758,7 @@ func init() {
if config.N8N.Address != "" {
var err error
n8nClient, err = outputs.NewClient("n8n", config.N8N.Address, types.CommonConfig{CheckCert: config.N8N.CheckCert}, *initClientArgs)
n8nClient, err = outputs.NewClient("n8n", config.N8N.Address, false, config.N8N.CheckCert, *initClientArgs)
if err != nil {
config.N8N.Address = ""
} else {
@ -811,7 +769,7 @@ func init() {
if config.OpenObserve.HostPort != "" {
var err error
endpointUrl := fmt.Sprintf("%s/api/%s/%s/_multi", config.OpenObserve.HostPort, config.OpenObserve.OrganizationName, config.OpenObserve.StreamName)
openObserveClient, err = outputs.NewClient("OpenObserve", endpointUrl, config.OpenObserve.CommonConfig, *initClientArgs)
openObserveClient, err = outputs.NewClient("OpenObserve", endpointUrl, config.OpenObserve.MutualTLS, config.OpenObserve.CheckCert, *initClientArgs)
if err != nil {
config.OpenObserve.HostPort = ""
} else {
@ -822,7 +780,7 @@ func init() {
if config.Dynatrace.APIToken != "" && config.Dynatrace.APIUrl != "" {
var err error
dynatraceApiUrl := strings.TrimRight(config.Dynatrace.APIUrl, "/") + "/v2/logs/ingest"
dynatraceClient, err = outputs.NewClient("Dynatrace,", dynatraceApiUrl, types.CommonConfig{CheckCert: config.Dynatrace.CheckCert}, *initClientArgs)
dynatraceClient, err = outputs.NewClient("Dynatrace", dynatraceApiUrl, false, config.Dynatrace.CheckCert, *initClientArgs)
if err != nil {
config.Dynatrace.APIToken = ""
config.Dynatrace.APIUrl = ""
@ -833,45 +791,18 @@ func init() {
if config.OTLP.Traces.Endpoint != "" {
var err error
otlpTracesClient, err = outputs.NewOtlpTracesClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
otlpClient, err = outputs.NewOtlpTracesClient(config, stats, promStats, statsdClient, dogstatsdClient)
if err != nil {
config.OTLP.Traces.Endpoint = ""
} else {
outputs.EnabledOutputs = append(outputs.EnabledOutputs, "OTLPTraces")
shutDownFuncs = append(shutDownFuncs, otlpTracesClient.ShutDownFunc)
}
}
if config.OTLP.Logs.Endpoint != "" {
var err error
otlpLogsClient, err = outputs.NewOtlpLogsClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
if err != nil {
config.OTLP.Logs.Endpoint = ""
} else {
outputs.EnabledOutputs = append(outputs.EnabledOutputs, "OTLPLogs")
shutDownFuncs = append(shutDownFuncs, otlpLogsClient.ShutDownFunc)
}
}
if config.OTLP.Metrics.Endpoint != "" {
shutDownFunc, err := otlpmetrics.InitProvider(context.Background(), &config.OTLP.Metrics)
if err != nil {
fmt.Println(err)
config.OTLP.Logs.Endpoint = ""
} else {
outputs.EnabledOutputs = append(outputs.EnabledOutputs, "OTLPMetrics")
fn := func() {
if err := shutDownFunc(context.TODO()); err != nil {
utils.Log(utils.ErrorLvl, "OTLP Metrics", err.Error())
}
}
shutDownFuncs = append(shutDownFuncs, fn)
shutDownFuncs = append(shutDownFuncs, otlpClient.ShutDownFunc)
}
}
if config.Talon.Address != "" {
var err error
talonClient, err = outputs.NewClient("Talon", config.Talon.Address, types.CommonConfig{CheckCert: config.Talon.CheckCert}, *initClientArgs)
talonClient, err = outputs.NewClient("Talon", config.Talon.Address, false, config.Talon.CheckCert, *initClientArgs)
if err != nil {
config.Talon.Address = ""
} else {
@ -879,7 +810,9 @@ func init() {
}
}
utils.Log(utils.InfoLvl, "", fmt.Sprintf("Enabled Outputs: %s", outputs.EnabledOutputs))
log.Printf("[INFO] : Falco Sidekick version: %s\n", GetVersionInfo().GitVersion)
log.Printf("[INFO] : Enabled Outputs : %s\n", outputs.EnabledOutputs)
}
func main() {
@ -887,7 +820,7 @@ func main() {
defer shutdown()
}
if config.Debug {
utils.Log(utils.InfoPrefix, "", fmt.Sprintf("Debug mode: %v", config.Debug))
log.Printf("[INFO] : Debug mode : %v", config.Debug)
}
routes := map[string]http.Handler{
@ -909,11 +842,11 @@ func main() {
if ok {
delete(routes, r)
if config.Debug {
utils.Log(utils.DebugLvl, "", fmt.Sprintf("%s is served on http", r))
log.Printf("[DEBUG] : %s is served on http", r)
}
HTTPServeMux.Handle(r, handler)
} else {
utils.Log(utils.WarningLvl, "", fmt.Sprintf("tlsserver.notlspaths has unknown path '%s'", r))
log.Printf("[WARN] : tlsserver.notlspaths has unknown path '%s'", r)
}
}
}
@ -936,12 +869,12 @@ func main() {
if config.TLSServer.Deploy {
if config.TLSServer.MutualTLS {
if config.Debug {
utils.Log(utils.DebugLvl, "", "running mTLS server")
log.Printf("[DEBUG] : running mTLS server")
}
caCert, err := os.ReadFile(config.TLSServer.CaCertFile)
if err != nil {
utils.Log(utils.ErrorLvl, "", err.Error())
log.Printf("[ERROR] : %v\n", err.Error())
}
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM(caCert)
@ -955,16 +888,16 @@ func main() {
}
if config.Debug && !config.TLSServer.MutualTLS {
utils.Log(utils.DebugLvl, "", "running TLS server")
log.Printf("[DEBUG] : running TLS server")
}
if len(config.TLSServer.NoTLSPaths) == 0 {
utils.Log(utils.WarningLvl, "", "tlsserver.deploy is true but tlsserver.notlspaths is empty, change tlsserver.deploy to true to deploy two servers, at least for /ping endpoint")
log.Printf("[WARN] : tlsserver.deploy is true but tlsserver.notlspaths is empty, change tlsserver.deploy to true to deploy two servers, at least for /ping endpoint")
}
if len(config.TLSServer.NoTLSPaths) != 0 {
if config.Debug {
utils.Log(utils.DebugLvl, "", "running HTTP server for endpoints defined in tlsserver.notlspaths")
log.Printf("[DEBUG] : running HTTP server for endpoints defined in tlsserver.notlspaths")
}
httpServer := &http.Server{
@ -976,31 +909,30 @@ func main() {
WriteTimeout: 60 * time.Second,
IdleTimeout: 60 * time.Second,
}
utils.Log(utils.InfoLvl, "", fmt.Sprintf("Falcosidekick is up and listening on %s:%d for TLS and %s:%d for non-TLS", config.ListenAddress, config.ListenPort, config.ListenAddress, config.TLSServer.NoTLSPort))
log.Printf("[INFO] : Falcosidekick is up and listening on %s:%d for TLS and %s:%d for non-TLS", config.ListenAddress, config.ListenPort, config.ListenAddress, config.TLSServer.NoTLSPort)
errs := make(chan error, 1)
go serveTLS(server, errs)
go serveHTTP(httpServer, errs)
err := <-errs
utils.Log(utils.FatalLvl, "", err.Error())
log.Fatal(<-errs)
} else {
utils.Log(utils.InfoLvl, "", fmt.Sprintf("Falcosidekick is up and listening on %s:%d", config.ListenAddress, config.ListenPort))
log.Printf("[INFO] : Falcosidekick is up and listening on %s:%d", config.ListenAddress, config.ListenPort)
if err := server.ListenAndServeTLS(config.TLSServer.CertFile, config.TLSServer.KeyFile); err != nil {
utils.Log(utils.FatalLvl, "", err.Error())
log.Fatalf("[ERROR] : %v", err.Error())
}
}
} else {
if config.Debug {
utils.Log(utils.DebugLvl, "", "running HTTP server")
log.Printf("[DEBUG] : running HTTP server")
}
if config.TLSServer.MutualTLS {
utils.Log(utils.WarningLvl, "", "tlsserver.deploy is false but tlsserver.mutualtls is true, change tlsserver.deploy to true to use mTLS")
log.Printf("[WARN] : tlsserver.deploy is false but tlsserver.mutualtls is true, change tlsserver.deploy to true to use mTLS")
}
utils.Log(utils.InfoLvl, "", fmt.Sprintf("Falcosidekick is up and listening on %s:%d", config.ListenAddress, config.ListenPort))
log.Printf("[INFO] : Falcosidekick is up and listening on %s:%d", config.ListenAddress, config.ListenPort)
if err := server.ListenAndServe(); err != nil {
utils.Log(utils.FatalLvl, "", err.Error())
log.Fatalf("[ERROR] : %v", err.Error())
}
}
}

View File

@ -1,70 +0,0 @@
package main
import (
"fmt"
"regexp"
"strings"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
otlpmetrics "github.com/falcosecurity/falcosidekick/outputs/otlp_metrics"
"github.com/falcosecurity/falcosidekick/types"
)
// newOTLPMetrics initializes the OTLP metrics registry with the three
// counters Falcosidekick exposes (Falco rule matches, inputs, outputs)
// and returns it.
func newOTLPMetrics(config *types.Configuration) *otlpmetrics.OTLPMetrics {
	metrics := &otlpmetrics.OTLPMetrics{
		Falco:   newOTLPFalcoMatchesCounter(config),
		Inputs:  newOTLPInputsCounter(),
		Outputs: newOTLPOutputsCounter(),
	}
	// Keep the package-level handle in sync with the returned value,
	// exactly as the original assignment did.
	otlpMetrics = metrics
	return metrics
}
// newOTLPInputsCounter builds the OTLP counter that tracks received inputs,
// partitioned by event source and processing status.
func newOTLPInputsCounter() otlpmetrics.Counter {
	return otlpmetrics.NewCounter(
		"falcosecurity_falcosidekick_inputs",
		"Number of times an input is received",
		[]string{"source", "status"},
	)
}
// newOTLPOutputsCounter builds the OTLP counter that tracks generated
// outputs, partitioned by destination and delivery status.
func newOTLPOutputsCounter() otlpmetrics.Counter {
	return otlpmetrics.NewCounter(
		"falcosecurity_falcosidekick_outputs",
		"Number of times an output is generated",
		[]string{"destination", "status"},
	)
}
// newOTLPFalcoMatchesCounter builds the OTLP counter that tracks Falco rule
// matches. In addition to the fixed attribute set below, it admits every
// configured custom field and every extra attribute whose (sanitized) name is
// a valid OTLP metric attribute name; invalid names are logged and skipped.
func newOTLPFalcoMatchesCounter(config *types.Configuration) otlpmetrics.Counter {
	// The pattern is a compile-time constant, so MustCompile is safe here and
	// removes the silently ignored error the previous regexp.Compile had.
	regOTLPLabels := regexp.MustCompile("^[a-zA-Z_:][a-zA-Z0-9_:]*$")
	supportedAttributes := []string{
		"source",
		"priority",
		"rule",
		"hostname",
		"tags",
		"k8s_ns_name",
		"k8s_pod_name",
	}
	for i := range config.Customfields {
		if !regOTLPLabels.MatchString(i) {
			utils.Log(utils.ErrorLvl, "", fmt.Sprintf("Custom field '%v' is not a valid OTLP metric attribute name", i))
			continue
		}
		supportedAttributes = append(supportedAttributes, i)
	}
	for _, i := range config.OTLP.Metrics.ExtraAttributesList {
		// Dots are mapped to underscores once; reuse the sanitized name for
		// both the validity check and the attribute list (previously the
		// replacement was computed twice).
		sanitized := strings.ReplaceAll(i, ".", "_")
		if !regOTLPLabels.MatchString(sanitized) {
			utils.Log(utils.ErrorLvl, "", fmt.Sprintf("Extra field '%v' is not a valid OTLP metric attribute name", i))
			continue
		}
		supportedAttributes = append(supportedAttributes, sanitized)
	}
	name := "falcosecurity_falco_rules_matches_total"
	description := "Number of times rules match"
	return otlpmetrics.NewCounter(name, description, supportedAttributes)
}

View File

@ -4,17 +4,13 @@ package outputs
import (
"encoding/json"
"fmt"
"net/http"
"log"
"regexp"
"sort"
"strconv"
"strings"
"time"
"go.opentelemetry.io/otel/attribute"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
"github.com/falcosecurity/falcosidekick/types"
)
@ -40,28 +36,6 @@ var (
reg = regexp.MustCompile("[^a-zA-Z0-9_]")
)
// NewAlertManagerClient returns one output Client per configured AlertManager
// host:port. With a single host the client keeps the bare "AlertManager"
// name; with several hosts each client is suffixed with its index
// ("AlertManager_0", "AlertManager_1", ...). The first client-creation
// failure aborts the whole construction.
func NewAlertManagerClient(hostPorts []string, endpoint string, cfg types.CommonConfig, params types.InitClientArgs) ([]*Client, error) {
	clients := make([]*Client, 0, len(hostPorts))
	for idx, hostPort := range hostPorts {
		name := "AlertManager"
		if len(hostPorts) > 1 {
			name = fmt.Sprintf("AlertManager_%v", idx)
		}
		client, err := NewClient(name, fmt.Sprintf("%s%s", hostPort, endpoint), cfg, params)
		if err != nil {
			return nil, err
		}
		clients = append(clients, client)
	}
	return clients, nil
}
func newAlertmanagerPayload(falcopayload types.FalcoPayload, config *types.Configuration) []alertmanagerPayload {
var amPayload alertmanagerPayload
amPayload.Labels = make(map[string]string)
@ -156,27 +130,24 @@ func newAlertmanagerPayload(falcopayload types.FalcoPayload, config *types.Confi
// AlertmanagerPost posts event to AlertManager
// AlertmanagerPost posts an event to AlertManager, attaching any configured
// custom headers, and records the result in the stats/Prometheus/OTLP
// counters.
// NOTE(review): this span interleaves two variants of the same function
// (likely a rendered-diff/merge artifact): `err` is declared twice and both
// the AddHeader-based and the request-callback-based header mechanisms
// appear, as do both utils.Log and log.Printf. Only one variant should be
// kept — confirm against the intended revision.
func (c *Client) AlertmanagerPost(falcopayload types.FalcoPayload) {
c.Stats.Alertmanager.Add(Total, 1)
// Serialize access to the shared HTTP client while headers are mutated.
c.httpClientLock.Lock()
defer c.httpClientLock.Unlock()
// Variant A: set custom headers on the client itself before posting.
for i, j := range c.Config.Alertmanager.CustomHeaders {
c.AddHeader(i, j)
}
// Variant B: set custom headers on the outgoing request via callback.
err := c.Post(newAlertmanagerPayload(falcopayload, c.Config), func(req *http.Request) {
for i, j := range c.Config.Alertmanager.CustomHeaders {
req.Header.Set(i, j)
}
})
// Variant A's post call (duplicate declaration of err — merge artifact).
err := c.Post(newAlertmanagerPayload(falcopayload, c.Config))
if err != nil {
// Failure path: bump error counters on every metrics backend, then bail.
go c.CountMetric(Outputs, 1, []string{"output:alertmanager", "status:error"})
c.Stats.Alertmanager.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "alertmanager", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "alertmanager"),
attribute.String("status", Error)).Inc()
// Both logging styles present — keep only one (merge artifact).
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : AlertManager - %v\n", err)
return
}
// Success path: bump OK counters on every metrics backend.
go c.CountMetric(Outputs, 1, []string{"output:alertmanager", "status:ok"})
c.Stats.Alertmanager.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "alertmanager", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "alertmanager"),
attribute.String("status", OK)).Inc()
}
func alertmanagerSafeLabel(label string) string {

View File

@ -4,44 +4,36 @@ package outputs
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"net/url"
"os"
"strings"
"time"
"github.com/DataDog/datadog-go/statsd"
"github.com/aws/aws-sdk-go-v2/aws"
awsconfig "github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
"github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs"
cloudwatchlogstypes "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs/types"
"github.com/aws/aws-sdk-go-v2/service/kinesis"
"github.com/aws/aws-sdk-go-v2/service/lambda"
lambdatypes "github.com/aws/aws-sdk-go-v2/service/lambda/types"
"github.com/aws/aws-sdk-go-v2/service/s3"
s3types "github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/aws-sdk-go-v2/service/sns"
snstypes "github.com/aws/aws-sdk-go-v2/service/sns/types"
"github.com/aws/aws-sdk-go-v2/service/sqs"
"github.com/aws/aws-sdk-go-v2/service/sts"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
"github.com/aws/aws-sdk-go/service/kinesis"
"github.com/aws/aws-sdk-go/service/lambda"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/sns"
"github.com/aws/aws-sdk-go/service/sqs"
"github.com/aws/aws-sdk-go/service/sts"
"github.com/google/uuid"
"go.opentelemetry.io/otel/attribute"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
otlpmetrics "github.com/falcosecurity/falcosidekick/outputs/otlp_metrics"
"github.com/falcosecurity/falcosidekick/types"
)
// NewAWSClient returns a new output.Client for accessing the AWS API.
func NewAWSClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics,
otlpMetrics *otlpmetrics.OTLPMetrics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
func NewAWSClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
var region string
if config.AWS.Region != "" {
region = config.AWS.Region
@ -50,28 +42,15 @@ func NewAWSClient(config *types.Configuration, stats *types.Statistics, promStat
} else if os.Getenv("AWS_DEFAULT_REGION") != "" {
region = os.Getenv("AWS_DEFAULT_REGION")
} else {
metaSession := session.Must(session.NewSession())
metaClient := ec2metadata.New(metaSession)
var err error
cfg, err := awsconfig.LoadDefaultConfig(context.TODO())
region, err = metaClient.Region()
if err != nil {
return nil, err
}
metaClient := imds.NewFromConfig(cfg)
getMetadataOutput, err := metaClient.GetMetadata(context.TODO(), &imds.GetMetadataInput{Path: "placement/region"})
if err != nil {
utils.Log(utils.ErrorLvl, "AWS", fmt.Sprintf("Error while calling from Metadata AWS: %v", err.Error()))
return nil, errors.New("error calling to get metadata")
}
defer getMetadataOutput.Content.Close()
regionBytes, err := io.ReadAll(getMetadataOutput.Content)
if err != nil {
utils.Log(utils.ErrorLvl, "AWS", fmt.Sprintf("Error while getting region from Metadata AWS Session: %v", err.Error()))
log.Printf("[ERROR] : AWS - Error while getting region from Metadata AWS Session: %v\n", err.Error())
return nil, errors.New("error getting region from metadata")
}
region = string(regionBytes)
utils.Log(utils.InfoLvl, "AWS", fmt.Sprintf("region from metadata: %s", region))
}
if config.AWS.AccessKeyID != "" && config.AWS.SecretAccessKey != "" && region != "" {
@ -79,45 +58,52 @@ func NewAWSClient(config *types.Configuration, stats *types.Statistics, promStat
err2 := os.Setenv("AWS_SECRET_ACCESS_KEY", config.AWS.SecretAccessKey)
err3 := os.Setenv("AWS_DEFAULT_REGION", region)
if err1 != nil || err2 != nil || err3 != nil {
utils.Log(utils.ErrorLvl, "AWS", "Error setting AWS env vars")
log.Println("[ERROR] : AWS - Error setting AWS env vars")
return nil, errors.New("error setting AWS env vars")
}
}
awscfg := &aws.Config{Region: region}
awscfg := &aws.Config{Region: aws.String(region)}
if config.AWS.RoleARN != "" {
stsSvc := sts.NewFromConfig(*awscfg)
baseSess := session.Must(session.NewSession(awscfg))
stsSvc := sts.New(baseSess)
stsArIn := new(sts.AssumeRoleInput)
stsArIn.RoleArn = aws.String(config.AWS.RoleARN)
stsArIn.RoleSessionName = aws.String(fmt.Sprintf("session-%v", uuid.New().String()))
if config.AWS.ExternalID != "" {
stsArIn.ExternalId = aws.String(config.AWS.ExternalID)
}
assumedRole, err := stsSvc.AssumeRole(context.Background(), stsArIn)
assumedRole, err := stsSvc.AssumeRole(stsArIn)
if err != nil {
utils.Log(utils.ErrorLvl, "AWS", "Error while Assuming Role")
log.Println("[ERROR] : AWS - Error while Assuming Role")
return nil, errors.New("error while assuming role")
}
awscfg.Credentials = aws.NewCredentialsCache(credentials.NewStaticCredentialsProvider(
awscfg.Credentials = credentials.NewStaticCredentials(
*assumedRole.Credentials.AccessKeyId,
*assumedRole.Credentials.SecretAccessKey,
*assumedRole.Credentials.SessionToken,
))
)
}
sess, err := session.NewSession(awscfg)
if err != nil {
log.Printf("[ERROR] : AWS - Error while creating AWS Session: %v\n", err.Error())
return nil, errors.New("error while creating AWS Session")
}
if config.AWS.CheckIdentity {
_, err := sts.NewFromConfig(*awscfg).GetCallerIdentity(context.Background(), &sts.GetCallerIdentityInput{})
_, err = sts.New(sess).GetCallerIdentity(&sts.GetCallerIdentityInput{})
if err != nil {
utils.Log(utils.ErrorLvl, "AWS", fmt.Sprintf("Error while getting AWS Token: %v", err.Error()))
log.Printf("[ERROR] : AWS - Error while getting AWS Token: %v\n", err.Error())
return nil, errors.New("error while getting AWS Token")
}
}
var endpointURL *url.URL
endpointURL, err := url.Parse(config.AWS.SQS.URL)
endpointURL, err = url.Parse(config.AWS.SQS.URL)
if err != nil {
utils.Log(utils.ErrorLvl, "AWS SQS", err.Error())
log.Printf("[ERROR] : AWS SQS - %v\n", err.Error())
return nil, ErrClientCreation
}
@ -125,10 +111,9 @@ func NewAWSClient(config *types.Configuration, stats *types.Statistics, promStat
OutputType: "AWS",
EndpointURL: endpointURL,
Config: config,
AWSConfig: awscfg,
AWSSession: sess,
Stats: stats,
PromStats: promStats,
OTLPMetrics: otlpMetrics,
StatsdClient: statsdClient,
DogstatsdClient: dogstatsdClient,
}, nil
@ -136,46 +121,42 @@ func NewAWSClient(config *types.Configuration, stats *types.Statistics, promStat
// InvokeLambda invokes a lambda function
// InvokeLambda invokes the configured AWS Lambda function with the Falco
// payload as its JSON argument and records the outcome in the
// stats/Prometheus/OTLP counters.
// NOTE(review): this span interleaves the aws-sdk-go (v1) and aws-sdk-go-v2
// variants of the same function (rendered-diff/merge artifact): both `svc :=`
// constructors, both InvocationType/LogType forms, both Invoke call shapes,
// and both logging styles appear. Only one SDK variant should be kept —
// confirm against the intended revision.
func (c *Client) InvokeLambda(falcopayload types.FalcoPayload) {
// v2 variant: client built from aws.Config.
svc := lambda.NewFromConfig(*c.AWSConfig)
// v1 variant: client built from a session.
svc := lambda.New(c.AWSSession)
f, _ := json.Marshal(falcopayload)
input := &lambda.InvokeInput{
FunctionName: aws.String(c.Config.AWS.Lambda.FunctionName),
// v2 variant: typed enum values.
InvocationType: lambdatypes.InvocationType(c.Config.AWS.Lambda.InvocationType),
LogType: lambdatypes.LogType(c.Config.AWS.Lambda.LogType),
// v1 variant: *string values (duplicate keys — merge artifact).
InvocationType: aws.String(c.Config.AWS.Lambda.InvocationType),
LogType: aws.String(c.Config.AWS.Lambda.LogType),
Payload: f,
}
c.Stats.AWSLambda.Add("total", 1)
// v2 variant: context-aware Invoke.
resp, err := svc.Invoke(context.Background(), input)
// v1 variant: context-less Invoke (duplicate declaration — merge artifact).
resp, err := svc.Invoke(input)
if err != nil {
// Failure path: bump error counters on every metrics backend, then bail.
go c.CountMetric("outputs", 1, []string{"output:awslambda", "status:error"})
c.Stats.AWSLambda.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "awslambda", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "awslambda"),
attribute.String("status", Error)).Inc()
// Both logging styles present — keep only one (merge artifact).
utils.Log(utils.ErrorLvl, c.OutputType+" Lambda", err.Error())
log.Printf("[ERROR] : %v Lambda - %v\n", c.OutputType, err.Error())
return
}
if c.Config.Debug {
// LogResult is base64-encoded by the Lambda API; decode before logging.
r, _ := base64.StdEncoding.DecodeString(*resp.LogResult)
utils.Log(utils.DebugLvl, c.OutputType+" Lambda", fmt.Sprintf("result : %v", string(r)))
log.Printf("[DEBUG] : %v Lambda result : %v\n", c.OutputType, string(r))
}
utils.Log(utils.InfoLvl, c.OutputType+" Lambda", fmt.Sprintf("Invoke OK (%v)", resp.StatusCode))
log.Printf("[INFO] : %v Lambda - Invoke OK (%v)\n", c.OutputType, *resp.StatusCode)
// Success path: bump OK counters on every metrics backend.
go c.CountMetric("outputs", 1, []string{"output:awslambda", "status:ok"})
c.Stats.AWSLambda.Add("ok", 1)
c.PromStats.Outputs.With(map[string]string{"destination": "awslambda", "status": "ok"}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "awslambda"),
attribute.String("status", OK)).Inc()
}
// SendMessage sends a message to SQS Queue
func (c *Client) SendMessage(falcopayload types.FalcoPayload) {
svc := sqs.NewFromConfig(*c.AWSConfig)
svc := sqs.New(c.AWSSession)
f, _ := json.Marshal(falcopayload)
@ -186,27 +167,23 @@ func (c *Client) SendMessage(falcopayload types.FalcoPayload) {
c.Stats.AWSSQS.Add("total", 1)
resp, err := svc.SendMessage(context.Background(), input)
resp, err := svc.SendMessage(input)
if err != nil {
go c.CountMetric("outputs", 1, []string{"output:awssqs", "status:error"})
c.Stats.AWSSQS.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "awssqs", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "awssqs"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType+" SQS", err.Error())
log.Printf("[ERROR] : %v SQS - %v\n", c.OutputType, err.Error())
return
}
if c.Config.Debug {
utils.Log(utils.DebugLvl, c.OutputType+" SQS", fmt.Sprintf("MD5OfMessageBody : %v", *resp.MD5OfMessageBody))
log.Printf("[DEBUG] : %v SQS - MD5OfMessageBody : %v\n", c.OutputType, *resp.MD5OfMessageBody)
}
utils.Log(utils.InfoLvl, c.OutputType+" SQS", fmt.Sprintf("Send Message OK (%v)", *resp.MessageId))
log.Printf("[INFO] : %v SQS - Send Message OK (%v)\n", c.OutputType, *resp.MessageId)
go c.CountMetric("outputs", 1, []string{"output:awssqs", "status:ok"})
c.Stats.AWSSQS.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "awssqs", "status": "ok"}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "awssqs"),
attribute.String("status", OK)).Inc()
}
// UploadS3 upload payload to S3
@ -221,42 +198,35 @@ func (c *Client) UploadS3(falcopayload types.FalcoPayload) {
key := fmt.Sprintf("%s/%s/%s.json", prefix, t.Format("2006-01-02"), t.Format(time.RFC3339Nano))
awsConfig := aws.NewConfig()
var client s3.Client
if c.Config.AWS.S3.Endpoint != "" {
s3.NewFromConfig(*awsConfig, s3.WithEndpointResolver(s3.EndpointResolverFromURL(c.Config.AWS.S3.Endpoint)))
} else {
client = *s3.NewFromConfig(*awsConfig)
awsConfig = awsConfig.WithEndpoint(c.Config.AWS.S3.Endpoint)
}
resp, err := client.PutObject(context.Background(), &s3.PutObjectInput{
resp, err := s3.New(c.AWSSession, awsConfig).PutObject(&s3.PutObjectInput{
Bucket: aws.String(c.Config.AWS.S3.Bucket),
Key: aws.String(key),
Body: bytes.NewReader(f),
ACL: s3types.ObjectCannedACL(c.Config.AWS.S3.ObjectCannedACL),
ACL: aws.String(c.Config.AWS.S3.ObjectCannedACL),
})
if err != nil {
go c.CountMetric("outputs", 1, []string{"output:awss3", "status:error"})
c.PromStats.Outputs.With(map[string]string{"destination": "awss3", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "awss3"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType+" S3", err.Error())
log.Printf("[ERROR] : %v S3 - %v\n", c.OutputType, err.Error())
return
}
if resp.SSECustomerAlgorithm != nil {
utils.Log(utils.InfoLvl, c.OutputType+" S3", fmt.Sprintf("Upload payload OK (%v)", *resp.SSECustomerKeyMD5))
log.Printf("[INFO] : %v S3 - Upload payload OK (%v)\n", c.OutputType, *resp.SSECustomerKeyMD5)
} else {
utils.Log(utils.InfoLvl, c.OutputType+" S3", "Upload payload OK")
log.Printf("[INFO] : %v S3 - Upload payload OK\n", c.OutputType)
}
go c.CountMetric("outputs", 1, []string{"output:awss3", "status:ok"})
c.PromStats.Outputs.With(map[string]string{"destination": "awss3", "status": "ok"}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "awss3"),
attribute.String("status", OK)).Inc()
}
// PublishTopic sends a message to a SNS Topic
func (c *Client) PublishTopic(falcopayload types.FalcoPayload) {
svc := sns.NewFromConfig(*c.AWSConfig)
svc := sns.New(c.AWSSession)
var msg *sns.PublishInput
@ -269,7 +239,7 @@ func (c *Client) PublishTopic(falcopayload types.FalcoPayload) {
} else {
msg = &sns.PublishInput{
Message: aws.String(falcopayload.Output),
MessageAttributes: map[string]snstypes.MessageAttributeValue{
MessageAttributes: map[string]*sns.MessageAttributeValue{
"priority": {
DataType: aws.String("String"),
StringValue: aws.String(falcopayload.Priority.String()),
@ -287,13 +257,13 @@ func (c *Client) PublishTopic(falcopayload types.FalcoPayload) {
}
if len(falcopayload.Tags) != 0 {
msg.MessageAttributes["tags"] = snstypes.MessageAttributeValue{
msg.MessageAttributes["tags"] = &sns.MessageAttributeValue{
DataType: aws.String("String"),
StringValue: aws.String(strings.Join(falcopayload.Tags, ",")),
}
}
if falcopayload.Hostname != "" {
msg.MessageAttributes[Hostname] = snstypes.MessageAttributeValue{
msg.MessageAttributes[Hostname] = &sns.MessageAttributeValue{
DataType: aws.String("String"),
StringValue: aws.String(falcopayload.Hostname),
}
@ -302,12 +272,12 @@ func (c *Client) PublishTopic(falcopayload types.FalcoPayload) {
m := strings.ReplaceAll(strings.ReplaceAll(i, "]", ""), "[", ".")
switch j.(type) {
case string:
msg.MessageAttributes[m] = snstypes.MessageAttributeValue{
msg.MessageAttributes[m] = &sns.MessageAttributeValue{
DataType: aws.String("String"),
StringValue: aws.String(fmt.Sprintf("%v", j)),
}
case json.Number:
msg.MessageAttributes[m] = snstypes.MessageAttributeValue{
msg.MessageAttributes[m] = &sns.MessageAttributeValue{
DataType: aws.String("Number"),
StringValue: aws.String(fmt.Sprintf("%v", j)),
}
@ -319,32 +289,28 @@ func (c *Client) PublishTopic(falcopayload types.FalcoPayload) {
if c.Config.Debug {
p, _ := json.Marshal(msg)
utils.Log(utils.DebugLvl, c.OutputType+" SNS", fmt.Sprintf("Message : %v", string(p)))
log.Printf("[DEBUG] : %v SNS - Message : %v\n", c.OutputType, string(p))
}
c.Stats.AWSSNS.Add("total", 1)
resp, err := svc.Publish(context.TODO(), msg)
resp, err := svc.Publish(msg)
if err != nil {
go c.CountMetric("outputs", 1, []string{"output:awssns", "status:error"})
c.Stats.AWSSNS.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "awssns", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "awssns"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType+" SNS", err.Error())
log.Printf("[ERROR] : %v SNS - %v\n", c.OutputType, err.Error())
return
}
utils.Log(utils.DebugLvl, c.OutputType+" SNS", fmt.Sprintf("Send to topic OK (%v)", *resp.MessageId))
log.Printf("[INFO] : %v SNS - Send to topic OK (%v)\n", c.OutputType, *resp.MessageId)
go c.CountMetric("outputs", 1, []string{"output:awssns", "status:ok"})
c.Stats.AWSSNS.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "awssns", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "awssns"),
attribute.String("status", OK)).Inc()
}
// SendCloudWatchLog sends a message to CloudWatch Log
func (c *Client) SendCloudWatchLog(falcopayload types.FalcoPayload) {
svc := cloudwatchlogs.NewFromConfig(*c.AWSConfig)
svc := cloudwatchlogs.New(c.AWSSession)
f, _ := json.Marshal(falcopayload)
@ -352,24 +318,21 @@ func (c *Client) SendCloudWatchLog(falcopayload types.FalcoPayload) {
if c.Config.AWS.CloudWatchLogs.LogStream == "" {
streamName := "falcosidekick-logstream"
utils.Log(utils.InfoLvl, c.OutputType+" CloudWatchLogs", fmt.Sprintf("Log Stream not configured creating one called %s", streamName))
log.Printf("[INFO] : %v CloudWatchLogs - Log Stream not configured creating one called %s\n", c.OutputType, streamName)
inputLogStream := &cloudwatchlogs.CreateLogStreamInput{
LogGroupName: aws.String(c.Config.AWS.CloudWatchLogs.LogGroup),
LogStreamName: aws.String(streamName),
}
_, err := svc.CreateLogStream(context.Background(), inputLogStream)
_, err := svc.CreateLogStream(inputLogStream)
if err != nil {
var rae *cloudwatchlogstypes.ResourceAlreadyExistsException
if errors.As(err, &rae) {
utils.Log(utils.InfoLvl, c.OutputType+" CloudWatchLogs", fmt.Sprintf("Log Stream %s already exist, reusing...", streamName))
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == cloudwatchlogs.ErrCodeResourceAlreadyExistsException {
log.Printf("[INFO] : %v CloudWatchLogs - Log Stream %s already exist, reusing...\n", c.OutputType, streamName)
} else {
go c.CountMetric("outputs", 1, []string{"output:awscloudwatchlogs", "status:error"})
c.Stats.AWSCloudWatchLogs.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "awscloudwatchlogs", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "awscloudwatchlogs"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType+" CloudWatchLogs", err.Error())
log.Printf("[ERROR] : %v CloudWatchLogs - %v\n", c.OutputType, err.Error())
return
}
}
@ -377,13 +340,13 @@ func (c *Client) SendCloudWatchLog(falcopayload types.FalcoPayload) {
c.Config.AWS.CloudWatchLogs.LogStream = streamName
}
logevent := cloudwatchlogstypes.InputLogEvent{
logevent := &cloudwatchlogs.InputLogEvent{
Message: aws.String(string(f)),
Timestamp: aws.Int64(falcopayload.Time.UnixNano() / int64(time.Millisecond)),
}
input := &cloudwatchlogs.PutLogEventsInput{
LogEvents: []cloudwatchlogstypes.InputLogEvent{logevent},
LogEvents: []*cloudwatchlogs.InputLogEvent{logevent},
LogGroupName: aws.String(c.Config.AWS.CloudWatchLogs.LogGroup),
LogStreamName: aws.String(c.Config.AWS.CloudWatchLogs.LogStream),
}
@ -394,26 +357,22 @@ func (c *Client) SendCloudWatchLog(falcopayload types.FalcoPayload) {
go c.CountMetric("outputs", 1, []string{"output:awscloudwatchlogs", "status:error"})
c.Stats.AWSCloudWatchLogs.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "awscloudwatchlogs", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "awscloudwatchlogs"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType+" CloudWatchLogs", err.Error())
log.Printf("[ERROR] : %v CloudWatchLogs - %v\n", c.OutputType, err.Error())
return
}
utils.Log(utils.InfoLvl, c.OutputType+" CloudWatchLogs", fmt.Sprintf("Send Log OK (%v)", resp.ResultMetadata))
log.Printf("[INFO] : %v CloudWatchLogs - Send Log OK (%v)\n", c.OutputType, resp.String())
go c.CountMetric("outputs", 1, []string{"output:awscloudwatchlogs", "status:ok"})
c.Stats.AWSCloudWatchLogs.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "awscloudwatchlogs", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "awscloudwatchlogs"),
attribute.String("status", OK)).Inc()
}
// PutLogEvents will attempt to execute and handle invalid tokens.
func (c *Client) putLogEvents(svc *cloudwatchlogs.Client, input *cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) {
resp, err := svc.PutLogEvents(context.Background(), input)
func (c *Client) putLogEvents(svc *cloudwatchlogs.CloudWatchLogs, input *cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) {
resp, err := svc.PutLogEvents(input)
if err != nil {
if exception, ok := err.(*cloudwatchlogstypes.InvalidSequenceTokenException); ok {
utils.Log(utils.InfoLvl, c.OutputType+" CloudWatchLogs", fmt.Sprintf("Refreshing token for LogGroup: %s LogStream: %s", *input.LogGroupName, *input.LogStreamName))
if exception, ok := err.(*cloudwatchlogs.InvalidSequenceTokenException); ok {
log.Printf("[INFO] : %v Refreshing token for LogGroup: %s LogStream: %s", c.OutputType, *input.LogGroupName, *input.LogStreamName)
input.SequenceToken = exception.ExpectedSequenceToken
return c.putLogEvents(svc, input)
@ -427,7 +386,7 @@ func (c *Client) putLogEvents(svc *cloudwatchlogs.Client, input *cloudwatchlogs.
// PutRecord puts a record in Kinesis
func (c *Client) PutRecord(falcoPayLoad types.FalcoPayload) {
svc := kinesis.NewFromConfig(*c.AWSConfig)
svc := kinesis.New(c.AWSSession)
c.Stats.AWSKinesis.Add(Total, 1)
@ -438,21 +397,17 @@ func (c *Client) PutRecord(falcoPayLoad types.FalcoPayload) {
StreamName: aws.String(c.Config.AWS.Kinesis.StreamName),
}
resp, err := svc.PutRecord(context.Background(), input)
resp, err := svc.PutRecord(input)
if err != nil {
go c.CountMetric("outputs", 1, []string{"output:awskinesis", "status:error"})
c.Stats.AWSKinesis.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "awskinesis", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "awskinesis"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType+" Kinesis", err.Error())
log.Printf("[ERROR] : %v Kinesis - %v\n", c.OutputType, err.Error())
return
}
utils.Log(utils.InfoLvl, c.OutputType+" Kinesis", fmt.Sprintf("Put Record OK (%v)", resp.SequenceNumber))
log.Printf("[INFO] : %v Kinesis - Put Record OK (%v)\n", c.OutputType, resp.SequenceNumber)
go c.CountMetric("outputs", 1, []string{"output:awskinesis", "status:ok"})
c.Stats.AWSKinesis.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "awskinesis", "status": "ok"}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "awskinesis"),
attribute.String("status", OK)).Inc()
}

View File

@ -8,18 +8,16 @@ import (
"errors"
"fmt"
"io"
"log"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/s3"
s3types "github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/embano1/memlog"
"github.com/google/uuid"
"github.com/xitongsys/parquet-go-source/mem"
"github.com/xitongsys/parquet-go/writer"
"go.opentelemetry.io/otel/attribute"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
"github.com/falcosecurity/falcosidekick/types"
)
@ -205,12 +203,10 @@ func (c *Client) EnqueueSecurityLake(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:awssecuritylake.", "status:error"})
c.Stats.AWSSecurityLake.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "awssecuritylake.", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "awssecuritylake"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType+" SecurityLake", err.Error())
log.Printf("[ERROR] : %v SecurityLake - %v\n", c.OutputType, err)
return
}
utils.Log(utils.InfoLvl, c.OutputType+" SecurityLake", fmt.Sprintf("Event queued (%v)", falcopayload.UUID))
log.Printf("[INFO] : %v SecurityLake - Event queued (%v)\n", c.OutputType, falcopayload.UUID)
*c.Config.AWS.SecurityLake.WriteOffset = offset
}
@ -221,7 +217,7 @@ func (c *Client) StartSecurityLakeWorker() {
continue
}
time.Sleep(time.Duration(c.Config.AWS.SecurityLake.Interval) * time.Minute) //nolint:gosec // disable G115
time.Sleep(time.Duration(c.Config.AWS.SecurityLake.Interval) * time.Minute)
}
}
@ -237,9 +233,7 @@ func (c *Client) processNextBatch() error {
go c.CountMetric(Outputs, 1, []string{"output:awssecuritylake.", "status:error"})
c.Stats.AWSSecurityLake.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "awssecuritylake.", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "awssecuritylake"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType+" SecurityLake", err.Error())
log.Printf("[ERROR] : %v SecurityLake - %v\n", c.OutputType, err)
// ctx currently not handled in main
// https://github.com/falcosecurity/falcosidekick/pull/390#discussion_r1081690326
return err
@ -251,8 +245,6 @@ func (c *Client) processNextBatch() error {
go c.CountMetric(Outputs, 1, []string{"output:awssecuritylake.", "status:error"})
c.Stats.AWSSecurityLake.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "awssecuritylake.", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "awssecuritylake"),
attribute.String("status", Error)).Inc()
earliest = earliest - 1 // to ensure next batch includes earliest as we read from ReadOffset+1
msg := fmt.Errorf("slow batch reader: resetting read offset from %d to %d: %v",
@ -260,7 +252,7 @@ func (c *Client) processNextBatch() error {
earliest,
err,
)
utils.Log(utils.ErrorLvl, c.OutputType+" SecurityLake", msg.Error())
log.Printf("[ERROR] : %v SecurityLake - %v\n", c.OutputType, msg)
awslake.ReadOffset = &earliest
return err
}
@ -270,9 +262,7 @@ func (c *Client) processNextBatch() error {
go c.CountMetric(Outputs, 1, []string{"output:awssecuritylake.", "status:error"})
c.Stats.AWSSecurityLake.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "awssecuritylake.", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "awssecuritylake"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType+" SecurityLake", err.Error())
log.Printf("[ERROR] : %v SecurityLake - %v\n", c.OutputType, err)
return err
}
}
@ -284,8 +274,6 @@ func (c *Client) processNextBatch() error {
go c.CountMetric(Outputs, 1, []string{"output:awssecuritylake.", "status:error"})
c.Stats.AWSSecurityLake.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "awssecuritylake.", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "awssecuritylake"),
attribute.String("status", Error)).Inc()
// we don't update ReadOffset to retry and not skip records
return err
}
@ -293,8 +281,6 @@ func (c *Client) processNextBatch() error {
go c.CountMetric(Outputs, 1, []string{"output:awssecuritylake.", "status:ok"})
c.Stats.AWSSecurityLake.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "awssecuritylake.", "status": "ok"}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "awssecuritylake"),
attribute.String("status", OK)).Inc()
// update offset
*awslake.ReadOffset = batch[count-1].Metadata.Offset
@ -309,51 +295,50 @@ func (c *Client) writeParquet(uid string, records []memlog.Record) error {
key := fmt.Sprintf("/%s/region=%s/accountId=%s/eventDay=%s/%s.parquet", c.Config.AWS.SecurityLake.Prefix, c.Config.AWS.SecurityLake.Region, c.Config.AWS.SecurityLake.AccountID, t.Format("20060102"), uid)
ctx, cancelFn := context.WithTimeout(context.Background(), 10*time.Second)
defer cancelFn()
resp, err := s3.NewFromConfig(*c.AWSConfig).PutObject(ctx, &s3.PutObjectInput{
resp, err := s3.New(c.AWSSession).PutObjectWithContext(ctx, &s3.PutObjectInput{
Bucket: aws.String(c.Config.AWS.SecurityLake.Bucket),
Key: aws.String(key),
Body: r,
Body: aws.ReadSeekCloser(r),
ContentType: aws.String("Apache Parquet"),
ACL: s3types.ObjectCannedACLBucketOwnerFullControl,
ACL: aws.String(s3.ObjectCannedACLBucketOwnerFullControl),
})
if err != nil {
utils.Log(utils.ErrorLvl, c.OutputType+" SecurityLake", fmt.Sprintf("Upload parquet file %s.parquet Failed: %v", uid, err))
log.Printf("[ERROR] : %v SecurityLake - Upload parquet file %s.parquet Failed: %v\n", c.OutputType, uid, err)
return err
}
if resp.SSECustomerAlgorithm != nil {
utils.Log(utils.InfoLvl, c.OutputType+" SecurityLake", fmt.Sprintf("Upload parquet file %s.parquet OK (%v) (%v events)", uid, *resp.SSECustomerKeyMD5, len(records)))
log.Printf("[INFO] : %v SecurityLake - Upload parquet file %s.parquet OK (%v) (%v events) \n", c.OutputType, uid, *resp.SSECustomerKeyMD5, len(records))
} else {
utils.Log(utils.InfoLvl, c.OutputType+" SecurityLake", fmt.Sprintf("Upload parquet file %s.parquet OK (%v events)\n", uid, len(records)))
log.Printf("[INFO] : %v SecurityLake - Upload parquet file %s.parquet OK (%v events)\n", c.OutputType, uid, len(records))
}
return nil
})
if err != nil {
utils.Log(utils.ErrorLvl, c.OutputType+" SecurityLake", fmt.Sprintf("Can't create the parquet file %s.parquet: %v", uid, err))
log.Printf("[ERROR] : %v SecurityLake - Can't create the parquet file %s.parquet: %v\n", c.OutputType, uid, err)
return err
}
pw, err := writer.NewParquetWriter(fw, new(OCSFSecurityFinding), 10)
if err != nil {
utils.Log(utils.ErrorLvl, c.OutputType+" SecurityLake", fmt.Sprintf("Can't create the parquet writer: %v", err))
log.Printf("[ERROR] : %v SecurityLake - Can't create the parquet writer: %v\n", c.OutputType, err)
return err
}
for _, i := range records {
var f types.FalcoPayload
if err := json.Unmarshal(i.Data, &f); err != nil {
utils.Log(utils.ErrorLvl, c.OutputType+" SecurityLake", fmt.Sprintf("Unmarshalling error: %v", err))
log.Printf("[ERROR] : %v SecurityLake - Unmarshalling error: %v\n", c.OutputType, err)
continue
}
o := NewOCSFSecurityFinding(f)
if err = pw.Write(o); err != nil {
utils.Log(utils.ErrorLvl, c.OutputType+" SecurityLake", fmt.Sprintf("Parquet writer error: %v", err))
log.Printf("[ERROR] : %v SecurityLake - Parquet writer error: %v\n", c.OutputType, err)
continue
}
}
if err = pw.WriteStop(); err != nil {
utils.Log(utils.ErrorLvl, c.OutputType+" SecurityLake", fmt.Sprintf("Can't stop the parquet writer: %v", err))
log.Printf("[ERROR] : %v SecurityLake - Can't stop the parquet writer: %v\n", c.OutputType, err)
}
if err = fw.Close(); err != nil {
utils.Log(utils.ErrorLvl, c.OutputType+" SecurityLake", fmt.Sprintf("Can't close the parquet file %s.parquet: %v", uid, err))
log.Printf("[ERROR] : %v SecurityLake - Can't close the parquet file %s.parquet: %v\n", c.OutputType, uid, err)
return err
}
return nil

View File

@ -5,28 +5,23 @@ package outputs
import (
"context"
"encoding/json"
"fmt"
"log"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
azeventhubs "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs"
"github.com/DataDog/datadog-go/statsd"
"go.opentelemetry.io/otel/attribute"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
otlpmetrics "github.com/falcosecurity/falcosidekick/outputs/otlp_metrics"
"github.com/falcosecurity/falcosidekick/types"
)
// NewEventHubClient returns a new output.Client for accessing the Azure Event Hub.
func NewEventHubClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics,
otlpMetrics *otlpmetrics.OTLPMetrics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
func NewEventHubClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
return &Client{
OutputType: "AzureEventHub",
Config: config,
Stats: stats,
PromStats: promStats,
OTLPMetrics: otlpMetrics,
StatsdClient: statsdClient,
DogstatsdClient: dogstatsdClient,
}, nil
@ -39,48 +34,48 @@ func (c *Client) EventHubPost(falcopayload types.FalcoPayload) {
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()
utils.Log(utils.InfoLvl, c.OutputType+" EventHub", "Try sending event")
log.Printf("[INFO] : %v EventHub - Try sending event", c.OutputType)
defaultAzureCred, err := azidentity.NewDefaultAzureCredential(nil)
if err != nil {
c.setEventHubErrorMetrics()
utils.Log(utils.ErrorLvl, c.OutputType+" EventHub", err.Error())
log.Printf("[ERROR] : %v EventHub - %v\n", c.OutputType, err.Error())
return
}
producerClient, err := azeventhubs.NewProducerClient(c.Config.Azure.EventHub.Namespace, c.Config.Azure.EventHub.Name, defaultAzureCred, nil)
if err != nil {
c.setEventHubErrorMetrics()
utils.Log(utils.ErrorLvl, c.OutputType+" EventHub", err.Error())
log.Printf("[ERROR] : %v EventHub - %v\n", c.OutputType, err.Error())
return
}
defer producerClient.Close(ctx)
utils.Log(utils.InfoLvl, c.OutputType+" EventHub", "Hub client created")
log.Printf("[INFO] : %v EventHub - Hub client created\n", c.OutputType)
data, err := json.Marshal(falcopayload)
if err != nil {
c.setEventHubErrorMetrics()
utils.Log(utils.ErrorLvl, c.OutputType+" EventHub", fmt.Sprintf("Cannot marshal payload: %v", err))
log.Printf("[ERROR] : Cannot marshal payload: %v", err.Error())
return
}
batch, err := producerClient.NewEventDataBatch(ctx, nil)
if err != nil {
c.setEventHubErrorMetrics()
utils.Log(utils.ErrorLvl, c.OutputType+" EventHub", fmt.Sprintf("Cannot marshal payload: %v", err))
log.Printf("[ERROR] : Cannot marshal payload: %v", err.Error())
return
}
if err := batch.AddEventData(&azeventhubs.EventData{Body: data}, nil); err != nil {
c.setEventHubErrorMetrics()
utils.Log(utils.ErrorLvl, c.OutputType+" EventHub", fmt.Sprintf("Cannot marshal payload: %v", err))
log.Printf("[ERROR] : Cannot marshal payload: %v", err.Error())
return
}
producerClient.SendEventDataBatch(ctx, batch, nil)
if err := producerClient.SendEventDataBatch(ctx, batch, nil); err != nil {
c.setEventHubErrorMetrics()
utils.Log(utils.ErrorLvl, c.OutputType+" EventHub", err.Error())
log.Printf("[ERROR] : %v EventHub - %v\n", c.OutputType, err.Error())
return
}
@ -88,9 +83,7 @@ func (c *Client) EventHubPost(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:azureeventhub", "status:ok"})
c.Stats.AzureEventHub.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "azureeventhub", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "azureeventhub"),
attribute.String("status", OK)).Inc()
utils.Log(utils.InfoLvl, c.OutputType+" EventHub", "Publish OK")
log.Printf("[INFO] : %v EventHub - Publish OK", c.OutputType)
}
// setEventHubErrorMetrics set the error stats
@ -98,6 +91,4 @@ func (c *Client) setEventHubErrorMetrics() {
go c.CountMetric(Outputs, 1, []string{"output:azureeventhub", "status:error"})
c.Stats.AzureEventHub.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "azureeventhub", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "azureeventhub"),
attribute.String("status", Error)).Inc()
}

View File

@ -5,15 +5,14 @@ package outputs
import (
"bytes"
"compress/gzip"
"context"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"log/slog"
"math"
"log"
"net/http"
"net/url"
"os"
@ -21,26 +20,24 @@ import (
"strings"
"sync"
crdClient "sigs.k8s.io/wg-policy-prototypes/policy-report/pkg/generated/v1alpha2/clientset/versioned"
gcpfunctions "cloud.google.com/go/functions/apiv1"
amqp "github.com/rabbitmq/amqp091-go"
wavefront "github.com/wavefronthq/wavefront-sdk-go/senders"
"cloud.google.com/go/pubsub"
"cloud.google.com/go/storage"
"github.com/DataDog/datadog-go/statsd"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go/aws/session"
cloudevents "github.com/cloudevents/sdk-go/v2"
"github.com/segmentio/kafka-go"
"k8s.io/client-go/kubernetes"
mqtt "github.com/eclipse/paho.mqtt.golang"
timescaledb "github.com/jackc/pgx/v5/pgxpool"
amqp "github.com/rabbitmq/amqp091-go"
redis "github.com/redis/go-redis/v9"
"github.com/segmentio/kafka-go"
logstash "github.com/telkomdev/go-stash"
wavefront "github.com/wavefronthq/wavefront-sdk-go/senders"
"golang.org/x/sync/semaphore"
"k8s.io/client-go/kubernetes"
crdClient "sigs.k8s.io/wg-policy-prototypes/policy-report/pkg/generated/v1alpha2/clientset/versioned"
"github.com/falcosecurity/falcosidekick/internal/pkg/batcher"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
otlpmetrics "github.com/falcosecurity/falcosidekick/outputs/otlp_metrics"
"github.com/falcosecurity/falcosidekick/types"
)
@ -95,27 +92,31 @@ const MutualTLSCacertFilename = "/ca.crt"
const HttpPost = "POST"
const HttpPut = "PUT"
// Protocol
const GRPC = "grpc"
// Headers to add to the client before sending the request
type Header struct {
Key string
Value string
}
// Client communicates with the different API.
type Client struct {
OutputType string
// FIXME: This causes race condition if outputs overwrite this URL during requests from multiple go routines
EndpointURL *url.URL
OutputType string
EndpointURL *url.URL
MutualTLSEnabled bool
CheckCert bool
HeaderList []Header
ContentType string
ShutDownFunc func()
Config *types.Configuration
Stats *types.Statistics
PromStats *types.PromStatistics
OTLPMetrics *otlpmetrics.OTLPMetrics
AWSConfig *aws.Config
AWSSession *session.Session
StatsdClient *statsd.Client
DogstatsdClient *statsd.Client
GCPTopicClient *pubsub.Topic
GCPCloudFunctionsClient *gcpfunctions.CloudFunctionsClient
// FIXME: this lock requires a per-output usage lock currently if headers are used -- needs to be refactored
httpClientLock sync.Mutex
GCSStorageClient *storage.Client
KafkaProducer *kafka.Writer
@ -127,99 +128,45 @@ type Client struct {
MQTTClient mqtt.Client
TimescaleDBClient *timescaledb.Pool
RedisClient *redis.Client
OTLPLogsLogger *slog.Logger
LogstashClient *logstash.Stash
// Enable gzip compression
EnableCompression bool
// cached http.Client
httpcli *http.Client
// lock for http client creation
mx sync.Mutex
// common config
cfg types.CommonConfig
// init once on first request
initOnce sync.Once
// maxconcurrent requests limiter
sem *semaphore.Weighted
// batcher
batcher *batcher.Batcher
}
// InitClient returns a new output.Client for accessing the different API.
func NewClient(outputType string, defaultEndpointURL string, cfg types.CommonConfig, params types.InitClientArgs) (*Client, error) {
func NewClient(outputType string, defaultEndpointURL string, mutualTLSEnabled bool, checkCert bool, params types.InitClientArgs) (*Client, error) {
reg := regexp.MustCompile(`(http|nats)(s?)://.*`)
if !reg.MatchString(defaultEndpointURL) {
utils.Log(utils.ErrorLvl, outputType, "Bad Endpoint")
log.Printf("[ERROR] : %v - %v\n", outputType, "Bad Endpoint")
return nil, ErrClientCreation
}
if _, err := url.ParseRequestURI(defaultEndpointURL); err != nil {
utils.Log(utils.ErrorLvl, outputType, err.Error())
log.Printf("[ERROR] : %v - %v\n", outputType, err.Error())
return nil, ErrClientCreation
}
endpointURL, err := url.Parse(defaultEndpointURL)
if err != nil {
utils.Log(utils.ErrorLvl, outputType, err.Error())
log.Printf("[ERROR] : %v - %v\n", outputType, err.Error())
return nil, ErrClientCreation
}
return &Client{
cfg: cfg,
OutputType: outputType,
EndpointURL: endpointURL,
ContentType: DefaultContentType,
Config: params.Config,
Stats: params.Stats,
PromStats: params.PromStats,
OTLPMetrics: params.OTLPMetrics,
StatsdClient: params.StatsdClient,
DogstatsdClient: params.DogstatsdClient,
}, nil
return &Client{OutputType: outputType, EndpointURL: endpointURL, MutualTLSEnabled: mutualTLSEnabled, CheckCert: checkCert, HeaderList: []Header{}, ContentType: DefaultContentType, Config: params.Config, Stats: params.Stats, PromStats: params.PromStats, StatsdClient: params.StatsdClient, DogstatsdClient: params.DogstatsdClient}, nil
}
type RequestOptionFunc func(req *http.Request)
// Get get a payload from Output with GET http method.
func (c *Client) Get(opts ...RequestOptionFunc) error {
return c.sendRequest("GET", nil, nil, opts...)
func (c *Client) Get() error {
return c.sendRequest("GET", nil)
}
// Post sends event (payload) to Output with POST http method.
func (c *Client) Post(payload interface{}, opts ...RequestOptionFunc) error {
return c.sendRequest("POST", payload, nil, opts...)
}
// PostWithResponse sends event (payload) to Output with POST http method and returns a stringified response body
// This is added in order to get the response body and avoid breaking any other code that relies on the Post implmentation
func (c *Client) PostWithResponse(payload interface{}, opts ...RequestOptionFunc) (string, error) {
var responseBody string
err := c.sendRequest("POST", payload, &responseBody, opts...)
return responseBody, err
func (c *Client) Post(payload interface{}) error {
return c.sendRequest("POST", payload)
}
// Put sends event (payload) to Output with PUT http method.
func (c *Client) Put(payload interface{}, opts ...RequestOptionFunc) error {
return c.sendRequest("PUT", payload, nil, opts...)
func (c *Client) Put(payload interface{}) error {
return c.sendRequest("PUT", payload)
}
// Get the response body as inlined string
func (c *Client) getInlinedBodyAsString(resp *http.Response) string {
func getInlinedBodyAsString(resp *http.Response) string {
body, _ := io.ReadAll(resp.Body)
contentEncoding := resp.Header.Get("Content-Encoding")
if contentEncoding == "gzip" {
dec, err := decompressData(body)
if err != nil {
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("Failed to decompress response: %v", err))
return ""
}
body = dec
}
contentType := resp.Header.Get("Content-Type")
if contentType == "application/json" {
var compactedBody bytes.Buffer
@ -232,233 +179,44 @@ func (c *Client) getInlinedBodyAsString(resp *http.Response) string {
return string(body)
}
func compressData(reader io.Reader) ([]byte, error) {
var compressed bytes.Buffer
gw := gzip.NewWriter(&compressed)
if _, err := io.Copy(gw, reader); err != nil {
return nil, err
}
if err := gw.Close(); err != nil {
return nil, err
}
return compressed.Bytes(), nil
}
func decompressData(compressed []byte) (data []byte, err error) {
gr, err := gzip.NewReader(bytes.NewBuffer(compressed))
if err != nil {
return nil, err
}
defer func() {
err = errors.Join(err, gr.Close())
}()
data, err = io.ReadAll(gr)
if err != nil {
return nil, err
}
return data, nil
}
// Post sends event (payload) to Output.
// Returns stringified response body or error
func (c *Client) sendRequest(method string, payload interface{}, responseBody *string, opts ...RequestOptionFunc) error {
// Initialize the semaphore once here
// because currently there are multiple code paths
// where the client is created directly without using NewClient constructor
c.initOnce.Do(func() {
if c.cfg.MaxConcurrentRequests == 0 {
c.sem = semaphore.NewWeighted(math.MaxInt64)
utils.Log(utils.InfoLvl, c.OutputType, "Max concurrent requests: unlimited")
} else {
c.sem = semaphore.NewWeighted(int64(c.cfg.MaxConcurrentRequests))
utils.Log(utils.InfoLvl, c.OutputType, fmt.Sprintf("Max concurrent requests: %v", c.cfg.MaxConcurrentRequests))
}
})
func (c *Client) sendRequest(method string, payload interface{}) error {
// defer + recover to catch panic if output doesn't respond
defer func(c *Client) {
if err := recover(); err != nil {
go c.CountMetric("outputs", 1, []string{"output:" + strings.ToLower(c.OutputType), "status:connectionrefused"})
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprint(err))
log.Printf("[ERROR] : %v - %s", c.OutputType, err)
}
}(c)
body := new(bytes.Buffer)
var reader io.Reader = body
switch v := payload.(type) {
switch payload.(type) {
case influxdbPayload:
fmt.Fprintf(body, "%v", payload)
if c.Config.Debug {
utils.Log(utils.DebugLvl, c.OutputType, fmt.Sprintf("payload : %v", body))
log.Printf("[DEBUG] : %v payload : %v\n", c.OutputType, body)
}
case spyderbatPayload:
zipper := gzip.NewWriter(body)
if err := json.NewEncoder(zipper).Encode(payload); err != nil {
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : %v - %s", c.OutputType, err)
}
zipper.Close()
if c.Config.Debug {
debugBody := new(bytes.Buffer)
if err := json.NewEncoder(debugBody).Encode(payload); err == nil {
utils.Log(utils.DebugLvl, c.OutputType, fmt.Sprintf("payload : %v", body))
log.Printf("[DEBUG] : %v payload : %v\n", c.OutputType, debugBody)
}
}
case io.Reader:
reader = v
case []byte:
reader = bytes.NewBuffer(v)
default:
if err := json.NewEncoder(body).Encode(payload); err != nil {
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : %v - %s", c.OutputType, err)
}
if c.Config.Debug {
utils.Log(utils.DebugLvl, c.OutputType, fmt.Sprintf("payload : %v", body))
log.Printf("[DEBUG] : %v payload : %v\n", c.OutputType, body)
}
}
if c.EnableCompression {
data, err := compressData(reader)
if err != nil {
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("Failed to compress data: %v", err))
return err
}
reader = bytes.NewBuffer(data)
}
client := c.httpClient()
var req *http.Request
var err error
if method == "GET" {
req, err = http.NewRequest(method, c.EndpointURL.String(), nil)
} else {
req, err = http.NewRequest(method, c.EndpointURL.String(), reader)
}
if err != nil {
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
return err
}
req.Header.Set(ContentTypeHeaderKey, c.ContentType)
req.Header.Set(UserAgentHeaderKey, UserAgentHeaderValue)
if c.EnableCompression {
req.Header.Set("Content-Encoding", "gzip")
req.Header.Set("Accept-Encoding", "gzip")
}
// Call request options functions
// Allows the clients to adjust request as needed
for _, opt := range opts {
opt(req)
}
// Using the background context for now
// TODO: Eventually pass the proper context to sendRequest, add pass it to NewRequest call as well
// in order to make the requests cancellable
ctx := context.Background()
err = c.sem.Acquire(ctx, 1)
if err != nil {
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
return err
}
defer c.sem.Release(1)
resp, err := client.Do(req)
if err != nil {
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
go c.CountMetric("outputs", 1, []string{"output:" + strings.ToLower(c.OutputType), "status:connectionrefused"})
return err
}
defer resp.Body.Close()
// Clear out headers - they will be set for the next request.
go c.CountMetric("outputs", 1, []string{"output:" + strings.ToLower(c.OutputType), "status:" + strings.ToLower(http.StatusText(resp.StatusCode))})
switch resp.StatusCode {
case http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent: //200, 201, 202, 204
utils.Log(utils.InfoLvl, c.OutputType, fmt.Sprintf("%v OK (%v)", method, resp.StatusCode))
ot := c.OutputType
logResponse := ot == Kubeless || ot == Openfaas || ot == Fission
if responseBody != nil || logResponse {
s := c.getInlinedBodyAsString(resp)
if responseBody != nil {
// In some cases now we need to capture the response on 200
// For example the Elasticsearch output bulk request that returns 200
// even when some items in the bulk failed
*responseBody = s
}
if logResponse {
utils.Log(utils.InfoLvl, ot, fmt.Sprintf("Function Response : %s", s))
}
}
return nil
case http.StatusBadRequest: //400
msg := c.getInlinedBodyAsString(resp)
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("%v (%v): %s", ErrHeaderMissing, resp.StatusCode, msg))
if msg != "" {
return errors.New(msg)
}
return ErrHeaderMissing
case http.StatusUnauthorized: //401
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("%v (%v): %s", ErrClientAuthenticationError, resp.StatusCode, c.getInlinedBodyAsString(resp)))
return ErrClientAuthenticationError
case http.StatusForbidden: //403
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("%v (%v): %s", ErrForbidden, resp.StatusCode, c.getInlinedBodyAsString(resp)))
return ErrForbidden
case http.StatusNotFound: //404
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("%v (%v): %s", ErrNotFound, resp.StatusCode, c.getInlinedBodyAsString(resp)))
return ErrNotFound
case http.StatusUnprocessableEntity: //422
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("%v (%v): %s", ErrUnprocessableEntityError, resp.StatusCode, c.getInlinedBodyAsString(resp)))
return ErrUnprocessableEntityError
case http.StatusTooManyRequests: //429
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("%v (%v): %s", ErrTooManyRequest, resp.StatusCode, c.getInlinedBodyAsString(resp)))
return ErrTooManyRequest
case http.StatusInternalServerError: //500
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("%v (%v)", ErrTooManyRequest, resp.StatusCode))
return ErrInternalServer
case http.StatusBadGateway: //502
msg := c.getInlinedBodyAsString(resp)
fmt.Println(msg)
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("%v (%v)", ErrTooManyRequest, resp.StatusCode))
return ErrBadGateway
default:
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("unexpected Response (%v)", resp.StatusCode))
return errors.New(resp.Status)
}
}
// httpClient returns http client.
// It returns the cached client if it was successfully configured before, for compatibility.
// It returns misconfigured client as before if some of the configuration steps failed.
// It was only logging the failures in it's original implementation, so keeping it the same.
func (c *Client) httpClient() *http.Client {
c.mx.Lock()
defer c.mx.Unlock()
if c.httpcli != nil {
return c.httpcli
}
customTransport, err := c.configureTransport()
client := &http.Client{
Transport: customTransport,
}
if err != nil {
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
} else {
c.httpcli = client // cache the client instance for future http calls
}
return client
}
// configureTransport configure http transport
// This preserves the previous behavior where it only logged errors, but returned misconfigured transport in case of errors
func (c *Client) configureTransport() (*http.Transport, error) {
customTransport := http.DefaultTransport.(*http.Transport).Clone()
if customTransport.TLSClientConfig == nil {
@ -478,12 +236,12 @@ func (c *Client) configureTransport() (*http.Transport, error) {
if c.Config.TLSClient.CaCertFile != "" {
caCert, err := os.ReadFile(c.Config.TLSClient.CaCertFile)
if err != nil {
return customTransport, err
log.Printf("[ERROR] : %v - %v\n", c.OutputType, err.Error())
}
customTransport.TLSClientConfig.RootCAs.AppendCertsFromPEM(caCert)
}
if c.cfg.MutualTLS {
if c.MutualTLSEnabled {
// Load client cert
var MutualTLSClientCertPath, MutualTLSClientKeyPath, MutualTLSClientCaCertPath string
if c.Config.MutualTLSClient.CertFile != "" {
@ -503,23 +261,110 @@ func (c *Client) configureTransport() (*http.Transport, error) {
}
cert, err := tls.LoadX509KeyPair(MutualTLSClientCertPath, MutualTLSClientKeyPath)
if err != nil {
return customTransport, err
log.Printf("[ERROR] : %v - %v\n", c.OutputType, err.Error())
}
// Load CA cert
caCert, err := os.ReadFile(MutualTLSClientCaCertPath)
if err != nil {
return customTransport, err
log.Printf("[ERROR] : %v - %v\n", c.OutputType, err.Error())
}
customTransport.TLSClientConfig.RootCAs.AppendCertsFromPEM(caCert)
customTransport.TLSClientConfig.Certificates = []tls.Certificate{cert}
} else {
// With MutualTLS enabled, the check cert flag is ignored
if !c.cfg.CheckCert {
if !c.CheckCert {
customTransport.TLSClientConfig = &tls.Config{
InsecureSkipVerify: true, // #nosec G402 This is only set as a result of explicit configuration
}
}
}
return customTransport, nil
client := &http.Client{
Transport: customTransport,
}
req := new(http.Request)
var err error
if method == "GET" {
req, err = http.NewRequest(method, c.EndpointURL.String(), nil)
} else {
req, err = http.NewRequest(method, c.EndpointURL.String(), body)
}
if err != nil {
log.Printf("[ERROR] : %v - %v\n", c.OutputType, err.Error())
}
req.Header.Add(ContentTypeHeaderKey, c.ContentType)
req.Header.Add(UserAgentHeaderKey, UserAgentHeaderValue)
for _, headerObj := range c.HeaderList {
req.Header.Set(headerObj.Key, headerObj.Value)
}
resp, err := client.Do(req)
if err != nil {
c.HeaderList = []Header{}
log.Printf("[ERROR] : %v - %v\n", c.OutputType, err.Error())
go c.CountMetric("outputs", 1, []string{"output:" + strings.ToLower(c.OutputType), "status:connectionrefused"})
return err
}
defer resp.Body.Close()
// Clear out headers - they will be set for the next request.
c.HeaderList = []Header{}
go c.CountMetric("outputs", 1, []string{"output:" + strings.ToLower(c.OutputType), "status:" + strings.ToLower(http.StatusText(resp.StatusCode))})
switch resp.StatusCode {
case http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent: //200, 201, 202, 204
log.Printf("[INFO] : %v - %v OK (%v)\n", c.OutputType, method, resp.StatusCode)
if ot := c.OutputType; ot == Kubeless || ot == Openfaas || ot == Fission {
log.Printf("[INFO] : %v - Function Response : %s\n", ot, getInlinedBodyAsString(resp))
}
return nil
case http.StatusBadRequest: //400
msg := getInlinedBodyAsString(resp)
log.Printf("[ERROR] : %v - %v (%v): %s\n", c.OutputType, ErrHeaderMissing, resp.StatusCode, msg)
if msg != "" {
return fmt.Errorf(msg)
}
return ErrHeaderMissing
case http.StatusUnauthorized: //401
log.Printf("[ERROR] : %v - %v (%v): %s\n", c.OutputType, ErrClientAuthenticationError, resp.StatusCode, getInlinedBodyAsString(resp))
return ErrClientAuthenticationError
case http.StatusForbidden: //403
log.Printf("[ERROR] : %v - %v (%v): %s\n", c.OutputType, ErrForbidden, resp.StatusCode, getInlinedBodyAsString(resp))
return ErrForbidden
case http.StatusNotFound: //404
log.Printf("[ERROR] : %v - %v (%v): %s\n", c.OutputType, ErrNotFound, resp.StatusCode, getInlinedBodyAsString(resp))
return ErrNotFound
case http.StatusUnprocessableEntity: //422
log.Printf("[ERROR] : %v - %v (%v): %s\n", c.OutputType, ErrUnprocessableEntityError, resp.StatusCode, getInlinedBodyAsString(resp))
return ErrUnprocessableEntityError
case http.StatusTooManyRequests: //429
log.Printf("[ERROR] : %v - %v (%v): %s\n", c.OutputType, ErrTooManyRequest, resp.StatusCode, getInlinedBodyAsString(resp))
return ErrTooManyRequest
case http.StatusInternalServerError: //500
log.Printf("[ERROR] : %v - %v (%v)\n", c.OutputType, ErrTooManyRequest, resp.StatusCode)
return ErrInternalServer
case http.StatusBadGateway: //502
log.Printf("[ERROR] : %v - %v (%v)\n", c.OutputType, ErrTooManyRequest, resp.StatusCode)
return ErrBadGateway
default:
log.Printf("[ERROR] : %v - unexpected Response (%v)\n", c.OutputType, resp.StatusCode)
return errors.New(resp.Status)
}
}
// BasicAuth adds an HTTP Basic Authentication compliant header to the Client.
func (c *Client) BasicAuth(username, password string) {
	// Per RFC 7617 the credential is "username:password", base64-encoded.
	// https://datatracker.ietf.org/doc/html/rfc7617
	// This might break I18n, but we can cross that bridge when we come to it.
	encoded := base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
	c.AddHeader(AuthorizationHeaderKey, "Basic "+encoded)
}
// AddHeader adds an HTTP Header to the Client.
// The pair is appended to c.HeaderList as-is: duplicate keys are kept,
// not overwritten or de-duplicated.
func (c *Client) AddHeader(key, value string) {
	c.HeaderList = append(c.HeaderList, Header{Key: key, Value: value})
}

View File

@ -43,11 +43,11 @@ func TestNewClient(t *testing.T) {
PromStats: promStats,
}
testClientOutput := Client{OutputType: "test", EndpointURL: u, cfg: types.CommonConfig{CheckCert: true}, ContentType: "application/json; charset=utf-8", Config: config, Stats: stats, PromStats: promStats}
_, err := NewClient("test", "localhost/%*$¨^!/:;", types.CommonConfig{CheckCert: true}, *initClientArgs)
testClientOutput := Client{OutputType: "test", EndpointURL: u, MutualTLSEnabled: false, CheckCert: true, HeaderList: []Header{}, ContentType: "application/json; charset=utf-8", Config: config, Stats: stats, PromStats: promStats}
_, err := NewClient("test", "localhost/%*$¨^!/:;", false, true, *initClientArgs)
require.NotNil(t, err)
nc, err := NewClient("test", "http://localhost", types.CommonConfig{CheckCert: true}, *initClientArgs)
nc, err := NewClient("test", "http://localhost", false, true, *initClientArgs)
require.Nil(t, err)
require.Equal(t, &testClientOutput, nc)
}
@ -91,7 +91,7 @@ func TestPost(t *testing.T) {
Stats: &types.Statistics{},
PromStats: &types.PromStatistics{},
}
nc, err := NewClient("", ts.URL+i, types.CommonConfig{CheckCert: true}, *initClientArgs)
nc, err := NewClient("", ts.URL+i, false, true, *initClientArgs)
require.Nil(t, err)
require.NotEmpty(t, nc)
@ -111,13 +111,13 @@ func TestAddHeader(t *testing.T) {
Stats: &types.Statistics{},
PromStats: &types.PromStatistics{},
}
nc, err := NewClient("", ts.URL, types.CommonConfig{CheckCert: true}, *initClientArgs)
nc, err := NewClient("", ts.URL, false, true, *initClientArgs)
require.Nil(t, err)
require.NotEmpty(t, nc)
nc.Post("", func(req *http.Request) {
req.Header.Set(headerKey, headerVal)
})
nc.AddHeader(headerKey, headerVal)
nc.Post("")
}
func TestAddBasicAuth(t *testing.T) {
@ -167,13 +167,13 @@ func TestAddBasicAuth(t *testing.T) {
Stats: &types.Statistics{},
PromStats: &types.PromStatistics{},
}
nc, err := NewClient("", ts.URL, types.CommonConfig{CheckCert: true}, *initClientArgs)
nc, err := NewClient("", ts.URL, false, true, *initClientArgs)
require.Nil(t, err)
require.NotEmpty(t, nc)
nc.Post("", func(req *http.Request) {
req.SetBasicAuth(username, password)
})
nc.BasicAuth(username, password)
nc.Post("")
}
func TestHeadersResetAfterReq(t *testing.T) {
@ -188,17 +188,17 @@ func TestHeadersResetAfterReq(t *testing.T) {
Stats: &types.Statistics{},
PromStats: &types.PromStatistics{},
}
nc, err := NewClient("", ts.URL, types.CommonConfig{CheckCert: true}, *initClientArgs)
nc, err := NewClient("", ts.URL, false, true, *initClientArgs)
require.Nil(t, err)
require.NotEmpty(t, nc)
nc.Post("", func(req *http.Request) {
req.Header.Set(headerKey, headerVal)
})
nc.AddHeader(headerKey, headerVal)
nc.Post("", func(req *http.Request) {
req.Header.Set(headerKey, headerVal)
})
nc.Post("")
nc.AddHeader(headerKey, headerVal)
nc.Post("")
}
func TestMutualTlsPost(t *testing.T) {
@ -239,7 +239,7 @@ func TestMutualTlsPost(t *testing.T) {
Stats: &types.Statistics{},
PromStats: &types.PromStatistics{},
}
nc, err := NewClient("", server.URL+Status200, types.CommonConfig{MutualTLS: true, CheckCert: true}, *initClientArgs)
nc, err := NewClient("", server.URL+Status200, true, true, *initClientArgs)
require.Nil(t, err)
require.NotEmpty(t, nc)

View File

@ -5,11 +5,8 @@ package outputs
import (
"bytes"
"fmt"
"net/http"
"log"
"go.opentelemetry.io/otel/attribute"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
"github.com/falcosecurity/falcosidekick/types"
)
@ -19,7 +16,7 @@ import (
const (
tableSlideType = "table"
textSlideType = "text"
botName = "Falcosidekick"
botName = "Falco Sidekick"
)
// Table slide fields
@ -80,7 +77,7 @@ func newCliqPayload(falcopayload types.FalcoPayload, config *types.Configuration
if config.Cliq.MessageFormatTemplate != nil {
buf := &bytes.Buffer{}
if err := config.Cliq.MessageFormatTemplate.Execute(buf, falcopayload); err != nil {
utils.Log(utils.ErrorLvl, "Cliq", fmt.Sprintf("Error expanding Cliq message: %v", err))
log.Printf("[ERROR] : Cliq - Error expanding Cliq message %v", err)
} else {
payload.Text = buf.String()
@ -167,16 +164,15 @@ func newCliqPayload(falcopayload types.FalcoPayload, config *types.Configuration
func (c *Client) CliqPost(falcopayload types.FalcoPayload) {
c.Stats.Cliq.Add(Total, 1)
err := c.Post(newCliqPayload(falcopayload, c.Config), func(req *http.Request) {
req.Header.Set(ContentTypeHeaderKey, "application/json")
})
c.httpClientLock.Lock()
defer c.httpClientLock.Unlock()
c.AddHeader(ContentTypeHeaderKey, "application/json")
err := c.Post(newCliqPayload(falcopayload, c.Config))
if err != nil {
go c.CountMetric(Outputs, 1, []string{"output:cliq", "status:error"})
c.Stats.Cliq.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "cliq", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "cliq"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : Cliq - %v\n", err)
return
}
@ -184,5 +180,4 @@ func (c *Client) CliqPost(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:cliq", "status:ok"})
c.Stats.Cliq.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "cliq", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "cliq"), attribute.String("status", OK)).Inc()
}

View File

@ -16,7 +16,7 @@ func TestNewCliqPayload(t *testing.T) {
expectedOutput := cliqPayload{
Text: "\U000026AA Rule: Test rule Priority: Debug",
Bot: cliqBot{
Name: "Falcosidekick",
Name: "Falco Sidekick",
Image: DefaultIconURL,
},
Slides: []cliqSlide{

View File

@ -4,13 +4,10 @@ package outputs
import (
"context"
"fmt"
"go.opentelemetry.io/otel/attribute"
"log"
cloudevents "github.com/cloudevents/sdk-go/v2"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
"github.com/falcosecurity/falcosidekick/types"
)
@ -22,11 +19,7 @@ func (c *Client) CloudEventsSend(falcopayload types.FalcoPayload) {
client, err := cloudevents.NewClientHTTP()
if err != nil {
go c.CountMetric(Outputs, 1, []string{"output:cloudevents", "status:error"})
c.Stats.CloudEvents.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "cloudevents", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "cloudevents"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("NewDefaultClient : %v", err))
log.Printf("[ERROR] : CloudEvents - NewDefaultClient : %v\n", err)
return
}
c.CloudEventsClient = client
@ -40,7 +33,7 @@ func (c *Client) CloudEventsSend(falcopayload types.FalcoPayload) {
event.SetType("falco.rule.output.v1")
event.SetExtension("priority", falcopayload.Priority.String())
event.SetExtension("rule", falcopayload.Rule)
event.SetExtension("eventsource", falcopayload.Source)
event.SetExtension("event_source", falcopayload.Source)
if falcopayload.Hostname != "" {
event.SetExtension(Hostname, falcopayload.Hostname)
@ -52,16 +45,14 @@ func (c *Client) CloudEventsSend(falcopayload types.FalcoPayload) {
}
if err := event.SetData(cloudevents.ApplicationJSON, falcopayload); err != nil {
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("failed to set data : %v", err))
log.Printf("[ERROR] : CloudEvents, failed to set data : %v\n", err)
}
if result := c.CloudEventsClient.Send(ctx, event); cloudevents.IsUndelivered(result) {
go c.CountMetric(Outputs, 1, []string{"output:cloudevents", "status:error"})
c.Stats.CloudEvents.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "cloudevents", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "cloudevents"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("%v", result))
log.Printf("[ERROR] : CloudEvents - %v\n", result)
return
}
@ -69,7 +60,5 @@ func (c *Client) CloudEventsSend(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:cloudevents", "status:ok"})
c.Stats.CloudEvents.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "cloudevents", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "cloudevents"),
attribute.String("status", OK)).Inc()
utils.Log(utils.InfoLvl, c.OutputType, "Send OK")
log.Printf("[INFO] : CloudEvents - Send OK\n")
}

View File

@ -4,11 +4,9 @@ package outputs
import (
"fmt"
"log"
"sort"
"go.opentelemetry.io/otel/attribute"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
"github.com/falcosecurity/falcosidekick/types"
)
@ -33,7 +31,7 @@ func newDatadogPayload(falcopayload types.FalcoPayload) datadogPayload {
tags = append(tags, fmt.Sprintf("%v:%v", i, falcopayload.OutputFields[i]))
}
tags = append(tags, "source:"+falcopayload.Source, "source:falco")
tags = append(tags, "source:"+falcopayload.Source)
if falcopayload.Hostname != "" {
tags = append(tags, Hostname+":"+falcopayload.Hostname)
}
@ -71,15 +69,11 @@ func (c *Client) DatadogPost(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:datadog", "status:error"})
c.Stats.Datadog.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "datadog", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "datadog"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : Datadog - %v\n", err)
return
}
go c.CountMetric(Outputs, 1, []string{"output:datadog", "status:ok"})
c.Stats.Datadog.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "datadog", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "datadog"),
attribute.String("status", OK)).Inc()
}

View File

@ -1,75 +0,0 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
package outputs
import (
"net/http"
"sort"
"strings"
"go.opentelemetry.io/otel/attribute"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
"github.com/falcosecurity/falcosidekick/types"
)
const (
// DatadogLogsPath is the path of Datadog's logs API
DatadogLogsPath string = "/api/v2/logs"
)
// datadogLogsPayload is the JSON body sent to Datadog's logs intake API
// (see DatadogLogsPath). Empty fields are omitted from the encoded payload.
type datadogLogsPayload struct {
	DDSource string `json:"ddsource,omitempty"` // log source; set to "falco" (lowercased) by newDatadogLogsPayload
	Hostname string `json:"hostname,omitempty"` // host the Falco event originated from
	Service  string `json:"service,omitempty"`  // service name taken from the DatadogLogs configuration
	Message  string `json:"message,omitempty"`  // full rendered Falco payload string
	DDTags   string `json:"ddtags,omitempty"`   // sorted Falco tags, comma-separated
}
// newDatadogLogsPayload maps a Falco event onto the Datadog logs intake
// schema: tags are sorted and comma-joined into ddtags, the source is the
// lowercased Falco constant, the message is the rendered event string, and
// the service comes from the DatadogLogs configuration.
func newDatadogLogsPayload(falcopayload types.FalcoPayload, config *types.Configuration) datadogLogsPayload {
	var d datadogLogsPayload
	if len(falcopayload.Tags) != 0 {
		// Fix: sort a copy so building the payload does not reorder the
		// caller's Tags slice as a hidden side effect.
		tags := make([]string, len(falcopayload.Tags))
		copy(tags, falcopayload.Tags)
		sort.Strings(tags)
		d.DDTags = strings.Join(tags, ",")
	}

	d.Hostname = falcopayload.Hostname
	d.DDSource = strings.ToLower(Falco)
	d.Message = falcopayload.String()
	d.Service = config.DatadogLogs.Service

	return d
}
// DatadogLogsPost posts logs to Datadog.
// The event is converted to a datadogLogsPayload and POSTed to the endpoint
// configured on the Client; the DD-API-KEY header is attached per request
// (only when an API key is configured). Success/failure is recorded in the
// statsd, internal, Prometheus and OTLP metric backends.
func (c *Client) DatadogLogsPost(falcopayload types.FalcoPayload) {
	c.Stats.DatadogLogs.Add(Total, 1)
	reqOpts := []RequestOptionFunc{
		func(req *http.Request) {
			// Attach the API key per-request instead of mutating the
			// Client's shared header list.
			if c.Config.DatadogLogs.APIKey != "" {
				req.Header.Set("DD-API-KEY", c.Config.DatadogLogs.APIKey)
			}
		},
	}

	err := c.Post(newDatadogLogsPayload(falcopayload, c.Config), reqOpts...)
	if err != nil {
		// Record the failure in every metrics backend, log, and bail out.
		go c.CountMetric(Outputs, 1, []string{"output:datadoglogs", "status:error"})
		c.Stats.DatadogLogs.Add(Error, 1)
		c.PromStats.Outputs.With(map[string]string{"destination": "datadoglogs", "status": Error}).Inc()
		c.OTLPMetrics.Outputs.With(attribute.String("destination", "datadoglogs"),
			attribute.String("status", Error)).Inc()
		utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
		return
	}

	// Success path: mirror the same counters with status "ok".
	go c.CountMetric(Outputs, 1, []string{"output:datadoglogs", "status:ok"})
	c.Stats.DatadogLogs.Add(OK, 1)
	c.PromStats.Outputs.With(map[string]string{"destination": "datadoglogs", "status": OK}).Inc()
	c.OTLPMetrics.Outputs.With(attribute.String("destination", "datadoglogs"),
		attribute.String("status", OK)).Inc()
}

View File

@ -1,25 +0,0 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
package outputs
import (
"encoding/json"
"testing"
"github.com/stretchr/testify/require"
"github.com/falcosecurity/falcosidekick/types"
)
func TestNewDatadogLogsPayload(t *testing.T) {
	// NOTE(review): this test marshals newDatadogPayload (the *events*
	// payload) and compares against Datadog events JSON, yet both sides are
	// unmarshalled into datadogLogsPayload, whose json tags
	// (ddsource/hostname/service/message/ddtags) match none of the keys in
	// expectedOutput — so o1 and o2 are both zero values and the final
	// assertion passes vacuously. It presumably should build
	// newDatadogLogsPayload(f, cfg) and compare a logs-intake payload —
	// TODO confirm and fix.
	expectedOutput := `{"title":"Test rule","text":"This is a test from falcosidekick","alert_type":"info","source_type_name":"falco","tags":["proc.name:falcosidekick", "source:syscalls", "source:falco", "hostname:test-host", "example", "test"]}`
	var f types.FalcoPayload
	json.Unmarshal([]byte(falcoTestInput), &f)
	s, _ := json.Marshal(newDatadogPayload(f))
	var o1, o2 datadogLogsPayload
	require.Nil(t, json.Unmarshal([]byte(expectedOutput), &o1))
	require.Nil(t, json.Unmarshal(s, &o2))
	require.Equal(t, o1, o2)
}

View File

@ -12,7 +12,7 @@ import (
)
func TestNewDatadogPayload(t *testing.T) {
expectedOutput := `{"title":"Test rule","text":"This is a test from falcosidekick","alert_type":"info","source_type_name":"falco","tags":["proc.name:falcosidekick", "source:syscalls", "source:falco", "hostname:test-host", "example", "test"]}`
expectedOutput := `{"title":"Test rule","text":"This is a test from falcosidekick","alert_type":"info","source_type_name":"falco","tags":["proc.name:falcosidekick", "source:syscalls", "hostname:test-host", "example", "test"]}`
var f types.FalcoPayload
json.Unmarshal([]byte(falcoTestInput), &f)
s, _ := json.Marshal(newDatadogPayload(f))

View File

@ -4,12 +4,10 @@ package outputs
import (
"fmt"
"log"
"sort"
"strings"
"go.opentelemetry.io/otel/attribute"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
"github.com/falcosecurity/falcosidekick/types"
)
@ -107,9 +105,7 @@ func (c *Client) DiscordPost(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:discord", "status:error"})
c.Stats.Discord.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "discord", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "discord"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : Discord - %v\n", err)
return
}
@ -117,6 +113,4 @@ func (c *Client) DiscordPost(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:discord", "status:ok"})
c.Stats.Discord.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "discord", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "discord"),
attribute.String("status", OK)).Inc()
}

View File

@ -3,14 +3,11 @@
package outputs
import (
"net/http"
"log"
"regexp"
"strconv"
"time"
"go.opentelemetry.io/otel/attribute"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
"github.com/falcosecurity/falcosidekick/types"
)
@ -117,22 +114,20 @@ func (c *Client) DynatracePost(falcopayload types.FalcoPayload) {
c.ContentType = DynatraceContentType
err := c.Post(newDynatracePayload(falcopayload).Payload, func(req *http.Request) {
req.Header.Set("Authorization", "Api-Token "+c.Config.Dynatrace.APIToken)
})
c.httpClientLock.Lock()
defer c.httpClientLock.Unlock()
c.AddHeader("Authorization", "Api-Token "+c.Config.Dynatrace.APIToken)
err := c.Post(newDynatracePayload(falcopayload).Payload)
if err != nil {
go c.CountMetric(Outputs, 1, []string{"output:dynatrace", "status:error"})
c.Stats.Dynatrace.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "dynatrace", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "dynatrace"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : Dynatrace - %v\n", err)
return
}
go c.CountMetric(Outputs, 1, []string{"output:dynatrace", "status:ok"})
c.Stats.Dynatrace.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "dynatrace", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "dynatrace"),
attribute.String("status", OK)).Inc()
}

View File

@ -7,9 +7,9 @@ import (
"strconv"
"testing"
"github.com/stretchr/testify/require"
"github.com/falcosecurity/falcosidekick/types"
"github.com/stretchr/testify/require"
)
func TestNewDynatracePayload(t *testing.T) {

View File

@ -3,19 +3,14 @@
package outputs
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"log"
"net/url"
"regexp"
"strings"
"time"
"go.opentelemetry.io/otel/attribute"
"github.com/falcosecurity/falcosidekick/internal/pkg/batcher"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
"github.com/falcosecurity/falcosidekick/types"
)
@ -24,217 +19,111 @@ type eSPayload struct {
Timestamp time.Time `json:"@timestamp"`
}
type esResponse struct {
type mappingError struct {
Error struct {
RootCause []struct {
Type string `json:"type"`
Reason string `json:"reason"`
} `json:"root_cause"`
Type string `json:"type"`
Reason string `json:"reason"`
} `json:"error"`
Status int `json:"status"`
}
// esBulkResponse is the envelope returned by Elasticsearch's _bulk API.
// Errors is true when at least one item in the batch failed; per-document
// results are carried in Items.
type esBulkResponse struct {
	Errors bool             `json:"errors"`
	Items  []esItemResponse `json:"items"`
}

// esItemResponse is one per-document result in a bulk response. Each bulk
// line is submitted with a "create" action (see marshalESBulkPayload), so
// only the "create" key is decoded.
type esItemResponse struct {
	Create esResponse `json:"create"`
}
// NewElasticsearchClient builds the output Client for Elasticsearch.
// The endpoint URL is <HostPort>/<Index>/<Type>. When batching is enabled,
// a batcher is attached that marshals events into bulk (NDJSON) entries and
// flushes them through elasticsearchPost; gzip compression is toggled from
// the config.
func NewElasticsearchClient(params types.InitClientArgs) (*Client, error) {
	esCfg := params.Config.Elasticsearch
	endpointUrl := fmt.Sprintf("%s/%s/%s", esCfg.HostPort, esCfg.Index, esCfg.Type)
	c, err := NewClient("Elasticsearch", endpointUrl, esCfg.CommonConfig, params)
	if err != nil {
		return nil, err
	}
	if esCfg.Batching.Enabled {
		utils.Log(utils.InfoLvl, c.OutputType, fmt.Sprintf("Batching enabled: %v max bytes, %v interval", esCfg.Batching.BatchSize, esCfg.Batching.FlushInterval))
		// Flush callback: post the pre-marshalled bulk body asynchronously.
		// The empty index argument selects the _bulk endpoint inside
		// elasticsearchPost.
		callbackFn := func(falcoPayloads []types.FalcoPayload, data []byte) {
			go c.elasticsearchPost("", data, falcoPayloads...)
		}
		c.batcher = batcher.New(
			batcher.WithBatchSize(esCfg.Batching.BatchSize),
			batcher.WithFlushInterval(esCfg.Batching.FlushInterval),
			batcher.WithMarshal(c.marshalESBulkPayload),
			batcher.WithCallback(callbackFn),
		)
	}
	if esCfg.EnableCompression {
		c.EnableCompression = true
		utils.Log(utils.InfoLvl, c.OutputType, "Compression enabled")
	}
	return c, nil
}
// ElasticsearchPost sends one Falco event to Elasticsearch. With batching
// enabled the event is queued on the batcher (flushed later as a bulk
// request); otherwise it is marshalled and posted immediately to the
// suffixed index returned by getIndex.
func (c *Client) ElasticsearchPost(falcopayload types.FalcoPayload) {
	if c.Config.Elasticsearch.Batching.Enabled {
		c.batcher.Push(falcopayload)
		return
	}

	payload, err := c.marshalESPayload(falcopayload)
	if err != nil {
		utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("Failed to marshal payload: %v", err))
		// Fix: bail out on marshal failure instead of falling through and
		// posting a nil body to Elasticsearch.
		return
	}

	c.elasticsearchPost(c.getIndex(), payload, falcopayload)
}
var esReasonMappingFieldsRegex *regexp.Regexp = regexp.MustCompile(`\[\w+(\.\w+)+\]`)
// ElasticsearchPost posts event to Elasticsearch
func (c *Client) elasticsearchPost(index string, payload []byte, falcoPayloads ...types.FalcoPayload) {
sz := int64(len(falcoPayloads))
c.Stats.Elasticsearch.Add(Total, sz)
func (c *Client) ElasticsearchPost(falcopayload types.FalcoPayload) {
c.Stats.Elasticsearch.Add(Total, 1)
current := time.Now()
var eURL string
if index == "" {
eURL = c.Config.Elasticsearch.HostPort + "/_bulk"
} else {
eURL = c.Config.Elasticsearch.HostPort + "/" + index + "/" + c.Config.Elasticsearch.Type
switch c.Config.Elasticsearch.Suffix {
case None:
eURL = c.Config.Elasticsearch.HostPort + "/" + c.Config.Elasticsearch.Index + "/" + c.Config.Elasticsearch.Type
case "monthly":
eURL = c.Config.Elasticsearch.HostPort + "/" + c.Config.Elasticsearch.Index + "-" + current.Format("2006.01") + "/" + c.Config.Elasticsearch.Type
case "annually":
eURL = c.Config.Elasticsearch.HostPort + "/" + c.Config.Elasticsearch.Index + "-" + current.Format("2006") + "/" + c.Config.Elasticsearch.Type
default:
eURL = c.Config.Elasticsearch.HostPort + "/" + c.Config.Elasticsearch.Index + "-" + current.Format("2006.01.02") + "/" + c.Config.Elasticsearch.Type
}
endpointURL, err := url.Parse(eURL)
if err != nil {
c.setElasticSearchErrorMetrics(sz)
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
c.setElasticSearchErrorMetrics()
log.Printf("[ERROR] : %v - %v\n", c.OutputType, err.Error())
return
}
reqOpts := []RequestOptionFunc{
// Set request headers
func(req *http.Request) {
if c.Config.Elasticsearch.ApiKey != "" {
req.Header.Set("Authorization", "APIKey "+c.Config.Elasticsearch.ApiKey)
}
if c.Config.Elasticsearch.Username != "" && c.Config.Elasticsearch.Password != "" {
req.SetBasicAuth(c.Config.Elasticsearch.Username, c.Config.Elasticsearch.Password)
}
for i, j := range c.Config.Elasticsearch.CustomHeaders {
req.Header.Set(i, j)
}
},
// Set the final endpointURL
func(req *http.Request) {
// Append pipeline parameter to the URL if configured
if c.Config.Elasticsearch.Pipeline != "" {
query := endpointURL.Query()
query.Set("pipeline", c.Config.Elasticsearch.Pipeline)
endpointURL.RawQuery = query.Encode()
}
// Set request URL
req.URL = endpointURL
},
c.EndpointURL = endpointURL
if c.Config.Elasticsearch.Username != "" && c.Config.Elasticsearch.Password != "" {
c.httpClientLock.Lock()
defer c.httpClientLock.Unlock()
c.BasicAuth(c.Config.Elasticsearch.Username, c.Config.Elasticsearch.Password)
}
var response string
if c.Config.Elasticsearch.Batching.Enabled {
// Use PostWithResponse call when batching is enabled in order to capture response body on 200
res, err := c.PostWithResponse(payload, reqOpts...)
if err != nil {
response = err.Error()
} else {
response = res
}
} else {
// Use regular Post call, this avoid parsing response on http status 200
err = c.Post(payload, reqOpts...)
if err != nil {
response = err.Error()
for i, j := range c.Config.Elasticsearch.CustomHeaders {
c.AddHeader(i, j)
}
payload := eSPayload{FalcoPayload: falcopayload, Timestamp: falcopayload.Time}
if c.Config.Elasticsearch.FlattenFields || c.Config.Elasticsearch.CreateIndexTemplate {
for i, j := range payload.OutputFields {
payload.OutputFields[strings.ReplaceAll(i, ".", "_")] = j
delete(payload.OutputFields, i)
}
}
if response != "" {
if c.Config.Elasticsearch.Batching.Enabled {
var resp esBulkResponse
if err2 := json.Unmarshal([]byte(response), &resp); err2 != nil {
c.setElasticSearchErrorMetrics(sz)
err = c.Post(payload)
if err != nil {
var mappingErr mappingError
if err2 := json.Unmarshal([]byte(err.Error()), &mappingErr); err2 != nil {
c.setElasticSearchErrorMetrics()
return
}
if mappingErr.Error.Type == "document_parsing_exception" {
reg := regexp.MustCompile(`\[\w+(\.\w+)+\]`)
k := reg.FindStringSubmatch(mappingErr.Error.Reason)
if len(k) == 0 {
c.setElasticSearchErrorMetrics()
return
}
if len(resp.Items) != len(falcoPayloads) {
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("mismatched %v responses with %v request payloads", len(resp.Items), len(falcoPayloads)))
c.setElasticSearchErrorMetrics(sz)
if !strings.Contains(k[0], "output_fields") {
c.setElasticSearchErrorMetrics()
return
}
// Check errors. Not using the mapping errors retry approach for batched/bulk requests
// Only mark set the errors and stats
if resp.Errors {
failed := int64(0)
for _, item := range resp.Items {
switch item.Create.Status {
case http.StatusOK, http.StatusCreated:
default:
failed++
}
s := strings.ReplaceAll(k[0], "[output_fields.", "")
s = strings.ReplaceAll(s, "]", "")
for i := range payload.OutputFields {
if strings.HasPrefix(i, s) {
delete(payload.OutputFields, i)
}
c.setElasticSearchErrorMetrics(failed)
// Set success sz that is reported at the end of this function
sz -= failed
}
} else {
// Slightly refactored the original approach to mapping errors, but logic is still the same
// The Request is retried only once without the field that can't be mapped.
// One of the problems with this approach is that if the mapping has two "unmappable" fields
// only the first one is returned with the error and removed from the retried request.
// Do we need to retry without the field? Do we need to keep retrying and removing fields until it succeeds?
var resp esResponse
if err2 := json.Unmarshal([]byte(response), &resp); err2 != nil {
c.setElasticSearchErrorMetrics(sz)
fmt.Println(payload.OutputFields)
log.Printf("[INFO] : %v - %v\n", c.OutputType, "attempt to POST again the payload without the wrong field")
err = c.Post(payload)
if err != nil {
c.setElasticSearchErrorMetrics()
return
}
payload := falcoPayloads[0]
if resp.Error.Type == "document_parsing_exception" {
k := esReasonMappingFieldsRegex.FindStringSubmatch(resp.Error.Reason)
if len(k) == 0 {
c.setElasticSearchErrorMetrics(sz)
return
}
if !strings.Contains(k[0], "output_fields") {
c.setElasticSearchErrorMetrics(sz)
return
}
s := strings.ReplaceAll(k[0], "[output_fields.", "")
s = strings.ReplaceAll(s, "]", "")
for i := range payload.OutputFields {
if strings.HasPrefix(i, s) {
delete(payload.OutputFields, i)
}
}
utils.Log(utils.InfoLvl, c.OutputType, "attempt to POST again the payload without the wrong field")
err = c.Post(payload, reqOpts...)
if err != nil {
c.setElasticSearchErrorMetrics(sz)
return
}
}
}
}
// Setting the success status
go c.CountMetric(Outputs, sz, []string{"output:elasticsearch", "status:ok"})
c.Stats.Elasticsearch.Add(OK, sz)
c.PromStats.Outputs.With(map[string]string{"destination": "elasticsearch", "status": OK}).Add(float64(sz))
c.OTLPMetrics.Outputs.With(attribute.String("destination", "elasticsearch"),
attribute.String("status", OK)).Inc()
go c.CountMetric(Outputs, 1, []string{"output:elasticsearch", "status:ok"})
c.Stats.Elasticsearch.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "elasticsearch", "status": OK}).Inc()
}
func (c *Client) ElasticsearchCreateIndexTemplate(config types.ElasticsearchOutputConfig) error {
d := c
indexExists, err := c.isIndexTemplateExist(config)
if err != nil {
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : %v - %v\n", c.OutputType, err.Error())
return err
}
if indexExists {
utils.Log(utils.InfoLvl, c.OutputType, "Index template already exists")
log.Printf("[INFO] : %v - %v\n", c.OutputType, "Index template already exists")
return nil
}
@ -248,16 +137,16 @@ func (c *Client) ElasticsearchCreateIndexTemplate(config types.ElasticsearchOutp
m = strings.ReplaceAll(m, "${REPLICAS}", fmt.Sprintf("%v", config.NumberOfReplicas))
j := make(map[string]interface{})
if err := json.Unmarshal([]byte(m), &j); err != nil {
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : %v - %v\n", c.OutputType, err.Error())
return err
}
// create the index template by PUT
if err := d.Put(j); err != nil {
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
if d.Put(j) != nil {
log.Printf("[ERROR] : %v - %v\n", c.OutputType, err.Error())
return err
}
utils.Log(utils.InfoLvl, c.OutputType, "Index template created")
log.Printf("[INFO] : %v - %v\n", c.OutputType, "Index template created")
return nil
}
@ -278,63 +167,8 @@ func (c *Client) isIndexTemplateExist(config types.ElasticsearchOutputConfig) (b
}
// setElasticSearchErrorMetrics set the error stats.
// Records n failed events in the statsd, internal, and Prometheus backends.
// NOTE(review): the OTLP counter is Inc()'d once regardless of n, unlike the
// other backends which add n — confirm whether that asymmetry is intended
// (the success path in elasticsearchPost does the same).
func (c *Client) setElasticSearchErrorMetrics(n int64) {
	go c.CountMetric(Outputs, n, []string{"output:elasticsearch", "status:error"})
	c.Stats.Elasticsearch.Add(Error, n)
	c.PromStats.Outputs.With(map[string]string{"destination": "elasticsearch", "status": Error}).Add(float64(n))
	c.OTLPMetrics.Outputs.With(attribute.String("destination", "elasticsearch"),
		attribute.String("status", Error)).Inc()
}
// buildESPayload wraps a Falco event as an eSPayload carrying the event time
// as @timestamp. When FlattenFields or CreateIndexTemplate is configured,
// dots in output-field keys are replaced with underscores so the keys do not
// create nested objects in the Elasticsearch mapping.
func (c *Client) buildESPayload(falcopayload types.FalcoPayload) eSPayload {
	payload := eSPayload{FalcoPayload: falcopayload, Timestamp: falcopayload.Time}
	if c.Config.Elasticsearch.FlattenFields || c.Config.Elasticsearch.CreateIndexTemplate {
		// Fix: build a fresh map instead of mutating in place. The embedded
		// OutputFields map is shared with the caller's payload, and the
		// original code also inserted/deleted keys while ranging over the
		// same map, which has unspecified iteration behavior in Go.
		flattened := make(map[string]interface{}, len(payload.OutputFields))
		for k, v := range payload.OutputFields {
			flattened[strings.ReplaceAll(k, ".", "_")] = v
		}
		payload.OutputFields = flattened
	}
	return payload
}
// marshalESPayload JSON-encodes one event wrapped as an eSPayload
// (with @timestamp and, when configured, flattened output-field keys).
func (c *Client) marshalESPayload(falcopayload types.FalcoPayload) ([]byte, error) {
	return json.Marshal(c.buildESPayload(falcopayload))
}
// marshalESBulkPayload renders one event as a bulk-API (NDJSON) entry:
// a "create" action line naming the target index, followed by the document
// itself, each terminated by a newline.
func (c *Client) marshalESBulkPayload(falcopayload types.FalcoPayload) ([]byte, error) {
	doc, err := c.marshalESPayload(falcopayload)
	if err != nil {
		return nil, err
	}

	var out bytes.Buffer
	out.WriteString(`{"create":{"_index":"` + c.getIndex() + `"}}` + "\n")
	out.Write(doc)
	out.WriteByte('\n')
	return out.Bytes(), nil
}
func (c *Client) getIndex() string {
var index string
current := time.Now()
switch c.Config.Elasticsearch.Suffix {
case None:
index = c.Config.Elasticsearch.Index
case "monthly":
index = c.Config.Elasticsearch.Index + "-" + current.Format("2006.01")
case "annually":
index = c.Config.Elasticsearch.Index + "-" + current.Format("2006")
default:
index = c.Config.Elasticsearch.Index + "-" + current.Format("2006.01.02")
}
return index
func (c *Client) setElasticSearchErrorMetrics() {
go c.CountMetric(Outputs, 1, []string{"output:elasticsearch", "status:error"})
c.Stats.Elasticsearch.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "elasticsearch", "status": Error}).Inc()
}

View File

@ -6,17 +6,14 @@ import (
"context"
"encoding/json"
"fmt"
"net/http"
"log"
"strconv"
"github.com/DataDog/datadog-go/statsd"
"github.com/google/uuid"
"go.opentelemetry.io/otel/attribute"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
otlpmetrics "github.com/falcosecurity/falcosidekick/outputs/otlp_metrics"
"github.com/falcosecurity/falcosidekick/types"
)
@ -29,7 +26,7 @@ const ServicesPath = "/services/"
// NewFissionClient returns a new output.Client for accessing Kubernetes.
func NewFissionClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics,
otlpMetrics *otlpmetrics.OTLPMetrics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
if config.Fission.KubeConfig != "" {
restConfig, err := clientcmd.BuildConfigFromFlags("", config.Fission.KubeConfig)
if err != nil {
@ -44,11 +41,9 @@ func NewFissionClient(config *types.Configuration, stats *types.Statistics, prom
Config: config,
Stats: stats,
PromStats: promStats,
OTLPMetrics: otlpMetrics,
StatsdClient: statsdClient,
DogstatsdClient: dogstatsdClient,
KubernetesClient: clientset,
cfg: config.Fission.CommonConfig,
}, nil
}
@ -61,7 +56,7 @@ func NewFissionClient(config *types.Configuration, stats *types.Statistics, prom
StatsdClient: statsdClient,
}
return NewClient(Fission, endpointUrl, config.Fission.CommonConfig, *initClientArgs)
return NewClient(Fission, endpointUrl, config.Fission.MutualTLS, config.Fission.CheckCert, *initClientArgs)
}
// FissionCall .
@ -84,32 +79,27 @@ func (c *Client) FissionCall(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:Fission", "status:error"})
c.Stats.Fission.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "Fission", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "Fission"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : %s - %v\n", Fission, err.Error())
return
}
utils.Log(utils.InfoLvl, c.OutputType, fmt.Sprintf("Function Response : %v", string(rawbody)))
log.Printf("[INFO] : %s - Function Response : %v\n", Fission, string(rawbody))
} else {
c.httpClientLock.Lock()
defer c.httpClientLock.Unlock()
c.AddHeader(FissionEventIDKey, uuid.New().String())
c.ContentType = FissionContentType
err := c.Post(falcopayload, func(req *http.Request) {
req.Header.Set(FissionEventIDKey, uuid.New().String())
})
err := c.Post(falcopayload)
if err != nil {
go c.CountMetric(Outputs, 1, []string{"output:Fission", "status:error"})
c.Stats.Fission.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "Fission", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "Fission"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : %s - %v\n", Fission, err.Error())
return
}
}
utils.Log(utils.InfoLvl, c.OutputType, fmt.Sprintf("Call Function \"%v\" OK", c.Config.Fission.Function))
log.Printf("[INFO] : %s - Call Function \"%v\" OK\n", Fission, c.Config.Fission.Function)
go c.CountMetric(Outputs, 1, []string{"output:Fission", "status:ok"})
c.Stats.Fission.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "Fission", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "Fission"),
attribute.String("status", OK)).Inc()
}

View File

@ -8,29 +8,27 @@ import (
"encoding/json"
"errors"
"fmt"
"log"
"time"
gcpfunctions "cloud.google.com/go/functions/apiv1"
"cloud.google.com/go/pubsub"
"cloud.google.com/go/storage"
"github.com/DataDog/datadog-go/statsd"
"github.com/googleapis/gax-go/v2"
"go.opentelemetry.io/otel/attribute"
"golang.org/x/oauth2/google"
"google.golang.org/api/option"
gcpfunctionspb "google.golang.org/genproto/googleapis/cloud/functions/v1"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
otlpmetrics "github.com/falcosecurity/falcosidekick/outputs/otlp_metrics"
"cloud.google.com/go/pubsub"
"github.com/DataDog/datadog-go/statsd"
"github.com/googleapis/gax-go/v2"
"golang.org/x/oauth2/google"
"google.golang.org/api/option"
"github.com/falcosecurity/falcosidekick/types"
)
// NewGCPClient returns a new output.Client for accessing the GCP API.
func NewGCPClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics,
otlpMetrics *otlpmetrics.OTLPMetrics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
func NewGCPClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
base64decodedCredentialsData, err := base64.StdEncoding.DecodeString(config.GCP.Credentials)
if err != nil {
utils.Log(utils.ErrorLvl, "GCP", "Erroc.OutputTyper while base64-decoding GCP Credentials")
log.Printf("[ERROR] : GCP - %v\n", "Error while base64-decoding GCP Credentials")
return nil, errors.New("error while base64-decoding GCP Credentials")
}
@ -43,19 +41,19 @@ func NewGCPClient(config *types.Configuration, stats *types.Statistics, promStat
if googleCredentialsData != "" {
credentials, err := google.CredentialsFromJSON(context.Background(), []byte(googleCredentialsData), pubsub.ScopePubSub)
if err != nil {
utils.Log(utils.ErrorLvl, "GCP PubSub", "Error while loading GCP Credentials")
log.Printf("[ERROR] : GCP PubSub - %v\n", "Error while loading GCP Credentials")
return nil, errors.New("error while loading GCP Credentials")
}
pubSubClient, err := pubsub.NewClient(context.Background(), config.GCP.PubSub.ProjectID, option.WithCredentials(credentials))
if err != nil {
utils.Log(utils.ErrorLvl, "GCP PubSub", "Error while creating GCP PubSub Client")
log.Printf("[ERROR] : GCP PubSub - %v\n", "Error while creating GCP PubSub Client")
return nil, errors.New("error while creating GCP PubSub Client")
}
topicClient = pubSubClient.Topic(config.GCP.PubSub.Topic)
} else {
pubSubClient, err := pubsub.NewClient(context.Background(), config.GCP.PubSub.ProjectID)
if err != nil {
utils.Log(utils.ErrorLvl, "GCP PubSub", "Error while creating GCP PubSub Client")
log.Printf("[ERROR] : GCP PubSub - %v\n", "Error while creating GCP PubSub Client")
return nil, errors.New("error while creating GCP PubSub Client")
}
topicClient = pubSubClient.Topic(config.GCP.PubSub.Topic)
@ -63,14 +61,14 @@ func NewGCPClient(config *types.Configuration, stats *types.Statistics, promStat
}
if config.GCP.Storage.Bucket != "" {
credentials, err := google.CredentialsFromJSON(context.Background(), []byte(googleCredentialsData), storage.ScopeReadWrite)
credentials, err := google.CredentialsFromJSON(context.Background(), []byte(googleCredentialsData))
if err != nil {
utils.Log(utils.ErrorLvl, "GCP PubSub", "Error while loading GCS Credentials")
log.Printf("[ERROR] : GCP Storage - %v\n", "Error while loading GCS Credentials")
return nil, errors.New("error while loading GCP Credentials")
}
storageClient, err = storage.NewClient(context.Background(), option.WithCredentials(credentials))
if err != nil {
utils.Log(utils.ErrorLvl, "GCP PubSub", "Error while creating GCP Storage Client")
log.Printf("[ERROR] : GCP Storage - %v\n", "Error while creating GCP Storage Client")
return nil, errors.New("error while creating GCP Storage Client")
}
}
@ -79,18 +77,18 @@ func NewGCPClient(config *types.Configuration, stats *types.Statistics, promStat
if googleCredentialsData != "" {
credentials, err := google.CredentialsFromJSON(context.Background(), []byte(googleCredentialsData), gcpfunctions.DefaultAuthScopes()...)
if err != nil {
utils.Log(utils.ErrorLvl, "GCP CloudFunctions", "Error while loading GCS Credentials")
log.Printf("[ERROR] : GCP CloudFunctions - %v\n", "Error while loading GCS Credentials")
return nil, errors.New("error while loading GCP Credentials")
}
cloudFunctionsClient, err = gcpfunctions.NewCloudFunctionsClient(context.Background(), option.WithCredentials(credentials))
if err != nil {
utils.Log(utils.ErrorLvl, "GCP CloudFunctions", "Error while creating GCP CloudFunctions Client")
log.Printf("[ERROR]: GCP CloudFunctions - %v\n", "Error while creating GCP CloudFunctions Client")
return nil, errors.New("error while creating GCP CloudFunctions Client")
}
} else {
cloudFunctionsClient, err = gcpfunctions.NewCloudFunctionsClient(context.Background())
if err != nil {
utils.Log(utils.ErrorLvl, "GCP CloudFunctions", "Error while creating GCP CloudFunctions Client")
log.Printf("[ERROR]: GCP CloudFunctions - %v\n", "Error while creating GCP CloudFunctions Client")
return nil, errors.New("error while creating GCP CloudFunctions Client")
}
}
@ -104,7 +102,6 @@ func NewGCPClient(config *types.Configuration, stats *types.Statistics, promStat
GCPCloudFunctionsClient: cloudFunctionsClient,
Stats: stats,
PromStats: promStats,
OTLPMetrics: otlpMetrics,
StatsdClient: statsdClient,
DogstatsdClient: dogstatsdClient,
}, nil
@ -123,16 +120,15 @@ func (c *Client) GCPCallCloudFunction(falcopayload types.FalcoPayload) {
}, gax.WithGRPCOptions())
if err != nil {
utils.Log(utils.ErrorLvl, c.OutputType+" CloudFunctions", fmt.Sprintf("Error while calling CloudFunction: %v", err))
log.Printf("[ERROR] : GCPCloudFunctions - %v - %v\n", "Error while calling CloudFunction", err.Error())
c.Stats.GCPPubSub.Add(Error, 1)
go c.CountMetric("outputs", 1, []string{"output:gcpcloudfunctions", "status:error"})
c.PromStats.Outputs.With(map[string]string{"destination": "gcpcloudfunctions", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "gcpcloudfunctions"),
attribute.String("status", Error)).Inc()
return
}
utils.Log(utils.ErrorLvl, c.OutputType+" CloudFunctions", fmt.Sprintf("Call CloudFunction OK (%v)", result.ExecutionId))
log.Printf("[INFO] : GCPCloudFunctions - Call CloudFunction OK (%v)\n", result.ExecutionId)
c.Stats.GCPCloudFunctions.Add(OK, 1)
go c.CountMetric("outputs", 1, []string{"output:gcpcloudfunctions", "status:ok"})
@ -151,21 +147,18 @@ func (c *Client) GCPPublishTopic(falcopayload types.FalcoPayload) {
result := c.GCPTopicClient.Publish(context.Background(), message)
id, err := result.Get(context.Background())
if err != nil {
utils.Log(utils.ErrorLvl, c.OutputType+" PubSub", fmt.Sprintf("Error while publishing message: %v", err))
log.Printf("[ERROR] : GCPPubSub - %v - %v\n", "Error while publishing message", err.Error())
c.Stats.GCPPubSub.Add(Error, 1)
go c.CountMetric("outputs", 1, []string{"output:gcppubsub", "status:error"})
c.PromStats.Outputs.With(map[string]string{"destination": "gcppubsub", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "gcppubsub"),
attribute.String("status", Error)).Inc()
return
}
utils.Log(utils.InfoLvl, c.OutputType+" PubSub", fmt.Sprintf("Send to topic OK (%v)", id))
log.Printf("[INFO] : GCPPubSub - Send to topic OK (%v)\n", id)
c.Stats.GCPPubSub.Add(OK, 1)
go c.CountMetric("outputs", 1, []string{"output:gcppubsub", "status:ok"})
c.PromStats.Outputs.With(map[string]string{"destination": "gcppubsub", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "gcppubsub"),
attribute.String("status", OK)).Inc()
}
// UploadGCS upload payload to
@ -184,37 +177,29 @@ func (c *Client) UploadGCS(falcopayload types.FalcoPayload) {
bucketWriter := c.GCSStorageClient.Bucket(c.Config.GCP.Storage.Bucket).Object(key).NewWriter(context.Background())
n, err := bucketWriter.Write(payload)
if err != nil {
utils.Log(utils.ErrorLvl, c.OutputType+"Storage", fmt.Sprintf("Error while Uploading message: %v", err))
log.Printf("[ERROR] : GCPStorage - %v - %v\n", "Error while Uploading message", err.Error())
c.Stats.GCPStorage.Add(Error, 1)
go c.CountMetric("outputs", 1, []string{"output:gcpstorage", "status:error"})
c.PromStats.Outputs.With(map[string]string{"destination": "gcpstorage", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "gcpstorage"),
attribute.String("status", Error)).Inc()
return
}
if n == 0 {
utils.Log(utils.ErrorLvl, c.OutputType+"Storage", "Empty payload uploaded")
log.Printf("[ERROR] : GCPStorage - %v\n", "Empty payload uploaded")
c.Stats.GCPStorage.Add(Error, 1)
go c.CountMetric("outputs", 1, []string{"output:gcpstorage", "status:error"})
c.PromStats.Outputs.With(map[string]string{"destination": "gcpstorage", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "gcpstorage"),
attribute.String("status", Error)).Inc()
return
}
if err := bucketWriter.Close(); err != nil {
utils.Log(utils.ErrorLvl, c.OutputType+"Storage", fmt.Sprintf("Error while closing the writer: %v", err))
log.Printf("[ERROR] : GCPStorage - %v - %v\n", "Error while closing the writer", err.Error())
c.Stats.GCPStorage.Add(Error, 1)
go c.CountMetric("outputs", 1, []string{"output:gcpstorage", "status:error"})
c.PromStats.Outputs.With(map[string]string{"destination": "gcpstorage", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "gcpstorage"),
attribute.String("status", Error)).Inc()
return
}
utils.Log(utils.InfoLvl, c.OutputType+"Storage", "Upload to bucket OK")
log.Printf("[INFO] : GCPStorage - Upload to bucket OK \n")
c.Stats.GCPStorage.Add(OK, 1)
go c.CountMetric("outputs", 1, []string{"output:gcpstorage", "status:ok"})
c.PromStats.Outputs.With(map[string]string{"destination": "gcpstorage", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "gcpstorage"),
attribute.String("status", OK)).Inc()
}

View File

@ -3,11 +3,8 @@
package outputs
import (
"net/http"
"log"
"go.opentelemetry.io/otel/attribute"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
"github.com/falcosecurity/falcosidekick/types"
)
@ -15,18 +12,18 @@ import (
func (c *Client) CloudRunFunctionPost(falcopayload types.FalcoPayload) {
c.Stats.GCPCloudRun.Add(Total, 1)
err := c.Post(falcopayload, func(req *http.Request) {
if c.Config.GCP.CloudRun.JWT != "" {
req.Header.Set(AuthorizationHeaderKey, Bearer+" "+c.Config.GCP.CloudRun.JWT)
}
})
if c.Config.GCP.CloudRun.JWT != "" {
c.httpClientLock.Lock()
defer c.httpClientLock.Unlock()
c.AddHeader(AuthorizationHeaderKey, Bearer+" "+c.Config.GCP.CloudRun.JWT)
}
err := c.Post(falcopayload)
if err != nil {
go c.CountMetric(Outputs, 1, []string{"output:gcpcloudrun", "status:error"})
c.Stats.GCPCloudRun.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "gcpcloudrun", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "gcpcloudrun"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType+"CloudRun", err.Error())
log.Printf("[ERROR] : GCPCloudRun - %v\n", err.Error())
return
}
@ -34,6 +31,4 @@ func (c *Client) CloudRunFunctionPost(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:gcpcloudrun", "status:ok"})
c.Stats.GCPCloudRun.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "gcpcloudrun", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "gcpcloudrun"),
attribute.String("status", OK)).Inc()
}

View File

@ -4,13 +4,10 @@ package outputs
import (
"bytes"
"fmt"
"log"
"sort"
"strings"
"go.opentelemetry.io/otel/attribute"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
"github.com/falcosecurity/falcosidekick/types"
)
@ -49,7 +46,7 @@ func newGooglechatPayload(falcopayload types.FalcoPayload, config *types.Configu
if config.Googlechat.MessageFormatTemplate != nil {
buf := &bytes.Buffer{}
if err := config.Googlechat.MessageFormatTemplate.Execute(buf, falcopayload); err != nil {
utils.Log(utils.ErrorLvl, "GoogleChat", fmt.Sprintf("Error expanding Google Chat message: %v", err))
log.Printf("[ERROR] : GoogleChat - Error expanding Google Chat message %v", err)
} else {
messageText = buf.String()
}
@ -110,15 +107,11 @@ func (c *Client) GooglechatPost(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:googlechat", "status:error"})
c.Stats.GoogleChat.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "googlechat", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "googlechat"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : GoogleChat - %v\n", err)
return
}
go c.CountMetric(Outputs, 1, []string{"output:googlechat", "status:ok"})
c.Stats.GoogleChat.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "googlechat", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "googlechat"),
attribute.String("status", OK)).Inc()
}

View File

@ -5,13 +5,10 @@ package outputs
import (
"bytes"
"encoding/json"
"net/http"
"log"
"strings"
textTemplate "text/template"
"go.opentelemetry.io/otel/attribute"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
"github.com/falcosecurity/falcosidekick/types"
)
@ -76,7 +73,7 @@ func newGotifyPayload(falcopayload types.FalcoPayload, config *types.Configurati
err = ttmpl.Execute(&outtext, falcopayload)
}
if err != nil {
utils.Log(utils.ErrorLvl, "Gotify", err.Error())
log.Printf("[ERROR] : Gotify - %v\n", err)
return g
}
@ -96,14 +93,16 @@ func newGotifyPayload(falcopayload types.FalcoPayload, config *types.Configurati
func (c *Client) GotifyPost(falcopayload types.FalcoPayload) {
c.Stats.Gotify.Add(Total, 1)
err := c.Post(newGotifyPayload(falcopayload, c.Config), func(req *http.Request) {
if c.Config.Gotify.Token != "" {
req.Header.Set("X-Gotify-Key", c.Config.Gotify.Token)
}
})
if c.Config.Gotify.Token != "" {
c.httpClientLock.Lock()
defer c.httpClientLock.Unlock()
c.AddHeader("X-Gotify-Key", c.Config.Gotify.Token)
}
err := c.Post(newGotifyPayload(falcopayload, c.Config))
if err != nil {
c.setGotifyErrorMetrics()
utils.Log(utils.ErrorLvl, "Gotify", err.Error())
log.Printf("[ERROR] : Gotify - %v\n", err)
return
}
@ -111,7 +110,6 @@ func (c *Client) GotifyPost(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:gotify", "status:ok"})
c.Stats.Gotify.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "gotify", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "gotify"), attribute.String("status", OK)).Inc()
}
// setGotifyErrorMetrics set the error stats
@ -119,6 +117,4 @@ func (c *Client) setGotifyErrorMetrics() {
go c.CountMetric(Outputs, 1, []string{"output:gotify", "status:error"})
c.Stats.Gotify.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "gotify", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "gotify"),
attribute.String("status", Error)).Inc()
}

View File

@ -4,11 +4,8 @@ package outputs
import (
"fmt"
"net/http"
"log"
"go.opentelemetry.io/otel/attribute"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
"github.com/falcosecurity/falcosidekick/types"
)
@ -81,54 +78,47 @@ func newGrafanaOnCallPayload(falcopayload types.FalcoPayload) grafanaOnCallPaylo
func (c *Client) GrafanaPost(falcopayload types.FalcoPayload) {
c.Stats.Grafana.Add(Total, 1)
c.ContentType = GrafanaContentType
c.httpClientLock.Lock()
defer c.httpClientLock.Unlock()
c.AddHeader("Authorization", Bearer+" "+c.Config.Grafana.APIKey)
for i, j := range c.Config.Grafana.CustomHeaders {
c.AddHeader(i, j)
}
err := c.Post(newGrafanaPayload(falcopayload, c.Config), func(req *http.Request) {
req.Header.Set("Authorization", Bearer+" "+c.Config.Grafana.APIKey)
for i, j := range c.Config.Grafana.CustomHeaders {
req.Header.Set(i, j)
}
})
err := c.Post(newGrafanaPayload(falcopayload, c.Config))
if err != nil {
go c.CountMetric(Outputs, 1, []string{"output:grafana", "status:error"})
c.Stats.Grafana.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "grafana", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "grafana"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : Grafana - %v\n", err)
return
}
go c.CountMetric(Outputs, 1, []string{"output:grafana", "status:ok"})
c.Stats.Grafana.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "grafana", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "grafana"),
attribute.String("status", OK)).Inc()
}
// GrafanaOnCallPost posts event to grafana onCall
func (c *Client) GrafanaOnCallPost(falcopayload types.FalcoPayload) {
c.Stats.GrafanaOnCall.Add(Total, 1)
c.ContentType = GrafanaContentType
c.httpClientLock.Lock()
defer c.httpClientLock.Unlock()
for i, j := range c.Config.GrafanaOnCall.CustomHeaders {
c.AddHeader(i, j)
}
err := c.Post(newGrafanaOnCallPayload(falcopayload), func(req *http.Request) {
for i, j := range c.Config.GrafanaOnCall.CustomHeaders {
req.Header.Set(i, j)
}
})
err := c.Post(newGrafanaOnCallPayload(falcopayload))
if err != nil {
go c.CountMetric(Outputs, 1, []string{"output:grafanaoncall", "status:error"})
c.Stats.Grafana.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "grafanaoncall", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "grafanaoncall"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : Grafana OnCall - %v\n", err)
return
}
go c.CountMetric(Outputs, 1, []string{"output:grafanaoncall", "status:ok"})
c.Stats.Grafana.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "grafanaoncall", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "grafanaoncall"),
attribute.String("status", OK)).Inc()
}

View File

@ -3,12 +3,9 @@
package outputs
import (
"net/http"
"log"
"strings"
"go.opentelemetry.io/otel/attribute"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
"github.com/falcosecurity/falcosidekick/types"
)
@ -43,20 +40,20 @@ func newInfluxdbPayload(falcopayload types.FalcoPayload) influxdbPayload {
func (c *Client) InfluxdbPost(falcopayload types.FalcoPayload) {
c.Stats.Influxdb.Add(Total, 1)
err := c.Post(newInfluxdbPayload(falcopayload), func(req *http.Request) {
req.Header.Set("Accept", "application/json")
c.httpClientLock.Lock()
defer c.httpClientLock.Unlock()
c.AddHeader("Accept", "application/json")
if c.Config.Influxdb.Token != "" {
req.Header.Set("Authorization", "Token "+c.Config.Influxdb.Token)
}
})
if c.Config.Influxdb.Token != "" {
c.AddHeader("Authorization", "Token "+c.Config.Influxdb.Token)
}
err := c.Post(newInfluxdbPayload(falcopayload))
if err != nil {
go c.CountMetric(Outputs, 1, []string{"output:influxdb", "status:error"})
c.Stats.Influxdb.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "influxdb", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "influxdb"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : InfluxDB - %v\n", err)
return
}
@ -64,6 +61,4 @@ func (c *Client) InfluxdbPost(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:influxdb", "status:ok"})
c.Stats.Influxdb.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "influxdb", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "influxdb"),
attribute.String("status", OK)).Inc()
}

View File

@ -8,6 +8,7 @@ import (
"crypto/x509"
"encoding/json"
"fmt"
"log"
"net"
"strings"
"time"
@ -16,16 +17,12 @@ import (
"github.com/segmentio/kafka-go"
"github.com/segmentio/kafka-go/sasl/plain"
"github.com/segmentio/kafka-go/sasl/scram"
"go.opentelemetry.io/otel/attribute"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
otlpmetrics "github.com/falcosecurity/falcosidekick/outputs/otlp_metrics"
"github.com/falcosecurity/falcosidekick/types"
)
// NewKafkaClient returns a new output.Client for accessing the Apache Kafka.
func NewKafkaClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics,
otlpMetrics *otlpmetrics.OTLPMetrics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
func NewKafkaClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
transport := &kafka.Transport{
Dial: (&net.Dialer{
@ -39,7 +36,7 @@ func NewKafkaClient(config *types.Configuration, stats *types.Statistics, promSt
caCertPool, err := x509.SystemCertPool()
if err != nil {
utils.Log(utils.ErrorLvl, "Kafka", fmt.Sprintf("failed to initialize root CAs: %v", err))
log.Printf("[ERROR] : Kafka - failed to initialize root CAs: %v", err)
}
transport.TLS = &tls.Config{
@ -76,7 +73,7 @@ func NewKafkaClient(config *types.Configuration, stats *types.Statistics, promSt
}
}
if err != nil {
utils.Log(utils.ErrorLvl, "Kafka", err.Error())
log.Printf("[ERROR] : Kafka - %v\n", err)
return nil, err
}
@ -102,7 +99,7 @@ func NewKafkaClient(config *types.Configuration, stats *types.Statistics, promSt
case "round_robin":
kafkaWriter.Balancer = &kafka.RoundRobin{}
default:
utils.Log(utils.ErrorLvl, "Kafka", fmt.Sprintf("unsupported balancer %q", config.Kafka.Balancer))
log.Printf("[ERROR] : Kafka - unsupported balancer %q\n", config.Kafka.Balancer)
return nil, fmt.Errorf("unsupported balancer %q", config.Kafka.Balancer)
}
@ -118,7 +115,7 @@ func NewKafkaClient(config *types.Configuration, stats *types.Statistics, promSt
case "NONE":
// leave as default, none
default:
utils.Log(utils.ErrorLvl, "Kafka", fmt.Sprintf("unsupported compression %q", config.Kafka.Compression))
log.Printf("[ERROR] : Kafka - unsupported compression %q\n", config.Kafka.Compression)
return nil, fmt.Errorf("unsupported compression %q", config.Kafka.Compression)
}
@ -130,7 +127,7 @@ func NewKafkaClient(config *types.Configuration, stats *types.Statistics, promSt
case "NONE":
kafkaWriter.RequiredAcks = kafka.RequireNone
default:
utils.Log(utils.ErrorLvl, "Kafka", fmt.Sprintf("unsupported required ACKs %q", config.Kafka.RequiredACKs))
log.Printf("[ERROR] : Kafka - unsupported required ACKs %q\n", config.Kafka.RequiredACKs)
return nil, fmt.Errorf("unsupported required ACKs %q", config.Kafka.RequiredACKs)
}
@ -139,7 +136,6 @@ func NewKafkaClient(config *types.Configuration, stats *types.Statistics, promSt
Config: config,
Stats: stats,
PromStats: promStats,
OTLPMetrics: otlpMetrics,
StatsdClient: statsdClient,
DogstatsdClient: dogstatsdClient,
KafkaProducer: kafkaWriter,
@ -155,7 +151,7 @@ func (c *Client) KafkaProduce(falcopayload types.FalcoPayload) {
falcoMsg, err := json.Marshal(falcopayload)
if err != nil {
c.incrKafkaErrorMetrics(1)
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("failed to marshalling message: %v", err))
log.Printf("[ERROR] : Kafka - %v - %v\n", "failed to marshalling message", err.Error())
return
}
@ -167,11 +163,11 @@ func (c *Client) KafkaProduce(falcopayload types.FalcoPayload) {
err = c.KafkaProducer.WriteMessages(context.Background(), kafkaMsg)
if err != nil {
c.incrKafkaErrorMetrics(1)
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : Kafka - %v\n", err.Error())
return
} else {
c.incrKafkaSuccessMetrics(1)
utils.Log(utils.InfoLvl, c.OutputType, "Publish OK")
log.Printf("[INFO] : Kafka - Publish OK\n")
}
}
@ -179,10 +175,10 @@ func (c *Client) KafkaProduce(falcopayload types.FalcoPayload) {
func (c *Client) handleKafkaCompletion(messages []kafka.Message, err error) {
if err != nil {
c.incrKafkaErrorMetrics(len(messages))
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("(%d) %v", len(messages), err))
log.Printf("[ERROR] : Kafka (%d) - %v\n", len(messages), err)
} else {
c.incrKafkaSuccessMetrics(len(messages))
utils.Log(utils.InfoLvl, c.OutputType, fmt.Sprintf("(%d) - Publish OK", len(messages)))
log.Printf("[INFO] : Kafka (%d) - Publish OK\n", len(messages))
}
}
@ -191,8 +187,6 @@ func (c *Client) incrKafkaSuccessMetrics(add int) {
go c.CountMetric("outputs", int64(add), []string{"output:kafka", "status:ok"})
c.Stats.Kafka.Add(OK, int64(add))
c.PromStats.Outputs.With(map[string]string{"destination": "kafka", "status": OK}).Add(float64(add))
c.OTLPMetrics.Outputs.With(attribute.String("destination", "kafka"),
attribute.String("status", OK)).Inc()
}
// incrKafkaErrorMetrics increments the error stats
@ -200,6 +194,4 @@ func (c *Client) incrKafkaErrorMetrics(add int) {
go c.CountMetric(Outputs, int64(add), []string{"output:kafka", "status:error"})
c.Stats.Kafka.Add(Error, int64(add))
c.PromStats.Outputs.With(map[string]string{"destination": "kafka", "status": Error}).Add(float64(add))
c.OTLPMetrics.Outputs.With(attribute.String("destination", "kafka"),
attribute.String("status", Error)).Inc()
}

View File

@ -6,10 +6,8 @@ import (
"encoding/base64"
"encoding/json"
"fmt"
"log"
"go.opentelemetry.io/otel/attribute"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
"github.com/falcosecurity/falcosidekick/types"
)
@ -40,9 +38,7 @@ func (c *Client) KafkaRestPost(falcopayload types.FalcoPayload) {
if err != nil {
c.Stats.KafkaRest.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "kafkarest", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "kafkarest"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("failed to marshalling message: %v", err))
log.Printf("[ERROR] : Kafka Rest - %v - %v\n", "failed to marshalling message", err.Error())
return
}
@ -59,9 +55,7 @@ func (c *Client) KafkaRestPost(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:kafkarest", "status:error"})
c.Stats.KafkaRest.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "kafkarest", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "kafkarest"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : Kafka Rest - %v\n", err.Error())
return
}
@ -69,6 +63,4 @@ func (c *Client) KafkaRestPost(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:kafkarest", "status:ok"})
c.Stats.KafkaRest.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "kafkarest", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "kafkarest"),
attribute.String("status", OK)).Inc()
}

View File

@ -6,17 +6,14 @@ import (
"context"
"encoding/json"
"fmt"
"net/http"
"log"
"strconv"
"github.com/DataDog/datadog-go/statsd"
"github.com/google/uuid"
"go.opentelemetry.io/otel/attribute"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
otlpmetrics "github.com/falcosecurity/falcosidekick/outputs/otlp_metrics"
"github.com/falcosecurity/falcosidekick/types"
)
@ -29,8 +26,7 @@ const KubelessEventTypeValue = "falco"
const KubelessContentType = "application/json"
// NewKubelessClient returns a new output.Client for accessing Kubernetes.
func NewKubelessClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics,
otlpMetrics *otlpmetrics.OTLPMetrics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
func NewKubelessClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
if config.Kubeless.Kubeconfig != "" {
restConfig, err := clientcmd.BuildConfigFromFlags("", config.Kubeless.Kubeconfig)
if err != nil {
@ -45,11 +41,9 @@ func NewKubelessClient(config *types.Configuration, stats *types.Statistics, pro
Config: config,
Stats: stats,
PromStats: promStats,
OTLPMetrics: otlpMetrics,
StatsdClient: statsdClient,
DogstatsdClient: dogstatsdClient,
KubernetesClient: clientset,
cfg: config.Kubeless.CommonConfig,
}, nil
}
@ -59,11 +53,10 @@ func NewKubelessClient(config *types.Configuration, stats *types.Statistics, pro
Stats: stats,
DogstatsdClient: dogstatsdClient,
PromStats: promStats,
OTLPMetrics: otlpMetrics,
StatsdClient: statsdClient,
}
return NewClient("Kubeless", endpointUrl, config.Kubeless.CommonConfig, *initClientArgs)
return NewClient("Kubeless", endpointUrl, config.Kubeless.MutualTLS, config.Kubeless.CheckCert, *initClientArgs)
}
// KubelessCall .
@ -85,34 +78,29 @@ func (c *Client) KubelessCall(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:kubeless", "status:error"})
c.Stats.Kubeless.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "kubeless", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "kubeless"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : Kubeless - %v\n", err)
return
}
utils.Log(utils.InfoLvl, c.OutputType, fmt.Sprintf("Function Response : %v", string(rawbody)))
log.Printf("[INFO] : Kubeless - Function Response : %v\n", string(rawbody))
} else {
c.httpClientLock.Lock()
defer c.httpClientLock.Unlock()
c.AddHeader(KubelessEventIDKey, uuid.New().String())
c.AddHeader(KubelessEventTypeKey, KubelessEventTypeValue)
c.AddHeader(KubelessEventNamespaceKey, c.Config.Kubeless.Namespace)
c.ContentType = KubelessContentType
err := c.Post(falcopayload, func(req *http.Request) {
req.Header.Set(KubelessEventIDKey, uuid.New().String())
req.Header.Set(KubelessEventTypeKey, KubelessEventTypeValue)
req.Header.Set(KubelessEventNamespaceKey, c.Config.Kubeless.Namespace)
})
err := c.Post(falcopayload)
if err != nil {
go c.CountMetric(Outputs, 1, []string{"output:kubeless", "status:error"})
c.Stats.Kubeless.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "kubeless", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "kubeless"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : Kubeless - %v\n", err)
return
}
}
utils.Log(utils.InfoLvl, c.OutputType, fmt.Sprintf("Call Function \"%v\" OK", c.Config.Kubeless.Function))
log.Printf("[INFO] : Kubeless - Call Function \"%v\" OK\n", c.Config.Kubeless.Function)
go c.CountMetric(Outputs, 1, []string{"output:kubeless", "status:ok"})
c.Stats.Kubeless.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "kubeless", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "kubeless"),
attribute.String("status", OK)).Inc()
}

View File

@ -1,149 +0,0 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
package outputs
import (
"crypto/tls"
"crypto/x509"
"encoding/json"
"fmt"
"os"
"regexp"
"github.com/DataDog/datadog-go/statsd"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
"github.com/falcosecurity/falcosidekick/types"
"github.com/telkomdev/go-stash"
)
/*
Logstash throws a jsonparse error if keys contain an index, e.g., "key[0]".
This function is meant to get rid of the index brackets format in favor of dots.
For the previous example, the "key[0]" value will be replaced by "key.0".
*/
func replaceKeysWithIndexes(data map[string]interface{}) map[string]interface{} {
newData := make(map[string]interface{})
re := regexp.MustCompile(`\[(\d+)\]`)
for key, value := range data {
newKey := re.ReplaceAllStringFunc(key, func(match string) string {
return "." + re.FindStringSubmatch(match)[1]
})
// Recursively process nested maps
if nestedMap, ok := value.(map[string]interface{}); ok {
newData[newKey] = replaceKeysWithIndexes(nestedMap)
} else {
newData[newKey] = value
}
}
return newData
}
func firstValid(paths []string) string {
for _, path := range paths {
if _, err := os.Stat(path); err == nil {
return path
}
}
return ""
}
// NewLogstashClient returns a new output.Client for accessing a Logstash server.
//
// When mutual TLS is enabled, certificate material is resolved with a
// three-level precedence: Logstash-specific files first, then the generic
// MutualTLSClient files, then the shared MutualTLSFilesPath directory
// (see firstValid). When mutual TLS is disabled and CheckCert is false,
// server certificate verification is skipped entirely.
// Returns an error if the certificates cannot be loaded or the initial
// connection to the Logstash server fails.
func NewLogstashClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics,
	statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {

	var tlsCfg *tls.Config
	if mTLS := config.Logstash.MutualTLS; mTLS {
		// Get certificates
		var MutualTLSClientCertPath, MutualTLSClientKeyPath, MutualTLSClientCaCertPath string
		MutualTLSClientCertPath = firstValid([]string{config.Logstash.CertFile, config.MutualTLSClient.CertFile, config.MutualTLSFilesPath + "/client.crt"})
		MutualTLSClientKeyPath = firstValid([]string{config.Logstash.KeyFile, config.MutualTLSClient.KeyFile, config.MutualTLSFilesPath + "/client.key"})
		MutualTLSClientCaCertPath = firstValid([]string{config.Logstash.CaCertFile, config.MutualTLSClient.CaCertFile, config.MutualTLSFilesPath + "/ca.crt"})

		cert, err := tls.LoadX509KeyPair(MutualTLSClientCertPath, MutualTLSClientKeyPath)
		if err != nil {
			err = fmt.Errorf("failed to load logstash SSL certificate: %w", err)
			utils.Log(utils.ErrorLvl, "Logstash", err.Error())
			return nil, err
		}

		caCert, err := os.ReadFile(MutualTLSClientCaCertPath)
		if err != nil {
			err = fmt.Errorf("failed to load logstash SSL CA certificate: %w", err)
			utils.Log(utils.ErrorLvl, "Logstash", err.Error())
			return nil, err
		}

		// Configure TLS
		// Fall back to an empty pool if the system pool is unavailable,
		// so the configured CA certificate below is still honored.
		pool, err := x509.SystemCertPool()
		if err != nil {
			pool = x509.NewCertPool()
		}
		tlsCfg = &tls.Config{
			MinVersion:   tls.VersionTLS12,
			Certificates: []tls.Certificate{cert},
			RootCAs:      pool,
		}
		tlsCfg.RootCAs.AppendCertsFromPEM(caCert)
	} else {
		// The check cert flag and mutual tls are mutually exclusive
		if !config.Logstash.CheckCert {
			tlsCfg = &tls.Config{
				InsecureSkipVerify: true, // #nosec G402 This is only set as a result of explicit configuration
			}
		}
	}

	// TLS is enabled when either plain TLS or mutual TLS is requested.
	lClient, err := stash.Connect(config.Logstash.Address, config.Logstash.Port, stash.SetTLSConfig(tlsCfg), stash.SetTLS(config.Logstash.TLS || config.Logstash.MutualTLS))
	if err != nil {
		err = fmt.Errorf("misconfiguration, cannot connect to the logstash server: %w", err)
		utils.Log(utils.ErrorLvl, "Logstash", err.Error())
		return nil, err
	}
	utils.Log(utils.InfoLvl, "Logstash", "connected to logstash server")

	return &Client{
		OutputType:      "Logstash",
		Config:          config,
		LogstashClient:  lClient,
		Stats:           stats,
		PromStats:       promStats,
		StatsdClient:    statsdClient,
		DogstatsdClient: dogstatsdClient,
	}, nil
}
// LogstashPost sends a Falco event to the configured Logstash server.
//
// Output field keys are normalized (bracketed indexes rewritten to dot
// notation, which Logstash requires) and the configured extra tags are
// appended before the payload is marshalled to JSON and written over the
// stash connection. Statistics, Prometheus and statsd counters are updated
// for both success and failure.
func (c *Client) LogstashPost(falcopayload types.FalcoPayload) {
	status := OK
	loglevel := utils.InfoLvl
	c.Stats.Logstash.Add(Total, 1)

	falcopayload.OutputFields = replaceKeysWithIndexes(falcopayload.OutputFields)
	falcopayload.Tags = append(falcopayload.Tags, c.Config.Logstash.Tags...)

	logstashPayload, err := json.Marshal(falcopayload)
	if err != nil {
		utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("failed to marshal falcopayload: %v", err))
		c.Stats.Logstash.Add(Error, 1)
		c.PromStats.Outputs.With(map[string]string{"destination": "logstash", "status": Error}).Inc()
		// Keep the statsd counter in sync with the other error paths:
		// the write path below and every other output count errors too.
		go c.CountMetric(Outputs, 1, []string{"output:logstash", "status:error"})
		return
	}

	// n is the number of bytes written; reported in the status log line.
	n, err := c.LogstashClient.Write(logstashPayload)
	if err != nil {
		status = Error
		loglevel = utils.ErrorLvl
	}

	c.Stats.Logstash.Add(status, 1)
	c.PromStats.Outputs.With(map[string]string{"destination": "logstash", "status": status}).Inc()
	go c.CountMetric(Outputs, 1, []string{"output:logstash", fmt.Sprintf("status:%v", status)})
	utils.Log(loglevel, c.OutputType, fmt.Sprintf("output.logstash status=%v (%v)", status, n))
}

View File

@ -4,18 +4,13 @@ package outputs
import (
"fmt"
"net/http"
"log"
"sort"
"strings"
"go.opentelemetry.io/otel/attribute"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
"github.com/falcosecurity/falcosidekick/types"
)
const LokiOut string = "Loki"
type lokiPayload struct {
Streams []lokiStream `json:"streams"`
}
@ -31,34 +26,22 @@ type lokiValue = []string
const LokiContentType = "application/json"
func newLokiPayload(falcopayload types.FalcoPayload, config *types.Configuration) lokiPayload {
s := make(map[string]string)
s := make(map[string]string, 3+len(falcopayload.OutputFields)+len(config.Loki.ExtraLabelsList)+len(falcopayload.Tags))
s["rule"] = falcopayload.Rule
s["source"] = falcopayload.Source
s["priority"] = falcopayload.Priority.String()
if k8sNs, ok := falcopayload.OutputFields["k8s.ns.name"].(string); ok {
s["k8s_ns_name"] = k8sNs
}
if k8sPod, ok := falcopayload.OutputFields["k8s.pod.name"].(string); ok {
s["k8s_pod_name"] = k8sPod
}
for i, j := range falcopayload.OutputFields {
switch v := j.(type) {
case string:
for k := range config.Customfields {
if i == k {
s[strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(i, ".", "_"), "]", ""), "[", "")] = strings.ReplaceAll(v, "\"", "")
}
}
for k := range config.Templatedfields {
if i == k {
s[strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(i, ".", "_"), "]", ""), "[", "")] = strings.ReplaceAll(v, "\"", "")
s[strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(i, ".", ""), "]", ""), "[", "")] = strings.ReplaceAll(v, "\"", "")
}
}
for _, k := range config.Loki.ExtraLabelsList {
if i == k {
s[strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(i, ".", "_"), "]", ""), "[", "")] = strings.ReplaceAll(v, "\"", "")
s[strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(i, ".", ""), "]", ""), "[", "")] = strings.ReplaceAll(v, "\"", "")
}
}
default:
@ -75,36 +58,35 @@ func newLokiPayload(falcopayload types.FalcoPayload, config *types.Configuration
s["tags"] = strings.Join(falcopayload.Tags, ",")
}
var v string
if config.Loki.Format == "json" {
v = falcopayload.String()
} else {
v = falcopayload.Output
}
return lokiPayload{Streams: []lokiStream{
{
Stream: s,
Values: []lokiValue{[]string{fmt.Sprintf("%v", falcopayload.Time.UnixNano()), v}},
Values: []lokiValue{[]string{fmt.Sprintf("%v", falcopayload.Time.UnixNano()), falcopayload.Output}},
},
}}
}
func lokiConfigureTenant(cfg *types.Configuration, req *http.Request) {
if cfg.Loki.Tenant != "" {
req.Header.Set("X-Scope-OrgID", cfg.Loki.Tenant)
func (c *Client) configureTenant() {
if c.Config.Loki.Tenant != "" {
c.httpClientLock.Lock()
defer c.httpClientLock.Unlock()
c.AddHeader("X-Scope-OrgID", c.Config.Loki.Tenant)
}
}
func lokiConfigureAuth(cfg *types.Configuration, req *http.Request) {
if cfg.Loki.User != "" && cfg.Loki.APIKey != "" {
req.SetBasicAuth(cfg.Loki.User, cfg.Loki.APIKey)
func (c *Client) configureAuth() {
if c.Config.Loki.User != "" && c.Config.Loki.APIKey != "" {
c.httpClientLock.Lock()
defer c.httpClientLock.Unlock()
c.BasicAuth(c.Config.Loki.User, c.Config.Loki.APIKey)
}
}
func lokiConfigureCustomHeaders(cfg *types.Configuration, req *http.Request) {
for i, j := range cfg.Loki.CustomHeaders {
req.Header.Set(i, j)
func (c *Client) configureCustomHeaders() {
c.httpClientLock.Lock()
defer c.httpClientLock.Unlock()
for i, j := range c.Config.Loki.CustomHeaders {
c.AddHeader(i, j)
}
}
@ -113,25 +95,20 @@ func (c *Client) LokiPost(falcopayload types.FalcoPayload) {
c.Stats.Loki.Add(Total, 1)
c.ContentType = LokiContentType
err := c.Post(newLokiPayload(falcopayload, c.Config), func(req *http.Request) {
lokiConfigureTenant(c.Config, req)
lokiConfigureAuth(c.Config, req)
lokiConfigureCustomHeaders(c.Config, req)
})
c.configureTenant()
c.configureAuth()
c.configureCustomHeaders()
err := c.Post(newLokiPayload(falcopayload, c.Config))
if err != nil {
go c.CountMetric(Outputs, 1, []string{"output:loki", "status:error"})
c.Stats.Loki.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "loki", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "loki"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, LokiOut, err.Error())
log.Printf("[ERROR] : Loki - %v\n", err)
return
}
go c.CountMetric(Outputs, 1, []string{"output:loki", "status:ok"})
c.Stats.Loki.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "loki", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "loki"),
attribute.String("status", OK)).Inc()
}

View File

@ -4,13 +4,10 @@ package outputs
import (
"bytes"
"fmt"
"log"
"sort"
"strings"
"go.opentelemetry.io/otel/attribute"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
"github.com/falcosecurity/falcosidekick/types"
)
@ -81,7 +78,7 @@ func newMattermostPayload(falcopayload types.FalcoPayload, config *types.Configu
if config.Mattermost.MessageFormatTemplate != nil {
buf := &bytes.Buffer{}
if err := config.Mattermost.MessageFormatTemplate.Execute(buf, falcopayload); err != nil {
utils.Log(utils.ErrorLvl, "Mattermost", fmt.Sprintf("Error expanding Mattermost message %v", err))
log.Printf("[ERROR] : Mattermost - Error expanding Mattermost message %v", err)
} else {
messageText = buf.String()
}
@ -134,9 +131,7 @@ func (c *Client) MattermostPost(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:mattermost", "status:error"})
c.Stats.Mattermost.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "mattermost", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "mattermost"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : Mattermost - %v\n", err)
return
}
@ -144,6 +139,4 @@ func (c *Client) MattermostPost(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:mattermost", "status:ok"})
c.Stats.Mattermost.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "mattermost", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "mattermost"),
attribute.String("status", OK)).Inc()
}

View File

@ -4,21 +4,18 @@ package outputs
import (
"crypto/tls"
"fmt"
"log"
"github.com/DataDog/datadog-go/statsd"
mqtt "github.com/eclipse/paho.mqtt.golang"
"github.com/google/uuid"
"go.opentelemetry.io/otel/attribute"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
otlpmetrics "github.com/falcosecurity/falcosidekick/outputs/otlp_metrics"
"github.com/falcosecurity/falcosidekick/types"
)
// NewMQTTClient returns a new output.Client for accessing Kubernetes.
func NewMQTTClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics,
otlpMetrics *otlpmetrics.OTLPMetrics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
options := mqtt.NewClientOptions()
options.AddBroker(config.MQTT.Broker)
@ -33,7 +30,7 @@ func NewMQTTClient(config *types.Configuration, stats *types.Statistics, promSta
}
}
options.OnConnectionLost = func(client mqtt.Client, err error) {
utils.Log(utils.ErrorLvl, "MQTT", fmt.Sprintf("Connection lost: %v", err))
log.Printf("[ERROR] : MQTT - Connection lost: %v\n", err.Error())
}
client := mqtt.NewClient(options)
@ -44,7 +41,6 @@ func NewMQTTClient(config *types.Configuration, stats *types.Statistics, promSta
MQTTClient: client,
Stats: stats,
PromStats: promStats,
OTLPMetrics: otlpMetrics,
StatsdClient: statsdClient,
DogstatsdClient: dogstatsdClient,
}, nil
@ -60,9 +56,7 @@ func (c *Client) MQTTPublish(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:mqtt", "status:error"})
c.Stats.MQTT.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "mqtt", "status": err.Error()}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "mqtt"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : %s - %v\n", MQTT, err.Error())
return
}
defer c.MQTTClient.Disconnect(100)
@ -70,15 +64,12 @@ func (c *Client) MQTTPublish(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:mqtt", "status:error"})
c.Stats.MQTT.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "mqtt", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "mqtt"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : %s - %v\n", MQTT, err.Error())
return
}
utils.Log(utils.InfoLvl, c.OutputType, "Message published")
log.Printf("[INFO] : %s - Message published\n", MQTT)
go c.CountMetric(Outputs, 1, []string{"output:mqtt", "status:ok"})
c.Stats.MQTT.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "mqtt", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "mqtt"), attribute.String("status", OK)).Inc()
}

View File

@ -3,11 +3,8 @@
package outputs
import (
"net/http"
"log"
"go.opentelemetry.io/otel/attribute"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
"github.com/falcosecurity/falcosidekick/types"
)
@ -15,22 +12,24 @@ import (
func (c *Client) N8NPost(falcopayload types.FalcoPayload) {
c.Stats.N8N.Add(Total, 1)
err := c.Post(falcopayload, func(req *http.Request) {
if c.Config.N8N.User != "" && c.Config.N8N.Password != "" {
req.SetBasicAuth(c.Config.N8N.User, c.Config.N8N.Password)
}
if c.Config.N8N.User != "" && c.Config.N8N.Password != "" {
c.httpClientLock.Lock()
defer c.httpClientLock.Unlock()
c.BasicAuth(c.Config.N8N.User, c.Config.N8N.Password)
}
if c.Config.N8N.HeaderAuthName != "" && c.Config.N8N.HeaderAuthValue != "" {
req.Header.Set(c.Config.N8N.HeaderAuthName, c.Config.N8N.HeaderAuthValue)
}
})
if c.Config.N8N.HeaderAuthName != "" && c.Config.N8N.HeaderAuthValue != "" {
c.httpClientLock.Lock()
defer c.httpClientLock.Unlock()
c.AddHeader(c.Config.N8N.HeaderAuthName, c.Config.N8N.HeaderAuthValue)
}
err := c.Post(falcopayload)
if err != nil {
go c.CountMetric(Outputs, 1, []string{"output:n8n", "status:error"})
c.Stats.N8N.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "n8n", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "n8n"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : N8N - %v\n", err.Error())
return
}
@ -38,5 +37,4 @@ func (c *Client) N8NPost(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:n8n", "status:ok"})
c.Stats.N8N.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "n8n", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "n8n"), attribute.String("status", OK)).Inc()
}

View File

@ -4,60 +4,49 @@ package outputs
import (
"encoding/json"
"log"
"regexp"
"strings"
nats "github.com/nats-io/nats.go"
"go.opentelemetry.io/otel/attribute"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
"github.com/falcosecurity/falcosidekick/types"
)
var slugRegExp = regexp.MustCompile("[^a-z0-9]+")
const defaultNatsSubjects = "falco.<priority>.<rule>"
var slugRegularExpression = regexp.MustCompile("[^a-z0-9]+")
// NatsPublish publishes event to NATS
func (c *Client) NatsPublish(falcopayload types.FalcoPayload) {
c.Stats.Nats.Add(Total, 1)
subject := c.Config.Nats.SubjectTemplate
if len(subject) == 0 {
subject = defaultNatsSubjects
}
subject = strings.ReplaceAll(subject, "<priority>", strings.ToLower(falcopayload.Priority.String()))
subject = strings.ReplaceAll(subject, "<rule>", strings.Trim(slugRegExp.ReplaceAllString(strings.ToLower(falcopayload.Rule), "_"), "_"))
nc, err := nats.Connect(c.EndpointURL.String())
if err != nil {
c.setNatsErrorMetrics()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : NATS - %v\n", err)
return
}
defer nc.Flush()
defer nc.Close()
r := strings.Trim(slugRegularExpression.ReplaceAllString(strings.ToLower(falcopayload.Rule), "_"), "_")
j, err := json.Marshal(falcopayload)
if err != nil {
c.setNatsErrorMetrics()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
c.setStanErrorMetrics()
log.Printf("[ERROR] : STAN - %v\n", err.Error())
return
}
err = nc.Publish(subject, j)
err = nc.Publish("falco."+strings.ToLower(falcopayload.Priority.String())+"."+r, j)
if err != nil {
c.setNatsErrorMetrics()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : NATS - %v\n", err)
return
}
go c.CountMetric("outputs", 1, []string{"output:nats", "status:ok"})
c.Stats.Nats.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "nats", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "nats"), attribute.String("status", OK)).Inc()
utils.Log(utils.InfoLvl, c.OutputType, "Publish OK")
log.Printf("[INFO] : NATS - Publish OK\n")
}
// setNatsErrorMetrics set the error stats
@ -65,7 +54,4 @@ func (c *Client) setNatsErrorMetrics() {
go c.CountMetric(Outputs, 1, []string{"output:nats", "status:error"})
c.Stats.Nats.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "nats", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "nats"),
attribute.String("status", Error)).Inc()
}

View File

@ -3,11 +3,9 @@
package outputs
import (
"net/http"
"encoding/base64"
"log"
"go.opentelemetry.io/otel/attribute"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
"github.com/falcosecurity/falcosidekick/types"
)
@ -15,22 +13,24 @@ import (
func (c *Client) NodeRedPost(falcopayload types.FalcoPayload) {
c.Stats.NodeRed.Add(Total, 1)
err := c.Post(falcopayload, func(req *http.Request) {
if c.Config.NodeRed.User != "" && c.Config.NodeRed.Password != "" {
req.SetBasicAuth(c.Config.NodeRed.User, c.Config.NodeRed.Password)
}
c.httpClientLock.Lock()
defer c.httpClientLock.Unlock()
if c.Config.NodeRed.User != "" && c.Config.NodeRed.Password != "" {
c.AddHeader("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(c.Config.NodeRed.User+":"+c.Config.NodeRed.Password)))
}
if len(c.Config.NodeRed.CustomHeaders) != 0 {
for i, j := range c.Config.NodeRed.CustomHeaders {
req.Header.Set(i, j)
c.AddHeader(i, j)
}
})
}
err := c.Post(falcopayload)
if err != nil {
go c.CountMetric(Outputs, 1, []string{"output:nodered", "status:error"})
c.Stats.NodeRed.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "nodered", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "nodered"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : NodeRed - %v\n", err.Error())
return
}
@ -38,6 +38,4 @@ func (c *Client) NodeRedPost(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:nodered", "status:ok"})
c.Stats.NodeRed.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "nodered", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "nodered"),
attribute.String("status", OK)).Inc()
}

View File

@ -6,22 +6,19 @@ import (
"context"
"encoding/json"
"fmt"
"log"
"strconv"
"github.com/DataDog/datadog-go/statsd"
"github.com/google/uuid"
"go.opentelemetry.io/otel/attribute"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
otlpmetrics "github.com/falcosecurity/falcosidekick/outputs/otlp_metrics"
"github.com/falcosecurity/falcosidekick/types"
)
// NewOpenfaasClient returns a new output.Client for accessing Kubernetes.
func NewOpenfaasClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics,
otlpMetrics *otlpmetrics.OTLPMetrics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
func NewOpenfaasClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
if config.Openfaas.Kubeconfig != "" {
restConfig, err := clientcmd.BuildConfigFromFlags("", config.Openfaas.Kubeconfig)
if err != nil {
@ -36,7 +33,6 @@ func NewOpenfaasClient(config *types.Configuration, stats *types.Statistics, pro
Config: config,
Stats: stats,
PromStats: promStats,
OTLPMetrics: otlpMetrics,
StatsdClient: statsdClient,
DogstatsdClient: dogstatsdClient,
KubernetesClient: clientset,
@ -49,11 +45,10 @@ func NewOpenfaasClient(config *types.Configuration, stats *types.Statistics, pro
Stats: stats,
DogstatsdClient: dogstatsdClient,
PromStats: promStats,
OTLPMetrics: otlpMetrics,
StatsdClient: statsdClient,
}
return NewClient(Openfaas, endpointUrl, config.Openfaas.CommonConfig, *initClientArgs)
return NewClient(Openfaas, endpointUrl, config.Openfaas.MutualTLS, config.Openfaas.CheckCert, *initClientArgs)
}
// OpenfaasCall .
@ -73,28 +68,22 @@ func (c *Client) OpenfaasCall(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:openfaas", "status:error"})
c.Stats.Openfaas.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "openfaas", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "openfaas"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : %v - %v\n", Openfaas, err)
return
}
utils.Log(utils.InfoLvl, c.OutputType, fmt.Sprintf("Function Response : %v", string(rawbody)))
log.Printf("[INFO] : %v - Function Response : %v\n", Openfaas, string(rawbody))
} else {
err := c.Post(falcopayload)
if err != nil {
go c.CountMetric(Outputs, 1, []string{"output:openfaas", "status:error"})
c.Stats.Openfaas.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "openfaas", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "openfaas"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : %v - %v\n", Openfaas, err)
return
}
}
utils.Log(utils.InfoLvl, c.OutputType, fmt.Sprintf("Call Function \"%v\" OK", c.Config.Openfaas.FunctionName+"."+c.Config.Openfaas.FunctionNamespace))
log.Printf("[INFO] : %v - Call Function \"%v\" OK\n", Openfaas, c.Config.Openfaas.FunctionName+"."+c.Config.Openfaas.FunctionNamespace)
go c.CountMetric(Outputs, 1, []string{"output:openfaas", "status:ok"})
c.Stats.Openfaas.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "openfaas", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "openfaas"),
attribute.String("status", OK)).Inc()
}

View File

@ -3,11 +3,8 @@
package outputs
import (
"net/http"
"log"
"go.opentelemetry.io/otel/attribute"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
"github.com/falcosecurity/falcosidekick/types"
)
@ -15,18 +12,19 @@ import (
func (c *Client) OpenObservePost(falcopayload types.FalcoPayload) {
c.Stats.OpenObserve.Add(Total, 1)
err := c.Post(falcopayload, func(req *http.Request) {
if c.Config.OpenObserve.Username != "" && c.Config.OpenObserve.Password != "" {
req.SetBasicAuth(c.Config.OpenObserve.Username, c.Config.OpenObserve.Password)
}
if c.Config.OpenObserve.Username != "" && c.Config.OpenObserve.Password != "" {
c.httpClientLock.Lock()
defer c.httpClientLock.Unlock()
c.BasicAuth(c.Config.OpenObserve.Username, c.Config.OpenObserve.Password)
}
for i, j := range c.Config.OpenObserve.CustomHeaders {
req.Header.Set(i, j)
}
})
if err != nil {
for i, j := range c.Config.OpenObserve.CustomHeaders {
c.AddHeader(i, j)
}
if err := c.Post(falcopayload); err != nil {
c.setOpenObserveErrorMetrics()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : OpenObserve - %v\n", err)
return
}
@ -34,8 +32,6 @@ func (c *Client) OpenObservePost(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:openobserve", "status:ok"})
c.Stats.OpenObserve.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "openobserve", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "openobserve"),
attribute.String("status", OK)).Inc()
}
// setOpenObserveErrorMetrics set the error stats
@ -43,6 +39,4 @@ func (c *Client) setOpenObserveErrorMetrics() {
go c.CountMetric(Outputs, 1, []string{"output:openobserve", "status:error"})
c.Stats.OpenObserve.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "openobserve", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "openobserve"),
attribute.String("status", Error)).Inc()
}

View File

@ -3,12 +3,9 @@
package outputs
import (
"net/http"
"log"
"strings"
"go.opentelemetry.io/otel/attribute"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
"github.com/falcosecurity/falcosidekick/types"
)
@ -67,17 +64,16 @@ func newOpsgeniePayload(falcopayload types.FalcoPayload) opsgeniePayload {
// OpsgeniePost posts event to OpsGenie
func (c *Client) OpsgeniePost(falcopayload types.FalcoPayload) {
c.Stats.Opsgenie.Add(Total, 1)
c.httpClientLock.Lock()
defer c.httpClientLock.Unlock()
c.AddHeader(AuthorizationHeaderKey, "GenieKey "+c.Config.Opsgenie.APIKey)
err := c.Post(newOpsgeniePayload(falcopayload), func(req *http.Request) {
req.Header.Set(AuthorizationHeaderKey, "GenieKey "+c.Config.Opsgenie.APIKey)
})
err := c.Post(newOpsgeniePayload(falcopayload))
if err != nil {
go c.CountMetric(Outputs, 1, []string{"output:opsgenie", "status:error"})
c.Stats.Opsgenie.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "opsgenie", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "opsgenie"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : OpsGenie - %v\n", err)
return
}
@ -85,6 +81,4 @@ func (c *Client) OpsgeniePost(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:opsgenie", "status:ok"})
c.Stats.Opsgenie.Add("ok", 1)
c.PromStats.Outputs.With(map[string]string{"destination": "opsgenie", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "opsgenie"),
attribute.String("status", OK)).Inc()
}

View File

@ -8,48 +8,37 @@ import (
"errors"
"fmt"
"hash/fnv"
"log"
"time"
"github.com/DataDog/datadog-go/statsd"
"github.com/falcosecurity/falcosidekick/types"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
otlpmetrics "github.com/falcosecurity/falcosidekick/outputs/otlp_metrics"
"github.com/falcosecurity/falcosidekick/types"
)
// Unit-testing helper
var getTracerProvider = otel.GetTracerProvider
func NewOtlpTracesClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics,
otlpMetrics *otlpmetrics.OTLPMetrics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
func NewOtlpTracesClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
initClientArgs := &types.InitClientArgs{
Config: config,
Stats: stats,
DogstatsdClient: dogstatsdClient,
PromStats: promStats,
OTLPMetrics: otlpMetrics,
StatsdClient: statsdClient,
}
otlpClient, err := NewClient("OTLP Traces", config.OTLP.Traces.Endpoint, types.CommonConfig{}, *initClientArgs)
otlpClient, err := NewClient("OTLPTraces", config.OTLP.Traces.Endpoint, false, false, *initClientArgs)
if err != nil {
return nil, err
}
ctx := context.Background()
shutDownFunc, err := OTLPTracesInit(otlpClient, config, ctx)
shutDownFunc, err := otlpInit(config)
if err != nil {
utils.Log(utils.ErrorLvl, "OTLP Traces", fmt.Sprintf("Error client creation: %v", err))
log.Printf("[ERROR] : OLTP Traces - Error client creation: %v\n", err)
return nil, err
}
otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
utils.Log(utils.ErrorLvl, "OTLP", err.Error())
}))
utils.Log(utils.InfoLvl, "OTLP Traces", "Client created")
log.Printf("[INFO] : OTLP Traces - %+v\n", config.OTLP.Traces)
otlpClient.ShutDownFunc = shutDownFunc
return otlpClient, nil
}
@ -78,16 +67,16 @@ func (c *Client) newTrace(falcopayload types.FalcoPayload) (*trace.Span, error)
span.SetAttributes(attribute.String("source", falcopayload.Source))
span.SetAttributes(attribute.String("priority", falcopayload.Priority.String()))
span.SetAttributes(attribute.String("rule", falcopayload.Rule))
span.SetAttributes(attribute.String("output", falcopayload.Output))
span.SetAttributes(attribute.String("hostname", falcopayload.Hostname))
span.SetAttributes(attribute.StringSlice("tags", falcopayload.Tags))
for k, v := range falcopayload.OutputFields {
span.SetAttributes(attribute.String(k, fmt.Sprintf("%v", v)))
}
span.AddEvent(falcopayload.Output, trace.EventOption(trace.WithTimestamp(falcopayload.Time)))
span.End(trace.WithTimestamp(endTime))
if c.Config.Debug {
utils.Log(utils.DebugLvl, c.OutputType, fmt.Sprintf("Payload generated successfully for traceid=%s", span.SpanContext().TraceID()))
log.Printf("[DEBUG] : OTLP Traces - payload generated successfully for traceid=%s", span.SpanContext().TraceID())
}
return &span, nil
@ -101,10 +90,17 @@ func (c *Client) OTLPTracesPost(falcopayload types.FalcoPayload) {
_, err := c.newTrace(falcopayload)
if err != nil {
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("Error generating trace: %v", err))
go c.CountMetric(Outputs, 1, []string{"output:otlptraces", "status:error"})
c.Stats.OTLPTraces.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "otlptraces", "status": Error}).Inc()
log.Printf("[ERROR] : OLTP Traces - Error generating trace: %v\n", err)
return
}
utils.Log(utils.InfoLvl, c.OutputType, "Sending trace")
// Setting the success status
go c.CountMetric(Outputs, 1, []string{"output:otlptraces", "status:ok"})
c.Stats.OTLPTraces.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "otlptraces", "status": OK}).Inc()
log.Println("[INFO] : OLTP Traces - OK")
}
func generateTraceID(falcopayload types.FalcoPayload) (trace.TraceID, error) {

View File

@ -5,18 +5,17 @@ package outputs
import (
"context"
"fmt"
"log"
"os"
"strings"
"github.com/falcosecurity/falcosidekick/types"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
otelresource "go.opentelemetry.io/otel/sdk/resource"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
semconv "go.opentelemetry.io/otel/semconv/v1.23.1"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
"github.com/falcosecurity/falcosidekick/types"
)
const (
@ -32,25 +31,18 @@ func newResource() *otelresource.Resource {
)
}
func installTracesExportPipeline(config *types.Configuration, ctx context.Context) (func(context.Context) error, error) {
var exporter sdktrace.SpanExporter
var err error
switch config.OTLP.Traces.Protocol {
case GRPC:
opts := []otlptracegrpc.Option{}
if !config.OTLP.Traces.CheckCert {
opts = append(opts, otlptracegrpc.WithInsecure())
}
exporter, err = otlptracegrpc.New(ctx, opts...)
default:
opts := []otlptracehttp.Option{}
if !config.OTLP.Traces.CheckCert {
opts = append(opts, otlptracehttp.WithInsecure())
}
exporter, err = otlptracehttp.New(ctx, opts...)
func installExportPipeline(config *types.Configuration, ctx context.Context) (func(context.Context) error, error) {
var client otlptrace.Client
switch config.OTLP.Traces.CheckCert {
case true:
client = otlptracehttp.NewClient()
case false:
client = otlptracehttp.NewClient(otlptracehttp.WithInsecure())
}
exporter, err := otlptrace.New(ctx, client)
if err != nil {
return nil, fmt.Errorf("failed to create Traces exporter: %v", err)
return nil, fmt.Errorf("creating OTLP trace exporter: %w", err)
}
withBatcher := sdktrace.WithBatcher(exporter)
@ -66,7 +58,7 @@ func installTracesExportPipeline(config *types.Configuration, ctx context.Contex
return tracerProvider.Shutdown, nil
}
func OTLPTracesInit(client *Client, config *types.Configuration, ctx context.Context) (func(), error) {
func otlpInit(config *types.Configuration) (func(), error) {
// As config.OTLP.Traces fields may have been set by our own config (e.g. YAML),
// we need to set SDK environment variables accordingly.
os.Setenv("OTEL_EXPORTER_OTLP_TRACES_ENDPOINT", strings.TrimSpace(config.OTLP.Traces.Endpoint))
@ -84,14 +76,15 @@ func OTLPTracesInit(client *Client, config *types.Configuration, ctx context.Con
os.Setenv(i, j)
}
}
ctx := context.Background()
// Registers a tracer Provider globally.
shutdown, err := installTracesExportPipeline(config, ctx)
shutdown, err := installExportPipeline(config, ctx)
if err != nil {
return nil, err
}
shutDownCallback := func() {
if err := shutdown(ctx); err != nil {
utils.Log(utils.ErrorLvl, "OTLP Traces", err.Error())
log.Printf("[ERROR] : OLTP Traces - Error: %v\n", err)
}
}

View File

@ -1,121 +0,0 @@
package outputs
import (
"context"
"fmt"
"log/slog"
"os"
"strings"
"github.com/DataDog/datadog-go/statsd"
"go.opentelemetry.io/contrib/bridges/otelslog"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc"
"go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp"
sdklog "go.opentelemetry.io/otel/sdk/log"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
otlpmetrics "github.com/falcosecurity/falcosidekick/outputs/otlp_metrics"
"github.com/falcosecurity/falcosidekick/types"
)
func NewOtlpLogsClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics,
otlpMetrics *otlpmetrics.OTLPMetrics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
initClientArgs := &types.InitClientArgs{
Config: config,
Stats: stats,
DogstatsdClient: dogstatsdClient,
PromStats: promStats,
OTLPMetrics: otlpMetrics,
StatsdClient: statsdClient,
}
otlpClient, err := NewClient("OTLP Logs", config.OTLP.Logs.Endpoint, types.CommonConfig{}, *initClientArgs)
if err != nil {
return nil, err
}
ctx := context.Background()
loggerProvider, err := OTLPLogsInit(ctx, config)
if err != nil {
utils.Log(utils.ErrorLvl, "OTLP Logs", fmt.Sprintf("Error Logger creation: %v", err))
return nil, err
}
otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
utils.Log(utils.ErrorLvl, "OTLP", err.Error())
}))
utils.Log(utils.InfoLvl, "OTLP Logs", "Client created")
otlpClient.ShutDownFunc = func() {
if err := loggerProvider.Shutdown(ctx); err != nil {
utils.Log(utils.ErrorLvl, "OTLP Logs", err.Error())
}
}
otlpClient.OTLPLogsLogger = otelslog.NewLogger("falco", otelslog.WithLoggerProvider(loggerProvider))
return otlpClient, nil
}
func OTLPLogsInit(ctx context.Context, config *types.Configuration) (*sdklog.LoggerProvider, error) {
// As config.OTLP.Logs fields may have been set by our own config (e.g. YAML),
// we need to set SDK environment variables accordingly.
os.Setenv("OTEL_EXPORTER_OTLP_LOGS_ENDPOINT", strings.TrimSpace(config.OTLP.Logs.Endpoint))
if config.OTLP.Logs.Protocol != "" {
os.Setenv("OTEL_EXPORTER_OTLP_LOGS_PROTOCOL", strings.TrimSpace(config.OTLP.Logs.Protocol))
}
if config.OTLP.Logs.Headers != "" {
os.Setenv("OTEL_EXPORTER_OTLP_LOGS_HEADERS", strings.TrimSpace(config.OTLP.Logs.Headers))
}
if config.OTLP.Logs.Timeout != 0 {
os.Setenv("OTEL_EXPORTER_OTLP_LOGS_TIMEOUT", fmt.Sprintf("%d", config.OTLP.Logs.Timeout))
}
if len(config.OTLP.Logs.ExtraEnvVars) != 0 {
for i, j := range config.OTLP.Logs.ExtraEnvVars {
os.Setenv(i, j)
}
}
var exporter sdklog.Exporter
var err error
switch config.OTLP.Logs.Protocol {
case GRPC:
opts := []otlploggrpc.Option{}
if !config.OTLP.Traces.CheckCert {
opts = append(opts, otlploggrpc.WithInsecure())
}
exporter, err = otlploggrpc.New(ctx, opts...)
default:
opts := []otlploghttp.Option{}
if !config.OTLP.Traces.CheckCert {
opts = append(opts, otlploghttp.WithInsecure())
}
exporter, err = otlploghttp.New(ctx, opts...)
}
if err != nil {
return nil, fmt.Errorf("failed to create Logs exporter: %v", err)
}
loggerProvider := sdklog.NewLoggerProvider(
sdklog.WithProcessor(
sdklog.NewBatchProcessor(exporter),
),
sdklog.WithResource(newResource()),
)
return loggerProvider, nil
}
func (c *Client) OTLPLogsPost(falcopayload types.FalcoPayload) {
c.OTLPLogsLogger.Info(
falcopayload.Output,
"priority", falcopayload.Priority.String(),
"source", falcopayload.Source,
"rule", falcopayload.Rule,
"hostname", falcopayload.Hostname,
"tags", strings.Join(falcopayload.Tags, ","),
slog.String("timestamp", falcopayload.Time.String()),
)
utils.Log(utils.InfoLvl, c.OutputType, "Sending log")
}

View File

@ -1,244 +0,0 @@
package otlp_metrics
import (
"context"
"fmt"
"os"
"strings"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
"go.opentelemetry.io/otel/metric"
sdkmetric "go.opentelemetry.io/otel/sdk/metric"
sdkresource "go.opentelemetry.io/otel/sdk/resource"
semconv "go.opentelemetry.io/otel/semconv/v1.23.1"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
)
const (
meterName = "falcosecurity.falco.otlpmetrics.meter"
serviceName = "falco"
serviceVersion = "0.1.0"
)
// TODO: move logging logic out of this context
// Config represents config parameters for OTLP Metrics
type Config struct {
Endpoint string
Protocol string
Timeout int64
Headers string
ExtraEnvVars map[string]string
CheckCert bool
MinimumPriority string
ExtraAttributes string
ExtraAttributesList []string
}
// InitProvider initializes a new OTLP Metrics Provider. It returns a function to shut down it.
func InitProvider(ctx context.Context, config *Config) (func(ctx context.Context) error, error) {
restoreEnvironment, err := initEnvironment(config)
if err != nil {
return nil, fmt.Errorf("failed to init environemt: %v", err)
}
defer restoreEnvironment()
shutdownFunc, err := initMeterProvider(ctx, config)
if err != nil {
return nil, fmt.Errorf("failed to create meter provider: %v", err)
}
if config.Endpoint != "" {
utils.Log(utils.InfoLvl, "OTLP Metrics", "Client created")
otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
utils.Log(utils.ErrorLvl, "OTLP", err.Error())
}))
}
return shutdownFunc, nil
}
// initEnvironment initializes the proper environment variables to the corresponding config values. If an environment
// variable is already set, it's value is left uncharged. It returns a function to restore the previous environment
// context.
func initEnvironment(config *Config) (cleanupFunc func(), err error) {
cleanupFuncs := make([]func(), 0, 5)
defer func() {
if err != nil {
for _, fn := range cleanupFuncs {
fn()
}
}
}()
var unsetEnv func()
// As OTLPMetrics fields may have been set by our own config (e.g. YAML), We need to set SDK environment variables
// accordingly.
if endpoint := os.Getenv("OTEL_EXPORTER_OTLP_METRICS_ENDPOINT"); endpoint == "" {
unsetEnv, err = setEnv("OTEL_EXPORTER_OTLP_METRICS_ENDPOINT", strings.TrimSpace(config.Endpoint))
if err != nil {
return nil, err
}
cleanupFuncs = append(cleanupFuncs, unsetEnv)
}
if protocol := os.Getenv("OTEL_EXPORTER_OTLP_METRICS_PROTOCOL"); protocol == "" {
unsetEnv, err = setEnv("OTEL_EXPORTER_OTLP_METRICS_PROTOCOL", strings.TrimSpace(config.Protocol))
if err != nil {
return nil, err
}
cleanupFuncs = append(cleanupFuncs, unsetEnv)
}
if headers := os.Getenv("OTEL_EXPORTER_OTLP_METRICS_HEADERS"); headers == "" {
unsetEnv, err = setEnv("OTEL_EXPORTER_OTLP_METRICS_HEADERS", strings.TrimSpace(config.Headers))
if err != nil {
return nil, err
}
cleanupFuncs = append(cleanupFuncs, unsetEnv)
}
if timeout := os.Getenv("OTEL_EXPORTER_OTLP_METRICS_TIMEOUT"); timeout == "" {
unsetEnv, err = setEnv("OTEL_EXPORTER_OTLP_METRICS_TIMEOUT", fmt.Sprintf("%d", config.Timeout))
if err != nil {
return nil, err
}
cleanupFuncs = append(cleanupFuncs, unsetEnv)
}
for envVar, value := range config.ExtraEnvVars {
if configValue := os.Getenv(envVar); configValue != "" {
continue
}
unsetEnv, err = setEnv(envVar, value)
if err != nil {
return nil, err
}
cleanupFuncs = append(cleanupFuncs, unsetEnv)
}
return func() {
for _, fn := range cleanupFuncs {
fn()
}
}, nil
}
func setEnv(envVar, value string) (func(), error) {
if err := os.Setenv(envVar, value); err != nil {
return nil, fmt.Errorf("failed to set %v to %v: %v", envVar, value, err)
}
return func() {
if err := os.Setenv(envVar, ""); err != nil {
utils.Log(utils.ErrorLvl, "OTLP Metrics", fmt.Sprintf("Error unsetting env variable %q: %v", envVar, err))
}
}, nil
}
// initMeterProvider initializes an OTEL meter provider (and the corresponding exporter). It returns a function to shut
// down the meter provider.
func initMeterProvider(ctx context.Context, config *Config) (func(context.Context) error, error) {
var exporter sdkmetric.Exporter
var err error
switch config.Protocol {
case "grpc":
opts := []otlpmetricgrpc.Option{}
if !config.CheckCert {
opts = append(opts, otlpmetricgrpc.WithInsecure())
}
exporter, err = otlpmetricgrpc.New(ctx, opts...)
default:
opts := []otlpmetrichttp.Option{}
if !config.CheckCert {
opts = append(opts, otlpmetrichttp.WithInsecure())
}
exporter, err = otlpmetrichttp.New(ctx, opts...)
}
if err != nil {
return nil, fmt.Errorf("failed to create Logs exporter: %v", err)
}
res, err := sdkresource.New(ctx,
sdkresource.WithSchemaURL(semconv.SchemaURL),
sdkresource.WithAttributes(
semconv.ServiceName(serviceName),
semconv.ServiceVersion(serviceVersion),
),
)
if err != nil {
return nil, fmt.Errorf("failed to create resource: %v", err)
}
meterProvider := sdkmetric.NewMeterProvider(
sdkmetric.WithReader(sdkmetric.NewPeriodicReader(exporter)),
sdkmetric.WithResource(res),
)
otel.SetMeterProvider(meterProvider)
return meterProvider.Shutdown, nil
}
type Counter interface {
With(attributes ...attribute.KeyValue) Counter
Inc()
}
type OTLPMetrics struct {
Falco Counter
Inputs Counter
Outputs Counter
}
type counterInstrument struct {
name string
description string
supportedAttributes map[string]struct{}
attributes []attribute.KeyValue
}
func NewCounter(name string, description string, supportedAttributes []string) Counter {
counter := &counterInstrument{
name: name,
description: description,
supportedAttributes: make(map[string]struct{}),
}
for _, attr := range supportedAttributes {
counter.supportedAttributes[attr] = struct{}{}
}
return counter
}
func (c *counterInstrument) With(attributes ...attribute.KeyValue) Counter {
filteredAttributes := c.filterAttributes(attributes)
counter := &counterInstrument{
name: c.name,
supportedAttributes: c.supportedAttributes,
attributes: append(c.attributes, filteredAttributes...),
}
return counter
}
func (c *counterInstrument) filterAttributes(attributes []attribute.KeyValue) []attribute.KeyValue {
filteredAttributes := make([]attribute.KeyValue, 0, len(c.attributes))
for _, attr := range attributes {
if _, ok := c.supportedAttributes[string(attr.Key)]; ok {
filteredAttributes = append(filteredAttributes, attr)
}
}
return filteredAttributes
}
func (c *counterInstrument) Inc() {
meter := otel.Meter(meterName)
ruleCounter, err := meter.Int64Counter(c.name, metric.WithDescription(c.description))
if err != nil {
utils.Log(utils.ErrorLvl, "OTLP Metrics", fmt.Sprintf("Error generating metric: %v", err))
return
}
ruleCounter.Add(context.Background(), 1, metric.WithAttributes(c.attributes...))
}

View File

@ -6,9 +6,10 @@ import (
"testing"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/otel/trace"
"github.com/falcosecurity/falcosidekick/types"
"go.opentelemetry.io/otel/trace"
)
func TestOtlpNewTrace(t *testing.T) {
@ -90,7 +91,7 @@ func TestOtlpNewTrace(t *testing.T) {
PromStats: promStats,
}
client, _ := NewClient("OTLP", "http://localhost:4317", types.CommonConfig{}, *initClientArgs)
client, _ := NewClient("OTLP", "http://localhost:4317", false, false, *initClientArgs)
// Test newTrace()
span, err := client.newTrace(c.fp)
require.Nil(t, err)

View File

@ -4,14 +4,13 @@ package outputs
import (
"context"
"log"
"sort"
"strings"
"time"
"github.com/PagerDuty/go-pagerduty"
"go.opentelemetry.io/otel/attribute"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
"github.com/falcosecurity/falcosidekick/types"
)
@ -36,18 +35,14 @@ func (c *Client) PagerdutyPost(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:pagerduty", "status:error"})
c.Stats.Pagerduty.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "pagerduty", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "pagerduty"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : PagerDuty - %v\n", err)
return
}
go c.CountMetric(Outputs, 1, []string{"output:pagerduty", "status:ok"})
c.Stats.Pagerduty.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "pagerduty", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "pagerduty"),
attribute.String("status", OK)).Inc()
utils.Log(utils.InfoLvl, c.OutputType, "Create Incident OK")
log.Printf("[INFO] : Pagerduty - Create Incident OK\n")
}
func createPagerdutyEvent(falcopayload types.FalcoPayload, config types.PagerdutyConfig) pagerduty.V2Event {

View File

@ -4,11 +4,12 @@ package outputs
import (
"context"
"fmt"
"log"
"os"
"github.com/DataDog/datadog-go/statsd"
"go.opentelemetry.io/otel/attribute"
"github.com/falcosecurity/falcosidekick/types"
corev1 "k8s.io/api/core/v1"
errorsv1 "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -17,10 +18,6 @@ import (
"k8s.io/client-go/tools/clientcmd"
wgpolicy "sigs.k8s.io/wg-policy-prototypes/policy-report/pkg/api/wgpolicyk8s.io/v1alpha2"
crd "sigs.k8s.io/wg-policy-prototypes/policy-report/pkg/generated/v1alpha2/clientset/versioned"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
otlpmetrics "github.com/falcosecurity/falcosidekick/outputs/otlp_metrics"
"github.com/falcosecurity/falcosidekick/types"
)
type resource struct {
@ -33,18 +30,18 @@ const (
policyReportName string = "falco-policy-report"
policyReportSource string = "Falco"
updateStr string = "Update"
createStr string = "Create"
update string = "Update"
create string = "Create"
highStr wgpolicy.PolicyResultSeverity = "high"
lowStr wgpolicy.PolicyResultSeverity = "low"
mediumStr wgpolicy.PolicyResultSeverity = "medium"
infoStr wgpolicy.PolicyResultSeverity = "info"
criticalStr wgpolicy.PolicyResultSeverity = "critical"
high wgpolicy.PolicyResultSeverity = "high"
low wgpolicy.PolicyResultSeverity = "low"
medium wgpolicy.PolicyResultSeverity = "medium"
info wgpolicy.PolicyResultSeverity = "info"
critical wgpolicy.PolicyResultSeverity = "critical"
failStr wgpolicy.PolicyResult = "fail"
warnStr wgpolicy.PolicyResult = "warn"
skipStr wgpolicy.PolicyResult = "skip"
fail wgpolicy.PolicyResult = "fail"
warn wgpolicy.PolicyResult = "warn"
skip wgpolicy.PolicyResult = "skip"
k8sPodName string = "k8s.pod.name"
k8sNsName string = "k8s.ns.name"
@ -57,6 +54,28 @@ const (
var (
defaultNamespace string = "default"
// default policy report
defaultPolicyReport *wgpolicy.PolicyReport = &wgpolicy.PolicyReport{
ObjectMeta: metav1.ObjectMeta{
Name: policyReportName,
Labels: map[string]string{
"app.kubernetes.io/managed-by": "falcosidekick",
},
},
Summary: wgpolicy.PolicyReportSummary{},
}
// default cluster policy report
defaultClusterPolicyReport *wgpolicy.ClusterPolicyReport = &wgpolicy.ClusterPolicyReport{
ObjectMeta: metav1.ObjectMeta{
Name: clusterPolicyReportName,
Labels: map[string]string{
"app.kubernetes.io/managed-by": "falcosidekick",
},
},
Summary: wgpolicy.PolicyReportSummary{},
}
// used resources in the k8saudit ruleset
resourceMapping = map[string]resource{
"pods": {"v1", "Pod"},
@ -77,37 +96,12 @@ var (
}
)
func newPolicyReport() *wgpolicy.PolicyReport {
return &wgpolicy.PolicyReport{
ObjectMeta: metav1.ObjectMeta{
Name: policyReportName,
Labels: map[string]string{
"app.kubernetes.io/managed-by": "falcosidekick",
},
},
Summary: wgpolicy.PolicyReportSummary{},
}
}
func newClusterPolicyReport() *wgpolicy.ClusterPolicyReport {
return &wgpolicy.ClusterPolicyReport{
ObjectMeta: metav1.ObjectMeta{
Name: clusterPolicyReportName,
Labels: map[string]string{
"app.kubernetes.io/managed-by": "falcosidekick",
},
},
Summary: wgpolicy.PolicyReportSummary{},
}
}
func NewPolicyReportClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics,
otlpMetrics *otlpmetrics.OTLPMetrics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
func NewPolicyReportClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
clientConfig, err := rest.InClusterConfig()
if err != nil {
clientConfig, err = clientcmd.BuildConfigFromFlags("", config.PolicyReport.Kubeconfig)
if err != nil {
utils.Log(utils.ErrorLvl, "PolicyReport", fmt.Sprintf("Unable to load kube config file: %v", err))
log.Printf("[ERROR] : PolicyReport - Unable to load kube config file: %v\n", err)
return nil, err
}
}
@ -123,7 +117,7 @@ func NewPolicyReportClient(config *types.Configuration, stats *types.Statistics,
if config.PolicyReport.FalcoNamespace == "" {
dat, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
if err != nil {
utils.Log(utils.ErrorLvl, "PolicyReport", fmt.Sprintf("Unable to get the Falcosidekick's namespace, '%v' used instead", defaultNamespace))
log.Printf("[ERROR] : PolicyReport - Unable to get the Falcosidekick's namespace, '%v' used instead\n", defaultNamespace)
} else {
defaultNamespace = string(dat)
}
@ -136,7 +130,6 @@ func NewPolicyReportClient(config *types.Configuration, stats *types.Statistics,
Config: config,
Stats: stats,
PromStats: promStats,
OTLPMetrics: otlpMetrics,
StatsdClient: statsdClient,
DogstatsdClient: dogstatsdClient,
KubernetesClient: clientset,
@ -161,14 +154,10 @@ func (c *Client) UpdateOrCreatePolicyReport(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:policyreport", "status:" + OK})
c.Stats.PolicyReport.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "policyreport", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "policyreport"),
attribute.String("status", OK)).Inc()
} else {
go c.CountMetric(Outputs, 1, []string{"output:policyreport", "status:" + Error})
c.Stats.PolicyReport.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "policyreport", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "policyreport"),
attribute.String("status", Error)).Inc()
}
}
@ -184,7 +173,7 @@ func newResult(falcopayload types.FalcoPayload) *wgpolicy.PolicyReportResult {
Rule: falcopayload.Rule,
Category: "SI - System and Information Integrity",
Source: policyReportSource,
Timestamp: metav1.Timestamp{Seconds: int64(falcopayload.Time.Second()), Nanos: int32(falcopayload.Time.Nanosecond())}, //nolint:gosec // disable G115
Timestamp: metav1.Timestamp{Seconds: int64(falcopayload.Time.Second()), Nanos: int32(falcopayload.Time.Nanosecond())},
Severity: mapSeverity(falcopayload),
Result: mapResult(falcopayload),
Description: falcopayload.Output,
@ -194,12 +183,12 @@ func newResult(falcopayload types.FalcoPayload) *wgpolicy.PolicyReportResult {
}
func (c *Client) createOrUpdatePolicyReport(result *wgpolicy.PolicyReportResult, namespace string) error {
action := updateStr
action := update
_, err := c.KubernetesClient.CoreV1().Namespaces().Get(context.Background(), namespace, metav1.GetOptions{})
if err != nil {
if errorsv1.IsNotFound(err) {
utils.Log(utils.InfoLvl, c.OutputType, fmt.Sprintf("Can't find the namespace '%v', fallback to '%v'", namespace, defaultNamespace))
log.Printf("[INFO] : PolicyReport - Can't find the namespace '%v', fallback to '%v'\n", namespace, defaultNamespace)
namespace = defaultNamespace
result.Subjects[0].Namespace = defaultNamespace
}
@ -212,8 +201,8 @@ func (c *Client) createOrUpdatePolicyReport(result *wgpolicy.PolicyReportResult,
}
}
if policyr.Name == "" {
policyr = newPolicyReport()
action = createStr
policyr = defaultPolicyReport
action = create
}
policyr.Results = append(policyr.Results, *result)
@ -224,41 +213,41 @@ func (c *Client) createOrUpdatePolicyReport(result *wgpolicy.PolicyReportResult,
policyr.Summary = getSummary(policyr.Results)
if action == createStr {
if action == create {
_, err := c.Crdclient.Wgpolicyk8sV1alpha2().PolicyReports(namespace).Create(context.Background(), policyr, metav1.CreateOptions{})
if err != nil {
if errorsv1.IsAlreadyExists(err) {
action = updateStr
action = update
policyr, err = c.Crdclient.Wgpolicyk8sV1alpha2().PolicyReports(namespace).Get(context.Background(), policyReportName, metav1.GetOptions{})
if err != nil {
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("Error with with the Policy Report %v in namespace %v: %v", policyReportName, namespace, err))
log.Printf("[ERROR] : PolicyReport - Error with with the Policy Report %v in namespace %v: %v\n", policyReportName, namespace, err)
return err
}
_, err := c.Crdclient.Wgpolicyk8sV1alpha2().PolicyReports(namespace).Update(context.Background(), policyr, metav1.UpdateOptions{})
if err != nil {
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("Can't %v the Policy Report %v in namespace %v: %v", action, policyReportName, namespace, err))
log.Printf("[ERROR] : PolicyReport - Can't %v the Policy Report %v in namespace %v: %v\n", action, policyReportName, namespace, err)
return err
}
} else {
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("Can't %v the Policy Report %v in namespace %v: %v", action, policyReportName, namespace, err))
log.Printf("[ERROR] : PolicyReport - Can't %v the Policy Report %v in namespace %v: %v\n", action, policyReportName, namespace, err)
return err
}
}
utils.Log(utils.InfoLvl, c.OutputType, fmt.Sprintf("%v the Policy Report %v in namespace %v", action, policyReportName, namespace))
log.Printf("[INFO] : PolicyReport - %v the Policy Report %v in namespace %v\n", action, policyReportName, namespace)
return nil
} else {
_, err := c.Crdclient.Wgpolicyk8sV1alpha2().PolicyReports(namespace).Update(context.Background(), policyr, metav1.UpdateOptions{})
if err != nil {
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("Can't %v the Policy Report %v in namespace %v: %v", action, policyReportName, namespace, err))
log.Printf("[ERROR] : PolicyReport - Can't %v the Policy Report %v in namespace %v: %v\n", action, policyReportName, namespace, err)
return err
}
utils.Log(utils.InfoLvl, c.OutputType, fmt.Sprintf("%v the Policy Report %v in namespace %v", action, policyReportName, namespace))
log.Printf("[INFO] : PolicyReport - %v the Policy Report %v in namespace %v\n", action, policyReportName, namespace)
return nil
}
}
func (c *Client) createOrUpdateClusterPolicyReport(result *wgpolicy.PolicyReportResult) error {
action := updateStr
action := update
cpolicyr, err := c.Crdclient.Wgpolicyk8sV1alpha2().ClusterPolicyReports().Get(context.Background(), clusterPolicyReportName, metav1.GetOptions{})
if err != nil {
@ -266,9 +255,9 @@ func (c *Client) createOrUpdateClusterPolicyReport(result *wgpolicy.PolicyReport
return err
}
}
if cpolicyr.Name == "" {
cpolicyr = newClusterPolicyReport()
action = createStr
if cpolicyr == nil {
cpolicyr = defaultClusterPolicyReport
action = create
}
cpolicyr.Results = append(cpolicyr.Results, *result)
@ -279,35 +268,35 @@ func (c *Client) createOrUpdateClusterPolicyReport(result *wgpolicy.PolicyReport
cpolicyr.Summary = getSummary(cpolicyr.Results)
if action == createStr {
if action == create {
_, err := c.Crdclient.Wgpolicyk8sV1alpha2().ClusterPolicyReports().Create(context.Background(), cpolicyr, metav1.CreateOptions{})
if err != nil {
if errorsv1.IsAlreadyExists(err) {
action = updateStr
action = update
cpolicyr, err = c.Crdclient.Wgpolicyk8sV1alpha2().ClusterPolicyReports().Get(context.Background(), policyReportName, metav1.GetOptions{})
if err != nil {
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("Error with with the Cluster Policy Report %v: %v", policyReportName, err))
log.Printf("[ERROR] : PolicyReport - Error with with the Cluster Policy Report %v: %v\n", policyReportName, err)
return err
}
_, err := c.Crdclient.Wgpolicyk8sV1alpha2().ClusterPolicyReports().Update(context.Background(), cpolicyr, metav1.UpdateOptions{})
if err != nil {
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("Can't %v the Cluster Policy Report %v: %v", action, policyReportName, err))
log.Printf("[ERROR] : PolicyReport - Can't %v the Cluster Policy Report %v: %v\n", action, policyReportName, err)
return err
}
} else {
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("Can't %v the Cluster Policy Report %v: %v", action, clusterPolicyReportName, err))
log.Printf("[ERROR] : PolicyReport - Can't %v the Cluster Policy Report %v: %v\n", action, clusterPolicyReportName, err)
return err
}
}
utils.Log(utils.InfoLvl, c.OutputType, fmt.Sprintf("%v Cluster the Policy Report %v", action, policyReportName))
log.Printf("[INFO] : PolicyReport - %v Cluster the Policy Report %v\n", action, policyReportName)
return nil
} else {
_, err := c.Crdclient.Wgpolicyk8sV1alpha2().ClusterPolicyReports().Update(context.Background(), cpolicyr, metav1.UpdateOptions{})
if err != nil {
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("Can't %v the Cluster Policy Report %v: %v", action, clusterPolicyReportName, err))
log.Printf("[ERROR] : PolicyReport - Can't %v the Cluster Policy Report %v: %v\n", action, clusterPolicyReportName, err)
return err
}
utils.Log(utils.InfoLvl, c.OutputType, fmt.Sprintf("%v the ClusterPolicy Report %v", action, policyReportName))
log.Printf("[INFO] : PolicyReport - %v the ClusterPolicy Report %v\n", action, policyReportName)
return nil
}
}
@ -333,25 +322,25 @@ func getSummary(results []wgpolicy.PolicyReportResult) wgpolicy.PolicyReportSumm
func mapResult(event types.FalcoPayload) wgpolicy.PolicyResult {
if event.Priority <= types.Notice {
return skipStr
return skip
} else if event.Priority == types.Warning {
return warnStr
return warn
} else {
return failStr
return fail
}
}
func mapSeverity(event types.FalcoPayload) wgpolicy.PolicyResultSeverity {
if event.Priority <= types.Informational {
return infoStr
return info
} else if event.Priority <= types.Notice {
return lowStr
return low
} else if event.Priority <= types.Warning {
return mediumStr
return medium
} else if event.Priority <= types.Error {
return highStr
return high
} else {
return criticalStr
return critical
}
}

View File

@ -4,11 +4,8 @@ package outputs
import (
"fmt"
"net/http"
"log"
"go.opentelemetry.io/otel/attribute"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
"github.com/falcosecurity/falcosidekick/types"
)
@ -51,12 +48,12 @@ func (c *Client) checkQuickwitIndexAlreadyExists(args types.InitClientArgs) bool
config := args.Config.Quickwit
endpointUrl := fmt.Sprintf("%s/%s/indexes/%s/describe", config.HostPort, config.ApiEndpoint, config.Index)
quickwitCheckClient, err := NewClient("QuickwitCheckAlreadyExists", endpointUrl, config.CommonConfig, args)
quickwitCheckClient, err := NewClient("QuickwitCheckAlreadyExists", endpointUrl, config.MutualTLS, config.CheckCert, args)
if err != nil {
return false
}
if nil != quickwitCheckClient.Get() {
if nil != quickwitCheckClient.sendRequest("GET", "") {
return false
}
@ -71,7 +68,7 @@ func (c *Client) AutoCreateQuickwitIndex(args types.InitClientArgs) error {
}
endpointUrl := fmt.Sprintf("%s/%s/indexes", config.HostPort, config.ApiEndpoint)
quickwitInitClient, err := NewClient("QuickwitInit", endpointUrl, config.CommonConfig, args)
quickwitInitClient, err := NewClient("QuickwitInit", endpointUrl, config.MutualTLS, config.CheckCert, args)
if err != nil {
return err
}
@ -145,7 +142,7 @@ func (c *Client) AutoCreateQuickwitIndex(args types.InitClientArgs) error {
}
if args.Config.Debug {
utils.Log(utils.DebugLvl, c.OutputType, fmt.Sprintf("mapping: %v", mapping))
log.Printf("[DEBUG] : Quickwit - mapping: %#v\n", mapping)
}
err = quickwitInitClient.Post(mapping)
@ -161,23 +158,25 @@ func (c *Client) AutoCreateQuickwitIndex(args types.InitClientArgs) error {
func (c *Client) QuickwitPost(falcopayload types.FalcoPayload) {
c.Stats.Quickwit.Add(Total, 1)
if c.Config.Debug {
utils.Log(utils.DebugLvl, c.OutputType, fmt.Sprintf("ingesting payload: %v", falcopayload))
if len(c.Config.Quickwit.CustomHeaders) != 0 {
c.httpClientLock.Lock()
defer c.httpClientLock.Unlock()
for i, j := range c.Config.Quickwit.CustomHeaders {
c.AddHeader(i, j)
}
}
err := c.Post(falcopayload, func(req *http.Request) {
for i, j := range c.Config.Quickwit.CustomHeaders {
req.Header.Set(i, j)
}
})
if c.Config.Debug {
log.Printf("[DEBUG] : Quickwit - ingesting payload: %v\n", falcopayload)
}
err := c.Post(falcopayload)
if err != nil {
go c.CountMetric(Outputs, 1, []string{"output:quickwit", "status:error"})
c.Stats.Quickwit.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "quickwit", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "quickwit"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : Quickwit - %v\n", err.Error())
return
}
@ -185,6 +184,4 @@ func (c *Client) QuickwitPost(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:quickwit", "status:ok"})
c.Stats.Quickwit.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "quickwit", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "quickwit"),
attribute.String("status", OK)).Inc()
}

View File

@ -5,31 +5,26 @@ package outputs
import (
"encoding/json"
"errors"
"fmt"
"log"
"github.com/DataDog/datadog-go/statsd"
amqp "github.com/rabbitmq/amqp091-go"
"go.opentelemetry.io/otel/attribute"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
otlpmetrics "github.com/falcosecurity/falcosidekick/outputs/otlp_metrics"
"github.com/falcosecurity/falcosidekick/types"
amqp "github.com/rabbitmq/amqp091-go"
)
// NewRabbitmqClient returns a new output.Client for accessing the RabbitmMQ API.
func NewRabbitmqClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics,
otlpMetrics *otlpmetrics.OTLPMetrics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
func NewRabbitmqClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
var channel *amqp.Channel
if config.Rabbitmq.URL != "" && config.Rabbitmq.Queue != "" {
conn, err := amqp.Dial(config.Rabbitmq.URL)
if err != nil {
utils.Log(utils.ErrorLvl, "Rabbitmq", "Error while connecting rabbitmq")
log.Printf("[ERROR] : Rabbitmq - %v\n", "Error while connecting rabbitmq")
return nil, errors.New("error while connecting Rabbitmq")
}
ch, err := conn.Channel()
if err != nil {
utils.Log(utils.ErrorLvl, "Rabbitmq", "Error while creating rabbitmq channel")
log.Printf("[ERROR] : Rabbitmq Channel - %v\n", "Error while creating rabbitmq channel")
return nil, errors.New("error while creating rabbitmq channel")
}
channel = ch
@ -41,7 +36,6 @@ func NewRabbitmqClient(config *types.Configuration, stats *types.Statistics, pro
RabbitmqClient: channel,
Stats: stats,
PromStats: promStats,
OTLPMetrics: otlpMetrics,
StatsdClient: statsdClient,
DogstatsdClient: dogstatsdClient,
}, nil
@ -59,20 +53,16 @@ func (c *Client) Publish(falcopayload types.FalcoPayload) {
})
if err != nil {
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("Error while publishing message: %v", err))
log.Printf("[ERROR] : RabbitMQ - %v - %v\n", "Error while publishing message", err.Error())
c.Stats.Rabbitmq.Add(Error, 1)
go c.CountMetric("outputs", 1, []string{"output:rabbitmq", "status:error"})
c.PromStats.Outputs.With(map[string]string{"destination": "rabbitmq", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "rabbitmq"),
attribute.String("status", Error)).Inc()
return
}
utils.Log(utils.InfoLvl, c.OutputType, "Message published OK")
log.Printf("[INFO] : RabbitMQ - Send to message OK \n")
c.Stats.Rabbitmq.Add(OK, 1)
go c.CountMetric("outputs", 1, []string{"output:rabbitmq", "status:ok"})
c.PromStats.Outputs.With(map[string]string{"destination": "rabbitmq", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "rabbitmq"),
attribute.String("status", OK)).Inc()
}

View File

@ -5,29 +5,23 @@ package outputs
import (
"context"
"encoding/json"
"fmt"
"log"
"strings"
"github.com/DataDog/datadog-go/statsd"
"github.com/redis/go-redis/v9"
"go.opentelemetry.io/otel/attribute"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
otlpmetrics "github.com/falcosecurity/falcosidekick/outputs/otlp_metrics"
"github.com/falcosecurity/falcosidekick/types"
"github.com/redis/go-redis/v9"
)
func (c *Client) ReportError(err error) {
go c.CountMetric(Outputs, 1, []string{"output:redis", "status:error"})
c.Stats.Redis.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "redis", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "redis"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : Redis - %v\n", err)
}
func NewRedisClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics,
otlpMetrics *otlpmetrics.OTLPMetrics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
rClient := redis.NewClient(&redis.Options{
Addr: config.Redis.Address,
@ -37,9 +31,9 @@ func NewRedisClient(config *types.Configuration, stats *types.Statistics, promSt
// Ping the Redis server to check if it's running
pong, err := rClient.Ping(context.Background()).Result()
if err != nil {
utils.Log(utils.ErrorLvl, "Redis", fmt.Sprintf("Misconfiguration, cannot connect to the server: %v", err))
log.Printf("[ERROR] : Redis - Misconfiguration, cannot connect to the server %v\n", err)
}
utils.Log(utils.InfoLvl, "Redis", fmt.Sprintf("Connected to redis server: %v", pong))
log.Printf("[INFO] : Redis - Connected to redis server: %v\n", pong)
return &Client{
OutputType: "Redis",
@ -47,7 +41,6 @@ func NewRedisClient(config *types.Configuration, stats *types.Statistics, promSt
RedisClient: rClient,
Stats: stats,
PromStats: promStats,
OTLPMetrics: otlpMetrics,
StatsdClient: statsdClient,
DogstatsdClient: dogstatsdClient,
}, nil
@ -72,5 +65,4 @@ func (c *Client) RedisPost(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:redis", "status:ok"})
c.Stats.Redis.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "redis", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "redis"), attribute.String("status", OK)).Inc()
}

View File

@ -4,13 +4,10 @@ package outputs
import (
"bytes"
"fmt"
"log"
"sort"
"strings"
"go.opentelemetry.io/otel/attribute"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
"github.com/falcosecurity/falcosidekick/types"
)
@ -76,7 +73,7 @@ func newRocketchatPayload(falcopayload types.FalcoPayload, config *types.Configu
if config.Rocketchat.MessageFormatTemplate != nil {
buf := &bytes.Buffer{}
if err := config.Rocketchat.MessageFormatTemplate.Execute(buf, falcopayload); err != nil {
utils.Log(utils.ErrorLvl, "RocketChat", fmt.Sprintf("Error expanding RocketChat message: %v", err))
log.Printf("[ERROR] : RocketChat - Error expanding RocketChat message %v", err)
} else {
messageText = buf.String()
}
@ -130,9 +127,7 @@ func (c *Client) RocketchatPost(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:rocketchat", "status:error"})
c.Stats.Rocketchat.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "rocketchat", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "rocketchat"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : RocketChat - %v\n", err.Error())
return
}
@ -140,6 +135,4 @@ func (c *Client) RocketchatPost(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:rocketchat", "status:ok"})
c.Stats.Rocketchat.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "rocketchat", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "rocketchat"),
attribute.String("status", OK)).Inc()
}

View File

@ -4,13 +4,10 @@ package outputs
import (
"bytes"
"fmt"
"log"
"sort"
"strings"
"go.opentelemetry.io/otel/attribute"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
"github.com/falcosecurity/falcosidekick/types"
)
@ -106,7 +103,7 @@ func newSlackPayload(falcopayload types.FalcoPayload, config *types.Configuratio
if config.Slack.MessageFormatTemplate != nil {
buf := &bytes.Buffer{}
if err := config.Slack.MessageFormatTemplate.Execute(buf, falcopayload); err != nil {
utils.Log(utils.ErrorLvl, "Slack", fmt.Sprintf("Error expanding Slack message: %v", err))
log.Printf("[ERROR] : Slack - Error expanding Slack message %v", err)
} else {
messageText = buf.String()
}
@ -157,9 +154,7 @@ func (c *Client) SlackPost(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:slack", "status:error"})
c.Stats.Slack.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "slack", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "slack"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : Slack - %v\n", err)
return
}
@ -167,5 +162,4 @@ func (c *Client) SlackPost(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:slack", "status:ok"})
c.Stats.Slack.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "slack", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "slack"), attribute.String("status", OK)).Inc()
}

View File

@ -5,8 +5,8 @@ package outputs
import (
"bytes"
"crypto/tls"
"fmt"
htmlTemplate "html/template"
"log"
"net"
"regexp"
"strconv"
@ -17,8 +17,6 @@ import (
sasl "github.com/emersion/go-sasl"
smtp "github.com/emersion/go-smtp"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
otlpmetrics "github.com/falcosecurity/falcosidekick/outputs/otlp_metrics"
"github.com/falcosecurity/falcosidekick/types"
)
@ -33,11 +31,10 @@ type SMTPPayload struct {
}
// NewSMTPClient returns a new output.Client for accessing a SMTP server.
func NewSMTPClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics,
otlpMetrics *otlpmetrics.OTLPMetrics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
func NewSMTPClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
reg := regexp.MustCompile(`.*:[0-9]+`)
if !reg.MatchString(config.SMTP.HostPort) {
utils.Log(utils.ErrorLvl, "SMTP", "Bad Host:Port")
log.Printf("[ERROR] : SMTP - Bad Host:Port\n")
return nil, ErrClientCreation
}
@ -46,7 +43,6 @@ func NewSMTPClient(config *types.Configuration, stats *types.Statistics, promSta
Config: config,
Stats: stats,
PromStats: promStats,
OTLPMetrics: otlpMetrics,
StatsdClient: statsdClient,
DogstatsdClient: dogstatsdClient,
}, nil
@ -75,7 +71,7 @@ func newSMTPPayload(falcopayload types.FalcoPayload, config *types.Configuration
var outtext bytes.Buffer
err := ttmpl.Execute(&outtext, falcopayload)
if err != nil {
utils.Log(utils.ErrorLvl, "SMTP", err.Error())
log.Printf("[ERROR] : SMTP - %v\n", err)
return s
}
s.Body += outtext.String()
@ -91,7 +87,7 @@ func newSMTPPayload(falcopayload types.FalcoPayload, config *types.Configuration
var outhtml bytes.Buffer
err = htmpl.Execute(&outhtml, falcopayload)
if err != nil {
utils.Log(utils.ErrorLvl, "SMTP", err.Error())
log.Printf("[ERROR] : SMTP - %v\n", err)
return s
}
s.Body += outhtml.String()
@ -102,7 +98,7 @@ func newSMTPPayload(falcopayload types.FalcoPayload, config *types.Configuration
func (c *Client) ReportErr(message string, err error) {
go c.CountMetric("outputs", 1, []string{"output:smtp", "status:error"})
c.Stats.SMTP.Add(Error, 1)
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("%s : %v", message, err))
log.Printf("[ERROR] : SMTP - %s : %v\n", message, err)
}
func (c *Client) GetAuth() (sasl.Client, error) {
@ -164,11 +160,11 @@ func (c *Client) SendMail(falcopayload types.FalcoPayload) {
body := sp.Subject + "\n" + sp.Body
if c.Config.Debug {
utils.Log(utils.DebugLvl, c.OutputType, fmt.Sprintf("payload : \nServer: %v\n%v\n%v\nSubject: %v", c.Config.SMTP.HostPort, sp.From, sp.To, sp.Subject))
log.Printf("[DEBUG] : SMTP payload : \nServer: %v\n%v\n%v\nSubject: %v\n", c.Config.SMTP.HostPort, sp.From, sp.To, sp.Subject)
if c.Config.SMTP.AuthMechanism != "" {
utils.Log(utils.DebugLvl, c.OutputType, fmt.Sprintf("SASL Auth : \nMechanisms: %v\nUser: %v\nToken: %v\nIdentity: %v\nTrace: %v", c.Config.SMTP.AuthMechanism, c.Config.SMTP.User, c.Config.SMTP.Token, c.Config.SMTP.Identity, c.Config.SMTP.Trace))
log.Printf("[DEBUG] : SMTP - SASL Auth : \nMechanisms: %v\nUser: %v\nToken: %v\nIdentity: %v\nTrace: %v\n", c.Config.SMTP.AuthMechanism, c.Config.SMTP.User, c.Config.SMTP.Token, c.Config.SMTP.Identity, c.Config.SMTP.Trace)
} else {
utils.Log(utils.DebugLvl, c.OutputType, "SASL Auth : Disabled")
log.Printf("[DEBUG] : SMTP - SASL Auth : Disabled\n")
}
}
@ -179,7 +175,7 @@ func (c *Client) SendMail(falcopayload types.FalcoPayload) {
return
}
utils.Log(utils.InfoLvl, c.OutputType, " SMTP - Sent OK\n")
log.Printf("[INFO] : SMTP - Sent OK\n")
go c.CountMetric("outputs", 1, []string{"output:smtp", "status:ok"})
c.Stats.SMTP.Add(OK, 1)
}

View File

@ -8,18 +8,15 @@ import (
"errors"
"fmt"
"io"
"log"
"net/http"
"net/url"
"strings"
"time"
"github.com/DataDog/datadog-go/statsd"
"github.com/google/uuid"
"go.opentelemetry.io/otel/attribute"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
otlpmetrics "github.com/falcosecurity/falcosidekick/outputs/otlp_metrics"
"github.com/falcosecurity/falcosidekick/types"
"github.com/google/uuid"
)
const Falcosidekick_ string = "falcosidekick_"
@ -178,7 +175,7 @@ func newSpyderbatPayload(falcopayload types.FalcoPayload) (spyderbatPayload, err
MonotonicTime: time.Now().Nanosecond(),
OrcTime: nowTime,
Time: eventTime,
PID: int32(pid), //nolint:gosec // disable G115
PID: int32(pid),
Level: level,
Message: message,
Arguments: arguments,
@ -187,17 +184,17 @@ func newSpyderbatPayload(falcopayload types.FalcoPayload) (spyderbatPayload, err
}
func NewSpyderbatClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics,
otlpMetrics *otlpmetrics.OTLPMetrics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
hasSource, err := isSourcePresent(config)
if err != nil {
utils.Log(utils.ErrorLvl, "Spyderbat", err.Error())
log.Printf("[ERROR] : Spyderbat - %v\n", err.Error())
return nil, ErrClientCreation
}
if !hasSource {
if err := makeSource(config); err != nil {
if hasSource, err2 := isSourcePresent(config); err2 != nil || !hasSource {
utils.Log(utils.ErrorLvl, "Spyderbat", err.Error())
log.Printf("[ERROR] : Spyderbat - %v\n", err.Error())
return nil, ErrClientCreation
}
}
@ -206,51 +203,49 @@ func NewSpyderbatClient(config *types.Configuration, stats *types.Statistics, pr
source := Falcosidekick_ + config.Spyderbat.OrgUID
data_url, err := url.JoinPath(config.Spyderbat.APIUrl, APIv1Path+config.Spyderbat.OrgUID+SourcePath+source+"/data/sb-agent")
if err != nil {
utils.Log(utils.ErrorLvl, "Spyderbat", err.Error())
log.Printf("[ERROR] : Spyderbat - %v\n", err.Error())
return nil, ErrClientCreation
}
endpointURL, err := url.Parse(data_url)
if err != nil {
utils.Log(utils.ErrorLvl, "Spyderbat", err.Error())
log.Printf("[ERROR] : Spyderbat - %v\n", err.Error())
return nil, ErrClientCreation
}
return &Client{
OutputType: "Spyderbat",
EndpointURL: endpointURL,
cfg: types.CommonConfig{MutualTLS: false, CheckCert: true, MaxConcurrentRequests: 1},
ContentType: "application/ndjson",
Config: config,
Stats: stats,
PromStats: promStats,
OTLPMetrics: otlpMetrics,
StatsdClient: statsdClient,
DogstatsdClient: dogstatsdClient,
OutputType: "Spyderbat",
EndpointURL: endpointURL,
MutualTLSEnabled: false,
CheckCert: true,
ContentType: "application/ndjson",
Config: config,
Stats: stats,
PromStats: promStats,
StatsdClient: statsdClient,
DogstatsdClient: dogstatsdClient,
}, nil
}
func (c *Client) SpyderbatPost(falcopayload types.FalcoPayload) {
c.Stats.Spyderbat.Add(Total, 1)
c.httpClientLock.Lock()
defer c.httpClientLock.Unlock()
c.AddHeader("Authorization", "Bearer "+c.Config.Spyderbat.APIKey)
c.AddHeader("Content-Encoding", "gzip")
payload, err := newSpyderbatPayload(falcopayload)
if err == nil {
err = c.Post(payload, func(req *http.Request) {
req.Header.Set("Authorization", "Bearer "+c.Config.Spyderbat.APIKey)
req.Header.Set("Content-Encoding", "gzip")
})
err = c.Post(payload)
}
if err != nil {
go c.CountMetric(Outputs, 1, []string{"output:spyderbat", "status:error"})
c.Stats.Spyderbat.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "spyderbat", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "spyderbat"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : Spyderbat - %v\n", err.Error())
return
}
go c.CountMetric(Outputs, 1, []string{"output:spyderbat", "status:ok"})
c.Stats.Spyderbat.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "spyderbat", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "spyderbat"),
attribute.String("status", OK)).Inc()
}

View File

@ -4,12 +4,11 @@ package outputs
import (
"encoding/json"
"log"
"strings"
stan "github.com/nats-io/stan.go"
"go.opentelemetry.io/otel/attribute"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
"github.com/falcosecurity/falcosidekick/types"
)
@ -17,33 +16,26 @@ import (
func (c *Client) StanPublish(falcopayload types.FalcoPayload) {
c.Stats.Stan.Add(Total, 1)
subject := c.Config.Stan.SubjectTemplate
if len(subject) == 0 {
subject = defaultNatsSubjects
}
subject = strings.ReplaceAll(subject, "<priority>", strings.ToLower(falcopayload.Priority.String()))
subject = strings.ReplaceAll(subject, "<rule>", strings.Trim(slugRegExp.ReplaceAllString(strings.ToLower(falcopayload.Rule), "_"), "_"))
nc, err := stan.Connect(c.Config.Stan.ClusterID, c.Config.Stan.ClientID, stan.NatsURL(c.EndpointURL.String()))
if err != nil {
c.setStanErrorMetrics()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : STAN - %v\n", err.Error())
return
}
defer nc.Close()
r := strings.Trim(slugRegularExpression.ReplaceAllString(strings.ToLower(falcopayload.Rule), "_"), "_")
j, err := json.Marshal(falcopayload)
if err != nil {
c.setStanErrorMetrics()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : STAN - %v\n", err.Error())
return
}
err = nc.Publish(subject, j)
err = nc.Publish("falco."+strings.ToLower(falcopayload.Priority.String())+"."+r, j)
if err != nil {
c.setStanErrorMetrics()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : STAN - %v\n", err)
return
}
@ -51,8 +43,7 @@ func (c *Client) StanPublish(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:stan", "status:ok"})
c.Stats.Stan.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "stan", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "stan"), attribute.String("status", OK)).Inc()
utils.Log(utils.InfoLvl, c.OutputType, "Publish OK")
log.Printf("[INFO] : STAN - Publish OK\n")
}
// setStanErrorMetrics set the error stats
@ -60,7 +51,4 @@ func (c *Client) setStanErrorMetrics() {
go c.CountMetric(Outputs, 1, []string{"output:stan", "status:error"})
c.Stats.Stan.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "stan", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "stan"),
attribute.String("status", Error)).Inc()
}

View File

@ -3,13 +3,11 @@
package outputs
import (
"fmt"
"log"
"strings"
"github.com/DataDog/datadog-go/statsd"
"go.opentelemetry.io/otel/attribute"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
"github.com/falcosecurity/falcosidekick/types"
)
@ -27,7 +25,7 @@ func NewStatsdClient(outputType string, config *types.Configuration, stats *type
fwd = config.Dogstatsd.Forwarder
}
if err != nil {
utils.Log(utils.ErrorLvl, outputType, fmt.Sprintf("Can't configure client for %v - %v", fwd, err))
log.Printf("[ERROR] : Can't configure %v client for %v - %v", outputType, fwd, err)
return nil, err
}
@ -49,18 +47,14 @@ func (c *Client) CountMetric(metric string, value int64, tags []string) {
if err := c.StatsdClient.Count(metric+t, value, []string{}, 1); err != nil {
c.Stats.Statsd.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "statsd", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "statsd"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("Unable to send metric (%v%v%v) : %v", c.Config.Statsd.Namespace, metric, t, err))
log.Printf("[ERROR] : StatsD - Unable to send metric (%v%v%v) : %v\n", c.Config.Statsd.Namespace, metric, t, err)
return
}
c.Stats.Statsd.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "statsd", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "statsd"),
attribute.String("status", OK)).Inc()
utils.Log(utils.InfoLvl, c.OutputType, fmt.Sprintf("Send Metric OK (%v%v%v)", c.Config.Statsd.Namespace, metric, t))
log.Printf("[INFO] : StatsD - Send Metric OK (%v%v%v)\n", c.Config.Statsd.Namespace, metric, t)
}
if c.DogstatsdClient != nil {
@ -68,17 +62,13 @@ func (c *Client) CountMetric(metric string, value int64, tags []string) {
if err := c.DogstatsdClient.Count(metric, value, tags, 1); err != nil {
c.Stats.Dogstatsd.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "dogstatsd", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "dogstatsd"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("Send Metric Error (%v%v%v) : %v", c.Config.Statsd.Namespace, metric, tags, err))
log.Printf("[ERROR] : DogStatsD - Send Metric Error (%v%v%v) : %v\n", c.Config.Statsd.Namespace, metric, tags, err)
return
}
c.Stats.Dogstatsd.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "dogstatsd", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "dogstatsd"),
attribute.String("status", OK)).Inc()
utils.Log(utils.InfoLvl, c.OutputType, fmt.Sprintf("Send Metric OK (%v%v %v)", c.Config.Statsd.Namespace, metric, tags))
log.Printf("[INFO] : DogStatsD - Send Metric OK (%v%v %v)\n", c.Config.Statsd.Namespace, metric, tags)
}
}

View File

@ -3,12 +3,9 @@
package outputs
import (
"net/http"
"log"
"net/url"
"go.opentelemetry.io/otel/attribute"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
"github.com/falcosecurity/falcosidekick/types"
)
@ -19,32 +16,28 @@ func (c *Client) SumoLogicPost(falcopayload types.FalcoPayload) {
endpointURL, err := url.Parse(c.Config.SumoLogic.ReceiverURL)
if err != nil {
c.setSumoLogicErrorMetrics()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : %v - %v\n", c.OutputType, err.Error())
return
}
err = c.Post(falcopayload,
func(req *http.Request) {
if c.Config.SumoLogic.SourceCategory != "" {
req.Header.Set("X-Sumo-Category", c.Config.SumoLogic.SourceCategory)
}
c.EndpointURL = endpointURL
if c.Config.SumoLogic.SourceHost != "" {
req.Header.Set("X-Sumo-Host", c.Config.SumoLogic.SourceHost)
}
if c.Config.SumoLogic.SourceCategory != "" {
c.AddHeader("X-Sumo-Category", c.Config.SumoLogic.SourceCategory)
}
if c.Config.SumoLogic.Name != "" {
req.Header.Set("X-Sumo-Name", c.Config.SumoLogic.Name)
}
},
func(req *http.Request) {
req.URL = endpointURL
},
)
if c.Config.SumoLogic.SourceHost != "" {
c.AddHeader("X-Sumo-Host", c.Config.SumoLogic.SourceHost)
}
if c.Config.SumoLogic.Name != "" {
c.AddHeader("X-Sumo-Name", c.Config.SumoLogic.Name)
}
err = c.Post(falcopayload)
if err != nil {
c.setSumoLogicErrorMetrics()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : %x - %v\n", c.OutputType, err)
return
}
@ -52,8 +45,6 @@ func (c *Client) SumoLogicPost(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:sumologic", "status:ok"})
c.Stats.SumoLogic.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "sumologic", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "sumologic"),
attribute.String("status", OK)).Inc()
}
// setSumoLogicErrorMetrics set the error stats
@ -61,6 +52,4 @@ func (c *Client) setSumoLogicErrorMetrics() {
go c.CountMetric(Outputs, 1, []string{"output:sumologic", "status:error"})
c.Stats.SumoLogic.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "sumologic", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "sumologic"),
attribute.String("status", Error)).Inc()
}

View File

@ -5,20 +5,16 @@ package outputs
import (
"encoding/json"
"fmt"
"log"
"log/syslog"
"strings"
"time"
"github.com/DataDog/datadog-go/statsd"
"go.opentelemetry.io/otel/attribute"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
otlpmetrics "github.com/falcosecurity/falcosidekick/outputs/otlp_metrics"
"github.com/falcosecurity/falcosidekick/types"
)
func NewSyslogClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics,
otlpMetrics *otlpmetrics.OTLPMetrics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
func NewSyslogClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
ok := isValidProtocolString(strings.ToLower(config.Syslog.Protocol))
if !ok {
return nil, fmt.Errorf("failed to configure Syslog client: invalid protocol %s", config.Syslog.Protocol)
@ -29,7 +25,6 @@ func NewSyslogClient(config *types.Configuration, stats *types.Statistics, promS
Config: config,
Stats: stats,
PromStats: promStats,
OTLPMetrics: otlpMetrics,
StatsdClient: statsdClient,
DogstatsdClient: dogstatsdClient,
}, nil
@ -91,9 +86,7 @@ func (c *Client) SyslogPost(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:syslog", "status:error"})
c.Stats.Syslog.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "syslog", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "syslog"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : Syslog - %v\n", err)
return
}
@ -129,14 +122,11 @@ func (c *Client) SyslogPost(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:syslog", "status:error"})
c.Stats.Syslog.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "syslog", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "syslog"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : Syslog - %v\n", err)
return
}
go c.CountMetric(Outputs, 1, []string{"output:syslog", "status:ok"})
c.Stats.Syslog.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "syslog", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "syslog"), attribute.String("status", OK)).Inc()
}

View File

@ -3,9 +3,9 @@
package outputs
import (
"go.opentelemetry.io/otel/attribute"
"fmt"
"log"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
"github.com/falcosecurity/falcosidekick/types"
)
@ -18,15 +18,13 @@ func (c *Client) TalonPost(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:talon", "status:error"})
c.Stats.Talon.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "talon", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "talon"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : Talon - %v\n", err.Error())
return
}
// Setting the success status
go c.CountMetric(Outputs, 1, []string{"output:talon", "status:ok"})
c.Stats.Talon.Add(OK, 1)
fmt.Println("aaaaa")
c.PromStats.Outputs.With(map[string]string{"destination": "talon", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "talon"), attribute.String("status", OK)).Inc()
}

View File

@ -3,12 +3,10 @@
package outputs
import (
"log"
"sort"
"strings"
"go.opentelemetry.io/otel/attribute"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
"github.com/falcosecurity/falcosidekick/types"
)
@ -41,7 +39,7 @@ func newTeamsPayload(falcopayload types.FalcoPayload, config *types.Configuratio
fact teamsFact
)
section.ActivityTitle = "Falcosidekick"
section.ActivityTitle = "Falco Sidekick"
section.ActivitySubTitle = falcopayload.Time.String()
if config.Teams.OutputFormat == All || config.Teams.OutputFormat == Text || config.Teams.OutputFormat == "" {
@ -125,9 +123,7 @@ func (c *Client) TeamsPost(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:teams", "status:error"})
c.Stats.Teams.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "teams", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "teams"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : Teams - %v\n", err)
return
}
@ -135,5 +131,4 @@ func (c *Client) TeamsPost(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:teams", "status:ok"})
c.Stats.Teams.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "teams", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "teams"), attribute.String("status", OK)).Inc()
}

View File

@ -18,7 +18,7 @@ func TestNewTeamsPayload(t *testing.T) {
ThemeColor: "ccfff2",
Sections: []teamsSection{
{
ActivityTitle: "Falcosidekick",
ActivityTitle: "Falco Sidekick",
ActivitySubTitle: "2001-01-01 01:10:00 +0000 UTC",
ActivityImage: "",
Text: "This is a test from falcosidekick",

View File

@ -3,9 +3,8 @@
package outputs
import (
"go.opentelemetry.io/otel/attribute"
"log"
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
"github.com/falcosecurity/falcosidekick/types"
)
@ -18,9 +17,7 @@ func (c *Client) TektonPost(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:tekton", "status:error"})
c.Stats.Tekton.Add(Error, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "tekton", "status": Error}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "tekton"),
attribute.String("status", Error)).Inc()
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
log.Printf("[ERROR] : Tekton - %v\n", err.Error())
return
}
@ -28,5 +25,4 @@ func (c *Client) TektonPost(falcopayload types.FalcoPayload) {
go c.CountMetric(Outputs, 1, []string{"output:tekton", "status:ok"})
c.Stats.Tekton.Add(OK, 1)
c.PromStats.Outputs.With(map[string]string{"destination": "tekton", "status": OK}).Inc()
c.OTLPMetrics.Outputs.With(attribute.String("destination", "tekton"), attribute.String("status", OK)).Inc()
}

Some files were not shown because too many files have changed in this diff Show More