Compare commits
210 Commits
2.29.0-rc.
...
master
Author | SHA1 | Date |
---|---|---|
|
2c0363e3bd | |
|
3885a33777 | |
|
7a8199d5ae | |
|
0b120d6da4 | |
|
8c24f1073f | |
|
6096134c6b | |
|
83061c5536 | |
|
beb4375088 | |
|
19b55bd0f1 | |
|
87c64c50ca | |
|
2dca2d4219 | |
|
a2c7538cb4 | |
|
ec1f02eb69 | |
|
6897e9d75f | |
|
c56675cf26 | |
|
038f7743cf | |
|
191b961b15 | |
|
739c7d18bd | |
|
765dd4a005 | |
|
bd29893d1a | |
|
f93f3c7904 | |
|
c0e7e6c19b | |
|
a0c34e784d | |
|
97134bd101 | |
|
f41e855f5d | |
|
2573030a3a | |
|
be5b5822ab | |
|
2e5e08d559 | |
|
f306abb797 | |
|
b4cf9f1777 | |
|
3ac5fcbb63 | |
|
0cb12b9427 | |
|
6a9d9d4fac | |
|
9c46d2c14e | |
|
e23afd1c38 | |
|
c9fb02fd0b | |
|
e387e72b42 | |
|
57182b15e7 | |
|
491166b0b4 | |
|
647cbb5d3a | |
|
c05c47f94b | |
|
1f3d15069a | |
|
0b443908b8 | |
|
8b740084e9 | |
|
da71784171 | |
|
4462e30d59 | |
|
1864b1df1d | |
|
2a9ce5d9a4 | |
|
e9f0e38c00 | |
|
b3d5e7c70b | |
|
5bef4a6611 | |
|
20586b04d1 | |
|
57f110769c | |
|
1ddbe5af26 | |
|
9d7389b218 | |
|
2d6cd852f6 | |
|
afca84ff4c | |
|
f31b161e6d | |
|
208e2c3118 | |
|
2d9d7ca9b0 | |
|
2d68bae30c | |
|
20a4ffbe04 | |
|
632252b427 | |
|
5af88e588a | |
|
0adfae2f3a | |
|
9984c05528 | |
|
cd40babe7b | |
|
4ebc68ce71 | |
|
dc19ba5c6e | |
|
4a634b07ce | |
|
58ec197423 | |
|
61bb03a8a6 | |
|
82c2465f22 | |
|
d2c881d3a6 | |
|
6a29f66ca4 | |
|
d85471787a | |
|
b1f3b1aab6 | |
|
b6cd41a2dc | |
|
2424be2710 | |
|
c4fbe29f56 | |
|
520277ddc0 | |
|
b58c9c4273 | |
|
6b77b6b479 | |
|
ce030d7ee4 | |
|
20c3c50109 | |
|
7e4eca8fe9 | |
|
d49dfc794e | |
|
9724de340e | |
|
b792daf085 | |
|
304d59796f | |
|
2bdb8355ea | |
|
2ddf8c518d | |
|
ae60a86cac | |
|
5c158a93e1 | |
|
9e10e874f2 | |
|
97c89d368b | |
|
21f4b096e6 | |
|
7f3654b2cc | |
|
ca111495cd | |
|
d448390192 | |
|
66fd9406e0 | |
|
11f2ea6a36 | |
|
2270a6cde6 | |
|
236be97070 | |
|
c4dcd85e4f | |
|
bac1bd2ef1 | |
|
b5d79b1dc5 | |
|
0f4f7949cb | |
|
5c7e05fa06 | |
|
97ac674827 | |
|
dad257f8e8 | |
|
b99f7a78fb | |
|
38f4fd9e6d | |
|
2e7a4d37f3 | |
|
80185922c0 | |
|
cd26ed2222 | |
|
0aa63718eb | |
|
7932255412 | |
|
58d13f3575 | |
|
27bb4a5ef5 | |
|
f45c5b2387 | |
|
e86b8ed54e | |
|
9288fc3724 | |
|
4385579634 | |
|
11290d7131 | |
|
354f9bb2e0 | |
|
56b20d2c66 | |
|
5fbcf19a0a | |
|
3908f913e9 | |
|
79199ebd89 | |
|
b71b856bca | |
|
74ce82c163 | |
|
3b87d91ed0 | |
|
793784aef0 | |
|
4b4d7f74f2 | |
|
7a0003ef0d | |
|
01c9cd709c | |
|
20f6cf65a4 | |
|
0788919138 | |
|
0f39f47e11 | |
|
ac8c3ac40b | |
|
51ba0f6065 | |
|
8ffe416a91 | |
|
31a8dca948 | |
|
04dd08b263 | |
|
b91b07eb73 | |
|
cd70fcfa2b | |
|
388814cc5a | |
|
aaad6b378d | |
|
d2c91eb603 | |
|
63ca1cb7d6 | |
|
f2f16da888 | |
|
b3cc550f16 | |
|
4c5f50f2b4 | |
|
258f3d0bf5 | |
|
c9cd229774 | |
|
ac3b4bd5a8 | |
|
ec04e21d72 | |
|
2afc2fa8aa | |
|
4e53f274d4 | |
|
407bc6cdda | |
|
6600343d2f | |
|
ab7bae38e6 | |
|
2cec220064 | |
|
1625dad834 | |
|
2db72f0f4a | |
|
797d452d68 | |
|
662716646f | |
|
d520290f45 | |
|
45f6cb773f | |
|
1ccb742b2a | |
|
c8ed7550a2 | |
|
33ca2a0c99 | |
|
2fe10ae096 | |
|
95f7b21313 | |
|
70f9afe508 | |
|
a9dfe9baaf | |
|
067de26c93 | |
|
8247131f85 | |
|
9c72b4f0fc | |
|
98547008be | |
|
41e280e552 | |
|
bf64d82881 | |
|
b040f28695 | |
|
0406a23714 | |
|
c6e07525e2 | |
|
aba3835aff | |
|
62b4835021 | |
|
54ac9bb3c4 | |
|
fd503a4b72 | |
|
a65ae86647 | |
|
41a9581e66 | |
|
ead7c608bb | |
|
a36af89c3e | |
|
805e069323 | |
|
f1772e9d62 | |
|
aa3975b775 | |
|
d01c9de618 | |
|
c698f85377 | |
|
5b7845050e | |
|
5f96b955a9 | |
|
b5d4ede8b0 | |
|
6795955ff8 | |
|
c1ef422ac5 | |
|
1689fe6bb6 | |
|
e74017c5d0 | |
|
824d83ccb6 | |
|
609185641c | |
|
9ad65facf5 | |
|
835e25c537 |
|
@ -5,15 +5,17 @@ updates:
|
|||
directory: "/"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
labels:
|
||||
- "area/dependency"
|
||||
- "release-note-none"
|
||||
- "ok-to-test"
|
||||
groups:
|
||||
gomod:
|
||||
update-types:
|
||||
- "patch"
|
||||
|
||||
- package-ecosystem: "github-actions"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
labels:
|
||||
- "area/dependency"
|
||||
- "release-note-none"
|
||||
- "ok-to-test"
|
||||
groups:
|
||||
actions:
|
||||
update-types:
|
||||
- "minor"
|
||||
- "patch"
|
||||
|
|
|
@ -9,23 +9,23 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
|
||||
- uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
|
||||
with:
|
||||
go-version: '1.22'
|
||||
go-version: '1.23'
|
||||
check-latest: true
|
||||
cache: true
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3.0.0
|
||||
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@d70bba72b1f3fd22344832f00baa16ece964efeb # v3.3.0
|
||||
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
|
||||
|
||||
- uses: anchore/sbom-action/download-syft@e8d2a6937ecead383dfe75190d104edd1f9c5751 # v0.16.0
|
||||
- uses: anchore/sbom-action/download-syft@7b36ad622f042cab6f59a75c2ac24ccb256e9b45 # v0.20.4
|
||||
|
||||
- uses: goreleaser/goreleaser-action@286f3b13b1b49da4ac219696163fb8c1c93e1200 # v6.0.0
|
||||
- uses: goreleaser/goreleaser-action@9c156ee8a17a598857849441385a2041ef570552 # v6.3.0
|
||||
with:
|
||||
install-only: true
|
||||
|
||||
|
|
|
@ -14,14 +14,14 @@ jobs:
|
|||
name: lint
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
|
||||
- uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
|
||||
with:
|
||||
go-version: '1.22'
|
||||
go-version: '1.23'
|
||||
cache: false
|
||||
check-latest: true
|
||||
- name: golangci-lint
|
||||
uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v6.0.1
|
||||
uses: golangci/golangci-lint-action@2226d7cb06a077cd73e56eedd38eecad18e5d837 # v6.5.0
|
||||
with:
|
||||
version: v1.57
|
||||
version: v1.61
|
||||
args: --timeout=5m
|
||||
|
|
|
@ -14,25 +14,25 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
|
||||
- uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
|
||||
with:
|
||||
go-version: '1.22'
|
||||
go-version: '1.23'
|
||||
check-latest: true
|
||||
cache: true
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3.0.0
|
||||
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@d70bba72b1f3fd22344832f00baa16ece964efeb # v3.3.0
|
||||
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
|
||||
|
||||
- uses: sigstore/cosign-installer@59acb6260d9c0ba8f4a2f9d9b48431a222b68e20 # v3.5.0
|
||||
- uses: sigstore/cosign-installer@d58896d6a1865668819e1d91763c7751a165e159 # v3.9.2
|
||||
|
||||
- uses: anchore/sbom-action/download-syft@e8d2a6937ecead383dfe75190d104edd1f9c5751 # v0.16.0
|
||||
- uses: anchore/sbom-action/download-syft@7b36ad622f042cab6f59a75c2ac24ccb256e9b45 # v0.20.4
|
||||
|
||||
- uses: goreleaser/goreleaser-action@286f3b13b1b49da4ac219696163fb8c1c93e1200 # v6.0.0
|
||||
- uses: goreleaser/goreleaser-action@9c156ee8a17a598857849441385a2041ef570552 # v6.3.0
|
||||
with:
|
||||
install-only: true
|
||||
|
||||
|
@ -47,7 +47,7 @@ jobs:
|
|||
|
||||
# Push images to DockerHUB
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3.2.0
|
||||
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USER }}
|
||||
password: ${{ secrets.DOCKERHUB_SECRET }}
|
||||
|
@ -63,7 +63,7 @@ jobs:
|
|||
|
||||
# Push images to AWS Public ECR
|
||||
- name: Configure AWS credentials
|
||||
uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2
|
||||
uses: aws-actions/configure-aws-credentials@b47578312673ae6fa5b5096b330d9fbac3d116df # v4.2.1
|
||||
with:
|
||||
role-to-assume: arn:aws:iam::292999226676:role/github_actions-falcosidekick-ecr
|
||||
aws-region: us-east-1
|
||||
|
|
|
@ -18,33 +18,33 @@ jobs:
|
|||
tag_name: ${{ steps.tag.outputs.tag_name }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
|
||||
- uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
|
||||
with:
|
||||
go-version: '1.22'
|
||||
go-version: '1.23'
|
||||
check-latest: true
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3.0.0
|
||||
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@d70bba72b1f3fd22344832f00baa16ece964efeb # v3.3.0
|
||||
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
|
||||
|
||||
- uses: sigstore/cosign-installer@59acb6260d9c0ba8f4a2f9d9b48431a222b68e20 # v3.5.0
|
||||
- uses: sigstore/cosign-installer@d58896d6a1865668819e1d91763c7751a165e159 # v3.9.2
|
||||
|
||||
- uses: anchore/sbom-action/download-syft@e8d2a6937ecead383dfe75190d104edd1f9c5751 # v0.16.0
|
||||
- uses: anchore/sbom-action/download-syft@7b36ad622f042cab6f59a75c2ac24ccb256e9b45 # v0.20.4
|
||||
|
||||
# Push images to DockerHUB
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3.2.0
|
||||
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USER }}
|
||||
password: ${{ secrets.DOCKERHUB_SECRET }}
|
||||
|
||||
# Push images to AWS Public ECR
|
||||
- name: Configure AWS credentials
|
||||
uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2
|
||||
uses: aws-actions/configure-aws-credentials@b47578312673ae6fa5b5096b330d9fbac3d116df # v4.2.1
|
||||
with:
|
||||
role-to-assume: arn:aws:iam::292999226676:role/github_actions-falcosidekick-ecr
|
||||
aws-region: us-east-1
|
||||
|
@ -68,10 +68,10 @@ jobs:
|
|||
|
||||
- name: Run GoReleaser
|
||||
id: run-goreleaser
|
||||
uses: goreleaser/goreleaser-action@286f3b13b1b49da4ac219696163fb8c1c93e1200 # v6.0.0
|
||||
uses: goreleaser/goreleaser-action@9c156ee8a17a598857849441385a2041ef570552 # v6.3.0
|
||||
with:
|
||||
version: latest
|
||||
args: release --clean --timeout 120m
|
||||
args: release --clean --timeout 120m --parallelism 1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
LDFLAGS: ${{ env.GO_FLAGS }}
|
||||
|
@ -92,7 +92,7 @@ jobs:
|
|||
actions: read # To read the workflow path.
|
||||
id-token: write # To sign the provenance.
|
||||
contents: write # To add assets to a release.
|
||||
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.0.0
|
||||
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
|
||||
with:
|
||||
base64-subjects: "${{ needs.release.outputs.hashes }}"
|
||||
upload-assets: true
|
||||
|
|
|
@ -14,10 +14,10 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
|
||||
- uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
|
||||
with:
|
||||
go-version: '1.22'
|
||||
go-version: '1.23'
|
||||
check-latest: true
|
||||
cache: true
|
||||
- name: Run Go tests
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
run:
|
||||
deadline: 5m
|
||||
timeout: 5m
|
||||
issues:
|
||||
exclude-files:
|
||||
- "zz_generated.*\\.go$"
|
||||
|
@ -18,7 +18,3 @@ linters:
|
|||
- unused
|
||||
# Run with --fast=false for more extensive checks
|
||||
fast: true
|
||||
include:
|
||||
- EXC0002 # include "missing comments" issues from golint
|
||||
max-issues-per-linter: 0
|
||||
max-same-issues: 0
|
||||
|
|
|
@ -10,7 +10,7 @@ env:
|
|||
- COSIGN_YES=true
|
||||
|
||||
snapshot:
|
||||
name_template: 'latest'
|
||||
version_template: 'latest'
|
||||
|
||||
checksum:
|
||||
name_template: 'checksums.txt'
|
||||
|
|
47
CHANGELOG.md
47
CHANGELOG.md
|
@ -1,5 +1,52 @@
|
|||
# Changelog
|
||||
|
||||
## 2.31.1 - 2025-02-04
|
||||
#### Fix
|
||||
- Fix error while closing the writer for `GCPStorage` ([PR#1116](https://github.com/falcosecurity/falcosidekick/pull/1116) thanks to [@chanukya-yekollu-exa](https://github.com/chanukya-yekollu-exa))
|
||||
|
||||
## 2.31.0 - 2025-02-03
|
||||
#### New
|
||||
- New output: **OTLP Logs** ([PR#1109](https://github.com/falcosecurity/falcosidekick/pull/1109))
|
||||
|
||||
#### Enhancement
|
||||
- Add the namespace and the pod name as labels by default in `Loki` payload ([PR#1087](https://github.com/falcosecurity/falcosidekick/pull/1087) thanks to [@afreyermuth98](https://github.com/afreyermuth98))
|
||||
- Allow to set the format for the `Loki` payload to JSON ([PR#1091](https://github.com/falcosecurity/falcosidekick/pull/1091))
|
||||
- Allow to set a template for the subjects for `NATS`/`STAN` outputs ([PR#1099](https://github.com/falcosecurity/falcosidekick/pull/1099))
|
||||
- Improve the logger with a generic and extensible method ([PR#1102](https://github.com/falcosecurity/falcosidekick/pull/1102))
|
||||
|
||||
#### Fix
|
||||
- Remove forgotten debug line ([PR#1088](https://github.com/falcosecurity/falcosidekick/pull/1088))
|
||||
- Fix missing templated fields as labels in `Loki` payload ([PR#1091](https://github.com/falcosecurity/falcosidekick/pull/1091))
|
||||
- Fix creation error of `ClusterPolicyReports` ([PR#1100](https://github.com/falcosecurity/falcosidekick/pull/1100))
|
||||
- Fix missing custom headers for HTTP requests for `Loki` ([PR#1107](https://github.com/falcosecurity/falcosidekick/pull/1107) thanks to [@lsroe](https://github.com/lsroe))
|
||||
- Fix wrong key format for `Prometheus` format ([PR#1110](https://github.com/falcosecurity/falcosidekick/pull/1110) thanks to [@rubensf](https://github.com/rubensf))
|
||||
|
||||
## 2.30.0 - 2024-11-28
|
||||
#### New
|
||||
- New output: **Webex** ([PR#979](https://github.com/falcosecurity/falcosidekick/pull/979) thanks to [@k0rventen](https://github.com/k0rventen))
|
||||
- New output: **OTLP Metrics** ([PR#1012](https://github.com/falcosecurity/falcosidekick/pull/1012) thanks to [@ekoops](https://github.com/ekoops))
|
||||
- New output: **Datadog Logs** ([PR#1052](https://github.com/falcosecurity/falcosidekick/pull/1052) thanks to [@yohboy](https://github.com/yohboy))
|
||||
|
||||
#### Enhancement
|
||||
- Reuse of the http client for 3-4x increase of the throughput ([PR#962](https://github.com/falcosecurity/falcosidekick/pull/962) thanks to [@aleksmaus](https://github.com/aleksmaus))
|
||||
- Improve outputs throughput handling ([PR#966](https://github.com/falcosecurity/falcosidekick/pull/966) thanks to [@aleksmaus](https://github.com/aleksmaus))
|
||||
- Batching and gzip compression for the `Elasticsearch` output ([PR#967](https://github.com/falcosecurity/falcosidekick/pull/967) thanks to [@aleksmaus](https://github.com/aleksmaus))
|
||||
- Use the same convention for the Prometheus metrics than Falco ([PR#995](https://github.com/falcosecurity/falcosidekick/pull/995))
|
||||
- Add `APIKey` for `Elasticsearch` output ([PR#980](https://github.com/falcosecurity/falcosidekick/pull/980) thanks to [@aleksmaus](https://github.com/aleksmaus))
|
||||
- Add `Pipeline` configuration for `Elasticsearch` output ([PR#981](https://github.com/falcosecurity/falcosidekick/pull/981) thanks to [@aleksmaus](https://github.com/aleksmaus))
|
||||
- Add `MessageThreadID` configuration in `Telegram` output ([PR#1008](https://github.com/falcosecurity/falcosidekick/pull/1008) thanks to [@vashian](https://github.com/vashian))
|
||||
- Support multi-architecture in build ([PR#1024](https://github.com/falcosecurity/falcosidekick/pull/1024) thanks to [@nickytd](https://github.com/nickytd))
|
||||
- Add `falco` as source for the `Datadog Events` ([PR#1043](https://github.com/falcosecurity/falcosidekick/pull/1043) thanks to [@maxd-wttj](https://github.com/maxd-wttj))
|
||||
- Support `AlertManager` output in HA mode ([PR#1051](https://github.com/falcosecurity/falcosidekick/pull/1051))
|
||||
|
||||
#### Fix
|
||||
- Fix `PolicyReports` created in the same namespace than previous event ([PR#978](https://github.com/falcosecurity/falcosidekick/pull/978))
|
||||
- Fix missing `customFields/extraFields` in the `Elasticsearch` payload ([PR#1033](https://github.com/falcosecurity/falcosidekick/pull/1033))
|
||||
- Fix incorrect key name for `CloudEvent` spec attribute ([PR#1051](https://github.com/falcosecurity/falcosidekick/pull/1051))
|
||||
|
||||
> [!WARNING]
|
||||
> Breaking change: The Prometheus metrics have different names from this release, it might break the queries for the dashboards and alerts.
|
||||
|
||||
## 2.29.0 - 2024-07-01
|
||||
#### New
|
||||
- New output: **Dynatrace** ([PR#575](https://github.com/falcosecurity/falcosidekick/pull/575) thanks to [@blu3r4y](https://github.com/blu3r4y))
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
ARG BASE_IMAGE=alpine:3.19
|
||||
# Final Docker image
|
||||
FROM ${BASE_IMAGE} AS final-stage
|
||||
LABEL MAINTAINER "Thomas Labarussias <issif+falcosidekick@gadz.org>"
|
||||
LABEL MAINTAINER="Thomas Labarussias <issif+falcosidekick@gadz.org>"
|
||||
|
||||
RUN apk add --update --no-cache ca-certificates gcompat
|
||||
|
||||
|
|
|
@ -12,7 +12,7 @@ RUN make falcosidekick
|
|||
|
||||
# Final Docker image
|
||||
FROM ${BASE_IMAGE} AS final-stage
|
||||
LABEL MAINTAINER "Thomas Labarussias <issif+falcosidekick@gadz.org>"
|
||||
LABEL MAINTAINER="Thomas Labarussias <issif+falcosidekick@gadz.org>"
|
||||
|
||||
RUN apk add --update --no-cache ca-certificates
|
||||
|
||||
|
|
11
Makefile
11
Makefile
|
@ -2,7 +2,6 @@
|
|||
SHELL=/bin/bash -o pipefail
|
||||
|
||||
.DEFAULT_GOAL:=help
|
||||
|
||||
GOPATH := $(shell go env GOPATH)
|
||||
GOARCH := $(shell go env GOARCH)
|
||||
GOOS := $(shell go env GOOS)
|
||||
|
@ -55,15 +54,15 @@ IMAGE_TAG := falcosecurity/falcosidekick:latest
|
|||
.PHONY: falcosidekick
|
||||
falcosidekick:
|
||||
$(GO) mod download
|
||||
$(GO) build -trimpath -ldflags "$(LDFLAGS)" -gcflags all=-trimpath=/src -asmflags all=-trimpath=/src -a -installsuffix cgo -o $@ .
|
||||
GOOS=$(GOOS) GOARCH=$(GOARCH) $(GO) build -trimpath -ldflags "$(LDFLAGS)" -gcflags all=-trimpath=/src -asmflags all=-trimpath=/src -a -installsuffix cgo -o $@ .
|
||||
|
||||
.PHONY: falcosidekick-linux-amd64
|
||||
falcosidekick-linux-amd64:
|
||||
.PHONY: falcosidekick-linux
|
||||
falcosidekick-linux:
|
||||
$(GO) mod download
|
||||
GOOS=linux GOARCH=amd64 $(GO) build -gcflags all=-trimpath=/src -asmflags all=-trimpath=/src -a -installsuffix cgo -o falcosidekick .
|
||||
GOOS=linux GOARCH=$(GOARCH) $(GO) build -ldflags "$(LDFLAGS)" -gcflags all=-trimpath=/src -asmflags all=-trimpath=/src -a -installsuffix cgo -o falcosidekick .
|
||||
|
||||
.PHONY: build-image
|
||||
build-image: falcosidekick-linux-amd64
|
||||
build-image: falcosidekick-linux
|
||||
$(DOCKER) build -t $(IMAGE_TAG) .
|
||||
|
||||
.PHONY: push-image
|
||||
|
|
|
@ -78,6 +78,7 @@ Follow the links to get the configuration of each output.
|
|||
- [**Rocketchat**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/rocketchat.md)
|
||||
- [**Mattermost**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/mattermost.md)
|
||||
- [**Teams**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/teams.md)
|
||||
- [**Webex**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/webex.md)
|
||||
- [**Discord**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/discord.md)
|
||||
- [**Google Chat**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/googlechat.md)
|
||||
- [**Zoho Cliq**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/cliq.md)
|
||||
|
@ -94,6 +95,7 @@ Follow the links to get the configuration of each output.
|
|||
- [**Spyderbat**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/spyderbat.md)
|
||||
- [**TimescaleDB**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/timescaledb.md)
|
||||
- [**Dynatrace**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/dynatrace.md)
|
||||
- [**OTEL Metrics**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/otlp_metrics.md) (for both events and monitoring of `falcosidekick`)
|
||||
|
||||
### Alerting
|
||||
|
||||
|
@ -113,6 +115,8 @@ Follow the links to get the configuration of each output.
|
|||
- [**OpenObserve**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/openobserve.md)
|
||||
- [**SumoLogic**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/sumologic.md)
|
||||
- [**Quickwit**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/quickwit.md)
|
||||
- [**Datadog Logs**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/datadog_logs.md)
|
||||
- [**Logstash**](https://github.com/falcosecurity/falcosidekick/blob/master/docs/outputs/logstash.md)
|
||||
|
||||
### Object Storage
|
||||
|
||||
|
@ -328,6 +332,9 @@ customfields: # custom fields are added to falco events, if the value starts wit
|
|||
# Ckey: "CValue"
|
||||
templatedfields: # templated fields are added to falco events and metrics, it uses Go template + output_fields values
|
||||
# Dkey: '{{ or (index . "k8s.ns.labels.foo") "bar" }}'
|
||||
customtags: # custom tags are added to the falco events, if the value starts with % the relative env var is used
|
||||
# - tagA
|
||||
# - tagB
|
||||
# bracketreplacer: "_" # if not empty, replace the brackets in keys of Output Fields
|
||||
outputFieldFormat: "<timestamp>: <priority> <output> <custom_fields> <templated_fields>" # if not empty, allow to change the format of the output field. (default: "<timestamp>: <priority> <output>")
|
||||
mutualtlsfilespath: "/etc/certs" # folder which will be used to store client.crt, client.key and ca.crt files for mutual tls for outputs, will be deprecated in the future (default: "/etc/certs")
|
||||
|
|
|
@ -8,6 +8,9 @@ customfields: # custom fields are added to falco events and metrics, if the valu
|
|||
templatedfields: # templated fields are added to falco events and metrics, it uses Go template + output_fields values
|
||||
# Dkey: '{{ or (index . "k8s.ns.labels.foo") "bar" }}'
|
||||
# bracketreplacer: "_" # if not empty, the brackets in keys of Output Fields are replaced
|
||||
customtags: # custom tags are added to the falco events, if the value starts with % the relative env var is used
|
||||
- tagA
|
||||
- tagB
|
||||
outputFieldFormat: "<timestamp>: <priority> <output> <custom_fields> <templated_fields>" # if not empty, allow to change the format of the output field. (default: "<timestamp>: <priority> <output>")
|
||||
mutualtlsfilespath: "/etc/certs" # folder which will be used to store client.crt, client.key and ca.crt files for mutual tls for outputs, will be deprecated in the future (default: "/etc/certs")
|
||||
mutualtlsclient: # takes priority over mutualtlsfilespath if not empty
|
||||
|
@ -65,13 +68,23 @@ teams:
|
|||
outputformat: "all" # all (default), text, facts
|
||||
minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
|
||||
|
||||
webex:
|
||||
# webhookurl: "" # Webex WebhookURL, if not empty, Webex output is enabled
|
||||
# minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
|
||||
|
||||
datadog:
|
||||
# apikey: "" # Datadog API Key, if not empty, Datadog output is enabled
|
||||
# host: "" # Datadog host. Override if you are on the Datadog EU site. Defaults to american site with "https://api.datadoghq.com"
|
||||
# minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
|
||||
|
||||
datadoglogs:
|
||||
# apikey: "" # Datadog API Key, if not empty, Datadog Logs output is enabled
|
||||
# host: "" # Datadog host. Override if you are on the Datadog EU site. Defaults to american site with "https://http-intake.logs.datadoghq.com/"
|
||||
# minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
|
||||
# service: "" # The name of the application or service generating the log events.
|
||||
|
||||
alertmanager:
|
||||
# hostport: "" # http://{domain or ip}:{port}, if not empty, Alertmanager output is enabled
|
||||
# hostport: "" # Comma separated list of http://{domain or ip}:{port} that will all receive the payload, if not empty, Alertmanager output is enabled
|
||||
# minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
|
||||
# mutualtls: false # if true, checkcert flag will be ignored (server cert will always be checked)
|
||||
# checkcert: true # check if ssl certificate of the output is valid (default: true)
|
||||
|
@ -89,10 +102,12 @@ elasticsearch:
|
|||
# hostport: "" # http://{domain or ip}:{port}, if not empty, Elasticsearch output is enabled
|
||||
# index: "falco" # index (default: falco)
|
||||
# type: "_doc"
|
||||
# pipeline: "" # optional ingest pipeline name
|
||||
# minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
|
||||
# suffix: "daily" # date suffix for index rotation : daily (default), monthly, annually, none
|
||||
# mutualtls: false # if true, checkcert flag will be ignored (server cert will always be checked)
|
||||
# checkcert: true # check if ssl certificate of the output is valid (default: true)
|
||||
# apikey: "" # use this APIKey to authenticate to Elasticsearch if the APIKey is not empty (default: "")
|
||||
# username: "" # use this username to authenticate to Elasticsearch if the username is not empty (default: "")
|
||||
# password: "" # use this password to authenticate to Elasticsearch if the password is not empty (default: "")
|
||||
# flattenfields: false # replace . by _ to avoid mapping conflicts, force to true if createindextemplate==true (default: false)
|
||||
|
@ -101,6 +116,12 @@ elasticsearch:
|
|||
# numberofreplicas: 3 # number of replicas set by the index template (default: 3)
|
||||
# customHeaders: # Custom headers to add in POST, useful for Authentication
|
||||
# key: value
|
||||
# enablecompression: false # if true enables gzip compression for http requests (default: false)
|
||||
# batching: # batching configuration, improves throughput dramatically utilizing _bulk Elasticsearch API
|
||||
# enabled: true # if true enables batching
|
||||
# batchsize: 5242880 # batch size in bytes (default: 5 MB)
|
||||
# flushinterval: 1s # batch flush interval (default: 1s)
|
||||
# maxconcurrentrequests: 1 # max number of concurrent http requests (default: 1)
|
||||
|
||||
quickwit:
|
||||
# hostport: "" # http(s)://{domain or ip}:{port}, if not empty, Quickwit output is enabled
|
||||
|
@ -135,6 +156,7 @@ loki:
|
|||
# mutualtls: false # if true, checkcert flag will be ignored (server cert will always be checked)
|
||||
# checkcert: true # check if ssl certificate of the output is valid (default: true)
|
||||
# tenant: "" # Add the Tenant header
|
||||
# format: "text" # Format for the log entry value: json, text (default)
|
||||
# endpoint: "/loki/api/v1/push" # The endpoint URL path, default is "/loki/api/v1/push" more info : https://grafana.com/docs/loki/latest/api/#post-apiprompush
|
||||
# extralabels: "" # comma separated list of fields to use as labels additionally to rule, source, priority, tags and custom_fields
|
||||
# customHeaders: # Custom headers to add in POST, useful for Authentication
|
||||
|
@ -142,6 +164,7 @@ loki:
|
|||
|
||||
nats:
|
||||
# hostport: "" # nats://{domain or ip}:{port}, if not empty, NATS output is enabled
|
||||
# subjecttemplate: "falco.<priority>.<rule>" # template for the subject, tokens <priority> and <rule> will be automatically replaced (default: falco.<priority>.<rule>)
|
||||
# minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
|
||||
# mutualtls: false # if true, checkcert flag will be ignored (server cert will always be checked)
|
||||
# checkcert: true # check if ssl certificate of the output is valid (default: true)
|
||||
|
@ -150,6 +173,7 @@ stan:
|
|||
# hostport: "" # nats://{domain or ip}:{port}, if not empty, STAN output is enabled
|
||||
# clusterid: "" # Cluster name, if not empty, STAN output is enabled
|
||||
# clientid: "" # Client ID, if not empty, STAN output is enabled
|
||||
# subjecttemplate: "falco.<priority>.<rule>" # template for the subject, tokens <priority> and <rule> will be automatically replaced (default: falco.<priority>.<rule>)
|
||||
# minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
|
||||
# mutualtls: false # if true, checkcert flag will be ignored (server cert will always be checked)
|
||||
# checkcert: true # check if ssl certificate of the output is valid (default: true)
|
||||
|
@ -516,7 +540,7 @@ sumologic:
|
|||
|
||||
otlp:
|
||||
traces:
|
||||
# endpoint: "" # OTLP endpoint in the form of http://{domain or ip}:4318/v1/traces
|
||||
# endpoint: "" # OTLP endpoint in the form of http(s)://{domain or ip}:4318(/v1/traces), if not empty, OTLP Traces output is enabled
|
||||
# protocol: "" # OTLP protocol http/json, http/protobuf, grpc (default: "" which uses SDK default: http/json)
|
||||
# timeout: "" # OTLP timeout: timeout value in milliseconds (default: "" which uses SDK default: 10000)
|
||||
# headers: "" # OTLP headers: list of headers to apply to all outgoing traces in the form of "some-key=some-value,other-key=other-value" (default: "")
|
||||
|
@ -527,8 +551,41 @@ otlp:
|
|||
# OTEL_EXPORTER_OTLP_TIMEOUT: 10000
|
||||
# minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
|
||||
# checkcert: true # Set to false if you want to skip TLS certificate validation (default: true)
|
||||
logs:
|
||||
# endpoint: "" # OTLP endpoint in the form of http(s)://{domain or ip}:4318(/v1/logs), if not empty, OTLP Logs output is enabled
|
||||
# protocol: "" # OTLP protocol http/json, http/protobuf, grpc (default: "" which uses SDK default: http/json)
|
||||
# timeout: "" # OTLP timeout: timeout value in milliseconds (default: "" which uses SDK default: 10000)
|
||||
# headers: "" # OTLP headers: list of headers to apply to all outgoing traces in the form of "some-key=some-value,other-key=other-value" (default: "")
|
||||
# extraenvvars: # Extra env vars (override the other settings)
|
||||
# OTEL_EXPORTER_OTLP_TRACES_TIMEOUT: 10000
|
||||
# OTEL_EXPORTER_OTLP_TIMEOUT: 10000
|
||||
# minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
|
||||
# checkcert: true # Set to false if you want to skip TLS certificate validation (default: true)
|
||||
metrics:
|
||||
# endpoint: "" # OTLP endpoint, typically in the form http(s)://{domain or ip}:4318(/v1/metrics), if not empty, OTLP Metrics output is enabled
|
||||
# protocol: "" # OTLP transport protocol to be used for metrics data; it can be "grpc" or "http/protobuf" (default: "grpc")
|
||||
# timeout: "" # OTLP timeout for outgoing metrics in milliseconds (default: "" which uses SDK default: 10000)
|
||||
# headers: "" # List of headers to apply to all outgoing metrics in the form of "some-key=some-value,other-key=other-value" (default: "")
|
||||
# extraenvvars: # Extra env vars (override the other settings) (default: "")
|
||||
# OTEL_EXPORTER_OTLP_METRICS_TIMEOUT: 10000
|
||||
# OTEL_EXPORTER_OTLP_TIMEOUT: 10000
|
||||
# minimumpriority: "" # Minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default: "")
|
||||
# checkcert: true # Set to false if you want to skip TLS certificate validation (only with https) (default: true)
|
||||
# extraattributes: "" # Comma-separated list of fields to use as labels additionally to source, priority, rule, hostname, tags, k8s_ns_name, k8s_pod_name and custom_fields
|
||||
|
||||
talon:
|
||||
# address: "" # Falco talon address, if not empty, Falco Talon output is enabled
|
||||
# checkcert: false # check if ssl certificate of the output is valid (default: true)
|
||||
# minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
|
||||
# minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
|
||||
|
||||
logstash:
|
||||
# address: "" # Logstash address, if not empty, Logstash output is enabled
|
||||
# port: 5044 # Logstash port number (default: 5044)
|
||||
# tls: false # communicate over tls; requires Logstash version 8+ to work
|
||||
# mutualtls: false # or authenticate to the output with TLS; if true, checkcert flag will be ignored (server cert will always be checked) (default: false)
|
||||
# checkcert: true # Check if ssl certificate of the output is valid (default: true)
|
||||
# certfile: "" # Use this certificate file instead of the client certificate when using mutual TLS (default: "")
|
||||
# keyfile: "" # Use this key file instead of the client certificate when using mutual TLS (default: "")
|
||||
# cacertfile: "" # Use this CA certificate file instead of the client certificate when using mutual TLS (default: "")
|
||||
# minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default: "debug")
|
||||
# tags: ["falco"] # An additional list of tags that will be added to those produced by Falco (default: [])
|
||||
|
|
|
@ -13,9 +13,9 @@
|
|||
|
||||
## Configuration
|
||||
|
||||
| Setting | Env var | Default value | Description |
|
||||
| Setting | Env var | Default value | Description |
|
||||
| --------------------------------------- | --------------------------------------- | -------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `alertmanager.hostport` | `ALERTMANAGER_HOSTPORT` | | http://{domain or ip}:{port}, if not empty, Alertmanager output is **enabled** |
|
||||
| `alertmanager.hostport` | `ALERTMANAGER_HOSTPORT` | | Comma separated list of http://{domain or ip}:{port} that will all receive the payload, if not empty, Alertmanager output is **enabled** |
|
||||
| `alertmanager.mutualtls` | `ALERTMANAGER_MUTUALTLS` | `false` | Authenticate to the output with TLS, if true, checkcert flag will be ignored (server cert will always be checked) |
|
||||
| `alertmanager.checkcert` | `ALERTMANAGER_CHECKCERT` | `true` | check if ssl certificate of the output is valid |
|
||||
| `alertmanager.endpoint` | `ALERTMANAGER_ENDPOINT` | `/api/v1/alerts` | Alertmanager endpoint for posting alerts `/api/v1/alerts` or `/api/v2/alerts` |
|
||||
|
|
|
@ -38,4 +38,4 @@ Filter the events in the UI with `sources: falco`.
|
|||
|
||||
## Screenshots
|
||||
|
||||

|
||||

|
||||
|
|
|
@ -0,0 +1,43 @@
|
|||
# Datadog Logs
|
||||
|
||||
- **Category**: Logs
|
||||
- **Website**: https://www.datadoghq.com/
|
||||
|
||||
## Table of content
|
||||
|
||||
- [Datadog Logs](#datadog-logs)
|
||||
- [Table of content](#table-of-content)
|
||||
- [Configuration](#configuration)
|
||||
- [Example of config.yaml](#example-of-configyaml)
|
||||
- [Additional info](#additional-info)
|
||||
- [Screenshots](#screenshots)
|
||||
|
||||
## Configuration
|
||||
|
||||
| Setting | Env var | Default value | Description |
|
||||
|-------------------------------|-----------------------------| -------------------------- | --------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `datadoglogs.apikey` | `DATADOGLOGS_APIKEY` | | Datadog API Key, if not empty, Datadog Logs output is **enabled** |
|
||||
| `datadoglogs.host` | `DATADOGLOGS_HOST` | `https://http-intake.logs.datadoghq.com/` | Datadog host. Override if you are on the Datadog EU site |
|
||||
| `datadoglogs.minimumpriority` | `DATADOGLOGS_MINIMUMPRIORITY` | `""` (= `debug`) | Minimum priority of event for using this output, order is `emergency,alert,critical,error,warning,notice,informational,debug or ""` |
|
||||
| `datadoglogs.service` | `DATADOGLOGS_SERVICE` | `""` | The name of the application or service generating the log events. |
|
||||
|
||||
> [!NOTE]
|
||||
The Env var values override the settings from yaml file.
|
||||
|
||||
## Example of config.yaml
|
||||
|
||||
```yaml
|
||||
datadoglogs:
|
||||
apikey: "" # Datadog API Key, if not empty, Datadog Logs output is enabled
|
||||
# host: "" # Datadog host. Override if you are on the Datadog EU site. Defaults to american site with "https://http-intake.logs.datadoghq.com/"
|
||||
# minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
|
||||
# service: "" # The name of the application or service generating the log events.
|
||||
```
|
||||
|
||||
## Additional info
|
||||
|
||||
Filter the logs in the UI with `sources: falco`.
|
||||
|
||||
## Screenshots
|
||||
|
||||

|
|
@ -13,26 +13,40 @@
|
|||
|
||||
## Configuration
|
||||
|
||||
| Setting | Env var | Default value | Description |
|
||||
| ----------------------------------- | ----------------------------------- | ---------------- | ----------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `elasticsearch.hostport` | `ELASTICSEARCH_HOSTPORT` | | http://{domain or ip}:{port}, if not empty, Elasticsearch output is **enabled** |
|
||||
| `elasticsearch.index` | `ELASTICSEARCH_INDEX` | `falco` | Index |
|
||||
| `elasticsearch.type` | `ELASTICSEARCH_TYPE` | `_doc` | Index |
|
||||
| `elasticsearch.suffix` | `ELASTICSEARCH_SUFFIX` | `daily` | Date suffix for index rotation : `daily`, `monthly`, `annually`, `none` |
|
||||
| `elasticsearch.username` | `ELASTICSEARCH_USERNAME` | | Use this username to authenticate to Elasticsearch |
|
||||
| `elasticsearch.password` | `ELASTICSEARCH_PASSWORD` | | Use this password to authenticate to Elasticsearch |
|
||||
| `elasticsearch.flattenfields` | `ELASTICSEARCH_FLATTENFIELDS` | `false` | Replace . by _ to avoid mapping conflicts, force to true if `createindextemplate=true` |
|
||||
| `elasticsearch.createindextemplate` | `ELASTICSEARCH_CREATEINDEXTEMPLATE` | `false` | Create an index template |
|
||||
| `elasticsearch.numberofshards` | `ELASTICSEARCH_NUMBEROFSHARDS` | `3` | Number of shards set by the index template |
|
||||
| `elasticsearch.numberofreplicas` | `ELASTICSEARCH_REPLICAS` | `3` | Number of replicas set by the index template |
|
||||
| `elasticsearch.customheaders` | `ELASTICSEARCH_CUSTOMHEADERS` | | Custom headers to add in POST, useful for Authentication |
|
||||
| `elasticsearch.mutualtls` | `ELASTICSEARCH_MUTUALTLS` | `false` | Authenticate to the output with TLS, if true, checkcert flag will be ignored (server cert will always be checked) |
|
||||
| `elasticsearch.checkcert` | `ELASTICSEARCH_CHECKCERT` | `true` | Check if ssl certificate of the output is valid |
|
||||
| `elasticsearch.minimumpriority` | `ELASTICSEARCH_MINIMUMPRIORITY` | `""` (= `debug`) | Minimum priority of event for using this output, order is `emergency,alert,critical,error,warning,notice,informational,debug or ""` |
|
||||
| Setting | Env var | Default value | Description |
|
||||
| ------------------------------------- | -------------------------------------- | ---------------- | ----------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `elasticsearch.hostport` | `ELASTICSEARCH_HOSTPORT` | | http://{domain or ip}:{port}, if not empty, Elasticsearch output is **enabled** |
|
||||
| `elasticsearch.index` | `ELASTICSEARCH_INDEX` | `falco` | Index |
|
||||
| `elasticsearch.type` | `ELASTICSEARCH_TYPE` | `_doc` | Index |
|
||||
| `elasticsearch.pipeline` | `ELASTICSEARCH_PIPELINE` | | Optional ingest pipeline name. Documentation: https://www.elastic.co/guide/en/elasticsearch/reference/current/ingest.html |
|
||||
| `elasticsearch.suffix` | `ELASTICSEARCH_SUFFIX` | `daily` | Date suffix for index rotation : `daily`, `monthly`, `annually`, `none` |
|
||||
| `elasticsearch.apikey` | `ELASTICSEARCH_APIKEY` | | Use this APIKey to authenticate to Elasticsearch |
|
||||
| `elasticsearch.username` | `ELASTICSEARCH_USERNAME` | | Use this username to authenticate to Elasticsearch |
|
||||
| `elasticsearch.password` | `ELASTICSEARCH_PASSWORD` | | Use this password to authenticate to Elasticsearch |
|
||||
| `elasticsearch.flattenfields` | `ELASTICSEARCH_FLATTENFIELDS` | `false` | Replace . by _ to avoid mapping conflicts, force to true if `createindextemplate=true` |
|
||||
| `elasticsearch.createindextemplate` | `ELASTICSEARCH_CREATEINDEXTEMPLATE` | `false` | Create an index template |
|
||||
| `elasticsearch.numberofshards` | `ELASTICSEARCH_NUMBEROFSHARDS` | `3` | Number of shards set by the index template |
|
||||
| `elasticsearch.numberofreplicas` | `ELASTICSEARCH_NUMBEROFREPLICAS` | `3` | Number of replicas set by the index template |
|
||||
| `elasticsearch.customheaders` | `ELASTICSEARCH_CUSTOMHEADERS` | | Custom headers to add in POST, useful for Authentication |
|
||||
| `elasticsearch.mutualtls` | `ELASTICSEARCH_MUTUALTLS` | `false` | Authenticate to the output with TLS, if true, checkcert flag will be ignored (server cert will always be checked) |
|
||||
| `elasticsearch.checkcert` | `ELASTICSEARCH_CHECKCERT` | `true` | Check if ssl certificate of the output is valid |
|
||||
| `elasticsearch.minimumpriority` | `ELASTICSEARCH_MINIMUMPRIORITY` | `""` (= `debug`) | Minimum priority of event for using this output, order is `emergency,alert,critical,error,warning,notice,informational,debug or ""` |
|
||||
| `elasticsearch.maxconcurrentrequests` | `ELASTICSEARCH_MAXCONCURRENTREQUESTS` | `1` | Max number of concurrent requests |
|
||||
| `elasticsearch.enablecompression` | `ELASTICSEARCH_ENABLECOMPRESSION` | `false` | Enables gzip compression |
|
||||
| `elasticsearch.batching.enabled` | `ELASTICSEARCH_BATCHING_ENABLED` | `false` | Enables batching (utilizing Elasticsearch bulk API) |
|
||||
| `elasticsearch.batching.batchsize` | `ELASTICSEARCH_BATCHING_BATCHSIZE` | `5242880` | Batch size in bytes, default 5MB |
|
||||
| `elasticsearch.batching.flushinterval`| `ELASTICSEARCH_BATCHING_FLUSHINTERVAL` | `1s` | Batch flush interval, use valid Go duration string |
|
||||
|
||||
> [!NOTE]
|
||||
The Env var values override the settings from yaml file.
|
||||
|
||||
> [!NOTE]
|
||||
Increasing the default number of concurrent requests is a good way to increase throughput of the http outputs. This also increases the potential number of open connections. Choose wisely.
|
||||
|
||||
> [!NOTE]
|
||||
Enabling batching for Elasticsearch is invaluable when the expected number of falco alerts is in the hundreds or thousands per second. The batching of data can be fine-tuned for your specific use case. The batch request is sent to Elasticsearch when the pending data size reaches `batchsize` or upon the `flushinterval`.
|
||||
Enabling gzip compression increases throughput even further.
|
||||
|
||||
> [!WARNING]
|
||||
By enabling the creation of the index template with `elasticsearch.createindextemplate=true`, the output fields of the Falco events will be flatten to avoid any mapping conflict.
|
||||
|
||||
|
@ -51,6 +65,12 @@ elasticsearch:
|
|||
# mutualtls: false # if true, checkcert flag will be ignored (server cert will always be checked)
|
||||
# checkcert: true # check if ssl certificate of the output is valid (default: true)
|
||||
# minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
|
||||
# enablecompression: # if true enables gzip compression for http requests (default: false)
|
||||
# batching: # batching configuration, improves throughput dramatically utilizing _bulk Elasticsearch API
|
||||
# enabled: true # if true enables batching
|
||||
# batchsize: 5242880 # batch size in bytes (default: 5 MB)
|
||||
# flushinterval: 1s # batch flush interval (default: 1s)
|
||||
# maxconcurrentrequests: # max number of concurrent http requests (default: 1)
|
||||
```
|
||||
|
||||
## Screenshots
|
||||
|
|
Binary file not shown.
After Width: | Height: | Size: 375 KiB |
Binary file not shown.
After Width: | Height: | Size: 81 KiB |
Binary file not shown.
After Width: | Height: | Size: 162 KiB |
|
@ -0,0 +1,51 @@
|
|||
# Logstash
|
||||
|
||||
- **Category**: Logs
|
||||
- **Website**: https://github.com/elastic/logstash
|
||||
|
||||
## Table of content
|
||||
|
||||
- [Logstash](#logstash)
|
||||
- [Table of content](#table-of-content)
|
||||
- [Configuration](#configuration)
|
||||
- [Example of config.yaml](#example-of-configyaml)
|
||||
- [Additional info](#additional-info)
|
||||
- [Screenshots](#screenshots)
|
||||
|
||||
## Configuration
|
||||
|
||||
| Setting | Env var | Default value | Description |
|
||||
| -------------------------- | -------------------------- | ---------------- | ----------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `logstash.address` | `LOGSTASH_ADDRESS` | | Logstash address, if not empty, Logstash output is **enabled** |
|
||||
| `logstash.port` | `LOGSTASH_PORT` | 5044 | Logstash port number |
|
||||
| `logstash.tls` | `LOGSTASH_TLS` | false | Use TLS connection (true/false) |
|
||||
| `logstash.mutualtls` | `LOGSTASH_MUTUALTLS` | false | Authenticate to the output with TLS; if true, checkcert flag will be ignored (server cert will always be checked) |
|
||||
| `logstash.checkcert` | `LOGSTASH_CHECKCERT` | true | Check if ssl certificate of the output is valid |
|
||||
| `logstash.certfile` | `LOGSTASH_CERTFILE` | | Use this certificate file instead of the client certificate when using mutual TLS |
|
||||
| `logstash.keyfile` | `LOGSTASH_KEYFILE` | | Use this key file instead of the client certificate when using mutual TLS |
|
||||
| `logstash.cacertfile` | `LOGSTASH_CACERTFILE` | | Use this CA certificate file instead of the client certificate when using mutual TLS |
|
||||
| `logstash.minimumpriority` | `LOGSTASH_MINIMUMPRIORITY` | `""` (= `debug`) | Minimum priority of event for using this output, order is `emergency,alert,critical,error,warning,notice,informational,debug or ""` |
|
||||
| `logstash.tags` | `LOGSTASH_TAGS` | | An additional list of tags that will be added to those produced by Falco; these tags may help in decision-making while routing logs |
|
||||
|
||||
> [!NOTE]
|
||||
Values stored in environment variables will override the settings from yaml file.
|
||||
|
||||
## Example of config.yaml
|
||||
|
||||
```yaml
|
||||
logstash:
|
||||
address: "" # Logstash address, if not empty, Logstash output is enabled
|
||||
# port: 5044 # Logstash port number (default: 5044)
|
||||
# tls: false # communicate over tls; requires Logstash version 8+ to work
|
||||
# mutualtls: false # or authenticate to the output with TLS; if true, checkcert flag will be ignored (server cert will always be checked) (default: false)
|
||||
# checkcert: true # Check if ssl certificate of the output is valid (default: true)
|
||||
# certfile: "" # Use this certificate file instead of the client certificate when using mutual TLS (default: "")
|
||||
# keyfile: "" # Use this key file instead of the client certificate when using mutual TLS (default: "")
|
||||
# cacertfile: "" # Use this CA certificate file instead of the client certificate when using mutual TLS (default: "")
|
||||
# minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default: "debug")
|
||||
# tags: ["falco"] # An additional list of tags that will be added to those produced by Falco (default: [])
|
||||
```
|
||||
|
||||
## Additional info
|
||||
|
||||
## Screenshots
|
|
@ -14,18 +14,19 @@
|
|||
|
||||
## Configuration
|
||||
|
||||
| Setting | Env var | Default value | Description |
|
||||
| ---------------------- | ---------------------- | ----------------- | ----------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `loki.hostport` | `LOKI_HOSTPORT` | | http://{domain or ip}:{port}, if not empty, Loki output is **enabled** |
|
||||
| `loki.user` | `LOKI_USER` | | User for Grafana Logs |
|
||||
| `loki.apikey` | `LOKI_APIKEY` | | API Key for Grafana Logs |
|
||||
| `loki.tenant` | `LOKI_TENANT` | | Add the tenant header if needed |
|
||||
| `loki.endpoint` | `LOKI_ENDPOINT` | `/api/prom/push ` | The endpoint URL path, more info : https://grafana.com/docs/loki/latest/api/#post-apiprompush |
|
||||
| `loki.extralabels` | `LOKI_EXTRALABELS` | | comma separated list of fields to use as labels additionally to `rule`, `source`, `priority`, `tags` and `custom_fields` |
|
||||
| `loki.customheaders` | `LOKI_CUSTOMHEADERS` | | Custom headers to add in POST, useful for Authentication |
|
||||
| `loki.mutualtls` | `LOKI_MUTUALTLS` | `false` | Authenticate to the output with TLS, if true, checkcert flag will be ignored (server cert will always be checked) |
|
||||
| `loki.checkcert` | `LOKI_CHECKCERT` | `true` | Check if ssl certificate of the output is valid |
|
||||
| `loki.minimumpriority` | `LOKI_MINIMUMPRIORITY` | `""` (= `debug`) | Minimum priority of event for using this output, order is `emergency,alert,critical,error,warning,notice,informational,debug or ""` |
|
||||
| Setting | Env var | Default value | Description | | | | |
|
||||
| ---------------------- | ---------------------- | ------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | ---------------------------- | ---------------------------- | ---------------- | ----------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `loki.hostport` | `LOKI_HOSTPORT` | | http://{domain or ip}:{port}, if not empty, Loki output is **enabled** | | | | |
|
||||
| `loki.user` | `LOKI_USER` | | User for Grafana Logs | | | | |
|
||||
| `loki.apikey` | `LOKI_APIKEY` | | API Key for Grafana Logs | | | | |
|
||||
| `loki.tenant` | `LOKI_TENANT` | | Add the tenant header if needed | | | | |
|
||||
| `loki.format` | `LOKI_FORMAT` | `text` | Format for the log entry value: json, text | | | | |
|
||||
| `loki.endpoint` | `LOKI_ENDPOINT` | `/loki/api/v1/push` | The endpoint URL path, more info : https://grafana.com/docs/loki/latest/api/#post-apiprompush | | | | |
|
||||
| `loki.extralabels` | `LOKI_EXTRALABELS` | | comma separated list of fields to use as labels additionally to `rule`, `source`, `priority`, `tags` and `custom_fields` | | | | |
|
||||
| `loki.customheaders` | `LOKI_CUSTOMHEADERS` | | Custom headers to add in POST, useful for Authentication | | | | |
|
||||
| `loki.mutualtls` | `LOKI_MUTUALTLS` | `false` | Authenticate to the output with TLS, if true, checkcert flag will be ignored (server cert will always be checked) | | | | |
|
||||
| `loki.checkcert` | `LOKI_CHECKCERT` | `true` | Check if ssl certificate of the output is valid | | | | |
|
||||
| `loki.minimumpriority` | `LOKI_MINIMUMPRIORITY` | `""` (= `debug`) | Minimum priority of event for using this output, order is `emergency,alert,critical,error,warning,notice,informational,debug or ""` | | | | |
|
||||
|
||||
|
||||
> [!NOTE]
|
||||
|
@ -41,7 +42,8 @@ loki:
|
|||
# minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
|
||||
# checkcert: true # check if ssl certificate of the output is valid (default: true)
|
||||
# tenant: "" # Add the tenant header if needed. Enabled if not empty
|
||||
# endpoint: "/api/prom/push" # The endpoint URL path, default is "/api/prom/push" more info : https://grafana.com/docs/loki/latest/api/#post-apiprompush
|
||||
# format: "text" # Format for the log entry value: json, text (default)
|
||||
# endpoint: "/loki/api/v1/push" # The endpoint URL path, default is "/loki/api/v1/push" more info : https://grafana.com/docs/loki/latest/api/#post-apiprompush
|
||||
# extralabels: "" # comma separated list of fields to use as labels additionally to rule, source, priority, tags and custom_fields
|
||||
# customHeaders: # Custom headers to add in POST, useful for Authentication
|
||||
# key: value
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
|
||||
- **Category**: Chat/Messaging
|
||||
- **Website**: https://rocket.chat
|
||||
- **Website**: https://github.com/mattermost/mattermost
|
||||
|
||||
## Table of content
|
||||
|
||||
|
@ -65,4 +65,4 @@ Go templates also support some basic methods for text manipulation which can be
|
|||
|
||||
## Screenshots
|
||||
|
||||

|
||||

|
||||
|
|
|
@ -8,18 +8,23 @@
|
|||
- [NATS](#nats)
|
||||
- [Table of content](#table-of-content)
|
||||
- [Configuration](#configuration)
|
||||
- [subjecttemplate: "falco.." # template for the subject, tokens and will be automatically replaced (default: falco..)](#subjecttemplate-falco--template-for-the-subject-tokens--and--will-be-automatically-replaced-default-falco)
|
||||
- [Example of config.yaml](#example-of-configyaml)
|
||||
- [Additional info](#additional-info)
|
||||
- [Screenshots](#screenshots)
|
||||
|
||||
## Configuration
|
||||
|
||||
| Setting | Env var | Default value | Description |
|
||||
| ---------------------- | ---------------------- | ---------------- | ----------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `nats.hostport` | `NATS_HOSTPORT` | | nats://{domain or ip}:{port}, if not empty, NATS output is **enabled** |
|
||||
| `nats.mutualtls` | `NATS_MUTUALTLS` | `false` | Authenticate to the output with TLS, if true, checkcert flag will be ignored (server cert will always be checked) |
|
||||
| `nats.checkcert` | `NATS_CHECKCERT` | `true` | Check if ssl certificate of the output is valid |
|
||||
| `nats.minimumpriority` | `NATS_MINIMUMPRIORITY` | `""` (= `debug`) | Minimum priority of event for using this output, order is `emergency,alert,critical,error,warning,notice,informational,debug or ""` |
|
||||
# subjecttemplate: "falco.<priority>.<rule>" # template for the subject, tokens <priority> and <rule> will be automatically replaced (default: falco.<priority>.<rule>)
|
||||
|
||||
|
||||
| Setting | Env var | Default value | Description |
|
||||
| ---------------------- | ---------------------- | ------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `nats.hostport` | `NATS_HOSTPORT` | | nats://{domain or ip}:{port}, if not empty, NATS output is **enabled** |
|
||||
| `nats.subjecttemplate` | `NATS_SUBJECTTEMPLATE` | `falco.<priority>.<rule>` | Template for the subject, tokens <priority> and <rule> will be automatically replaced |
|
||||
| `nats.mutualtls` | `NATS_MUTUALTLS` | `false` | Authenticate to the output with TLS, if true, checkcert flag will be ignored (server cert will always be checked) |
|
||||
| `nats.checkcert` | `NATS_CHECKCERT` | `true` | Check if ssl certificate of the output is valid |
|
||||
| `nats.minimumpriority` | `NATS_MINIMUMPRIORITY` | `""` (= `debug`) | Minimum priority of event for using this output, order is `emergency,alert,critical,error,warning,notice,informational,debug or ""` |
|
||||
|
||||
> [!NOTE]
|
||||
The Env var values override the settings from yaml file.
|
||||
|
@ -30,6 +35,7 @@ The Env var values override the settings from yaml file.
|
|||
nats:
|
||||
hostport: "" # nats://{domain or ip}:{port}, if not empty, NATS output is enabled
|
||||
# minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
|
||||
# subjecttemplate: "falco.<priority>.<rule>" # template for the subject, tokens <priority> and <rule> will be automatically replaced (default: falco.<priority>.<rule>)
|
||||
# mutualtls: false # if true, checkcert flag will be ignored (server cert will always be checked)
|
||||
# checkcert: true # check if ssl certificate of the output is valid (default: true)
|
||||
```
|
||||
|
|
|
@ -0,0 +1,51 @@
|
|||
# OTEL Logs
|
||||
|
||||
- **Category**: Logs
|
||||
- **Website**: <https://opentelemetry.io/docs/concepts/signals/logs/>
|
||||
|
||||
## Table of content
|
||||
|
||||
- [OTEL Logs](#otel-logs)
|
||||
- [Table of content](#table-of-content)
|
||||
- [Configuration](#configuration)
|
||||
- [Example of config.yaml](#example-of-configyaml)
|
||||
- [Additional info](#additional-info)
|
||||
|
||||
## Configuration
|
||||
|
||||
| Setting | Env var | Default value | Description |
|
||||
| --------------------------- | --------------------------- | -------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `otlp.logs.endpoint` | `OTLP_LOGS_ENDPOINT` | | OTLP endpoint in the form of http://{domain or ip}:4318/v1/logs |
|
||||
| `otlp.logs.protocol` | `OTLP_LOGS_PROTOCOL` | `http/protobuf` (from SDK) | OTLP Protocol: `http/protobuf`, `grpc` |
|
||||
| `otlp.logs.timeout` | `OTLP_LOGS_TIMEOUT` | `10000` (from SDK) | Timeout value in milliseconds |
|
||||
| `otlp.logs.headers` | `OTLP_LOGS_HEADERS` | | List of headers to apply to all outgoing logs in the form of "some-key=some-value,other-key=other-value" |
|
||||
| `otlp.logs.synced` | `OTLP_LOGS_SYNCED` | `false` | Set to `true` if you want logs to be sent synchronously |
|
||||
| `otlp.logs.minimumpriority` | `OTLP_LOGS_MINIMUMPRIORITY` | `""` (=`debug`) | minimum priority of event for using this output, order is `emergency,alert,critical,error,warning,notice,informational,debug or ""` |
|
||||
| `otlp.logs.checkcert` | `OTLP_LOGS_CHECKCERT` | `false` | Set if you want to skip TLS certificate validation |
|
||||
| `otlp.logs.duration` | `OTLP_LOGS_DURATION` | `1000` | Artificial span duration in milliseconds (as Falco doesn't provide an ending timestamp) |
|
||||
| `otlp.logs.extraenvvars` | `OTLP_LOGS_EXTRAENVVARS` | | Extra env vars (override the other settings) |
|
||||
|
||||
> [!NOTE]
|
||||
For the extra Env Vars values see [standard `OTEL_*` environment variables](https://opentelemetry.io/docs/specs/otel/configuration/sdk-environment-variables/)
|
||||
|
||||
## Example of config.yaml
|
||||
|
||||
```yaml
|
||||
otlp:
|
||||
logs:
|
||||
# endpoint: "" # OTLP endpoint in the form of http(s)://{domain or ip}:4318(/v1/logs), if not empty, OTLP Logs output is enabled
|
||||
protocol: "" # OTLP protocol: http/protobuf, grpc (default: "" which uses SDK default: "http/protobuf")
|
||||
# timeout: "" # OTLP timeout: timeout value in milliseconds (default: "" which uses SDK default: 10000)
|
||||
# headers: "" # OTLP headers: list of headers to apply to all outgoing traces in the form of "some-key=some-value,other-key=other-value" (default: "")
|
||||
# extraenvvars: # Extra env vars (override the other settings)
|
||||
# OTEL_EXPORTER_OTLP_TRACES_TIMEOUT: 10000
|
||||
# OTEL_EXPORTER_OTLP_TIMEOUT: 10000
|
||||
# minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
|
||||
# checkcert: true # Set to false if you want to skip TLS certificate validation (default: true)
|
||||
```
|
||||
|
||||
## Additional info
|
||||
|
||||
> [!WARNING]
|
||||
Because of the way the OTEL SDK is structured, the OTLP outputs don't appear in the metrics (Prometheus, Statsd, ...)
|
||||
and the error logs just specify `OTEL` as output.
|
|
@ -0,0 +1,208 @@
|
|||
# OTEL Metrics
|
||||
|
||||
- **Category**: Metrics/Observability
|
||||
- **Website**: <https://opentelemetry.io/docs/concepts/signals/metrics/>
|
||||
|
||||
## Table of content
|
||||
|
||||
- [OTEL Metrics](#otel-metrics)
|
||||
- [Table of content](#table-of-content)
|
||||
- [Configuration](#configuration)
|
||||
- [Example of config.yaml](#example-of-configyaml)
|
||||
- [Additional info](#additional-info)
|
||||
- [Running a whole stack with docker-compose](#running-a-whole-stack-with-docker-compose)
|
||||
- [Requirements](#requirements)
|
||||
- [Configuration files](#configuration-files)
|
||||
- [Run it](#run-it)
|
||||
|
||||
## Configuration
|
||||
|
||||
| Setting | Env var | Default value | Description |
|
||||
| ------------------------------ | ------------------------------ | -------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `otlp.metrics.endpoint` | `OTLP_METRICS_ENDPOINT` | | OTLP endpoint, typically in the form http(s)://{domain or ip}:4318(/v1/metrics) |
|
||||
| `otlp.metrics.protocol` | `OTLP_METRICS_PROTOCOL` | `http/protobuf` (from SDK) | OTLP Protocol: `http/protobuf`, `grpc` |
|
||||
| `otlp.metrics.timeout` | `OTLP_METRICS_TIMEOUT` | `10000` (from SDK) | OTLP timeout for outgoing metrics in milliseconds |
|
||||
| `otlp.metrics.headers` | `OTLP_METRICS_HEADERS` | `""` | List of headers to apply to all outgoing metrics in the form of `some-key=some-value,other-key=other-value` |
|
||||
| `otlp.metrics.extraenvvars` | `OTLP_METRICS_EXTRAENVVARS` | `""` | Extra env vars (override the other settings) |
|
||||
| `otlp.metrics.minimumpriority` | `OTLP_METRICS_MINIMUMPRIORITY` | `""` (=`debug`) | Minimum priority of event for using this output, order is `emergency,alert,critical,error,warning,notice,informational,debug or ""` |
|
||||
| `otlp.metrics.checkcert` | `OTLP_METRICS_CHECKCERT` | `true` | Set to false if you want to skip TLS certificate validation (only with https) |
|
||||
| `otlp.metrics.extraattributes` | `OTLP_METRICS_EXTRAATTRIBUTES` | `""` | Comma-separated list of fields to use as labels additionally to source, priority, rule, hostname, tags, k8s_ns_name, k8s_pod_name and custom_fields |
|
||||
|
||||
> [!NOTE]
|
||||
For the extra Env Vars values see [standard `OTEL_*` environment variables](https://opentelemetry.io/docs/specs/otel/configuration/sdk-environment-variables/)
|
||||
|
||||
> [!WARNING]
|
||||
If you use `grpc`, the endpoint format must be `http(s)://{domain or ip}:4317`
|
||||
If you use `http/protobuf`, the endpoint format must be `http(s)://{domain or ip}:4318/v1/metrics`
|
||||
|
||||
## Example of config.yaml
|
||||
|
||||
```yaml
|
||||
otlp:
|
||||
metrics:
|
||||
# endpoint: "" # OTLP endpoint, typically in the form http(s)://{domain or ip}:4318(/v1/metrics), if not empty, OTLP Metrics output is enabled
|
||||
# protocol: "" # OTLP protocol: http/protobuf, grpc (default: "" which uses SDK default: "http/protobuf")
|
||||
# timeout: "" # OTLP timeout for outgoing metrics in milliseconds (default: "" which uses SDK default: 10000)
|
||||
# headers: "" # List of headers to apply to all outgoing metrics in the form of "some-key=some-value,other-key=other-value" (default: "")
|
||||
# extraenvvars: # Extra env vars (override the other settings) (default: "")
|
||||
# OTEL_EXPORTER_OTLP_METRICS_TIMEOUT: 10000
|
||||
# OTEL_EXPORTER_OTLP_TIMEOUT: 10000
|
||||
# minimumpriority: "" # Minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default: "")
|
||||
# checkcert: true # Set to false if you want to skip TLS certificate validation (only with https) (default: true)
|
||||
# extraattributes: "" # Comma-separated list of fields to use as labels additionally to source, priority, rule, hostname, tags, k8s_ns_name, k8s_pod_name and custom_fields
|
||||
```
|
||||
|
||||
## Additional info
|
||||
|
||||
> [!NOTE]
|
||||
This output is used to collect metrics about Falco events and Falcosidekick inputs and outputs in OTLP metrics format.
|
||||
|
||||
> [!WARNING]
|
||||
Because of the way the OTEL SDK is structured, the OTLP outputs don't appear in the metrics (Prometheus, Statsd, ...)
|
||||
and the error logs just specify `OTEL` as output.
|
||||
|
||||
## Running a whole stack with docker-compose
|
||||
|
||||
Below `docker-compose` file runs a stack of:
|
||||
|
||||
- `falco`
|
||||
- `falcosidekick`
|
||||
- `prometheus` as metrics backend
|
||||
- OTEL collector to collect OTEL metrics from `falcosidekick` and let prometheus scrape them
|
||||
- `events-generator` to generate arbitrary Falco events
|
||||
|
||||
### Requirements
|
||||
|
||||
A local Linux kernel capable of running `falco --modern-bpf`, see <https://falco.org/blog/falco-modern-bpf/>.
|
||||
|
||||
### Configuration files
|
||||
|
||||
You need to create these files:
|
||||
|
||||
- `./docker-compose.yaml`: minimal docker-compose configuration
|
||||
|
||||
```yaml
|
||||
---
|
||||
services:
|
||||
falco:
|
||||
image: falcosecurity/falco:0.39.0
|
||||
privileged: true
|
||||
volumes:
|
||||
- /var/run/docker.sock:/host/var/run/docker.sock
|
||||
- /dev:/host/dev
|
||||
- /proc:/host/proc:ro
|
||||
- /boot:/host/boot:ro
|
||||
- /lib/modules:/host/lib/modules:ro
|
||||
- /usr:/host/usr:ro
|
||||
- /etc/falco:/host/etc:ro
|
||||
command: [
|
||||
"/usr/bin/falco" ,
|
||||
"-o", "json_output=true",
|
||||
"-o", "http_output.enabled=true",
|
||||
"-o", "http_output.url=http://sidekick:2801", # Set the HTTP output url to Falcosidekick endpoint
|
||||
"-o", "http_output.insecure=true"
|
||||
]
|
||||
|
||||
sidekick:
|
||||
image: falcosidekick:latest
|
||||
ports:
|
||||
- "2801:2801" # Expose default port towards Falco instance
|
||||
environment:
|
||||
- OTLP_METRICS_ENDPOINT=http://otel-collector:4317
|
||||
- OTLP_METRICS_CHECKCERT=false
|
||||
|
||||
otel-collector:
|
||||
image: otel/opentelemetry-collector-contrib
|
||||
volumes:
|
||||
- ./config.yaml:/etc/otelcol-contrib/config.yaml
|
||||
ports:
|
||||
- "4317:4317" # Expose OTLP gRPC port
|
||||
|
||||
prometheus:
|
||||
image: prom/prometheus:latest
|
||||
volumes:
|
||||
- ./prometheus.yml:/etc/prometheus/prometheus.yml
|
||||
ports:
|
||||
- "9090:9090" # Expose port to access Prometheus expression browser
|
||||
|
||||
event-generator:
|
||||
image: falcosecurity/event-generator
|
||||
command: run
|
||||
restart: always
|
||||
trigger:
|
||||
image: alpine
|
||||
command: [ # Alternate reads to /etc/shadow with creations of symlinks from it
|
||||
"sh",
|
||||
"-c",
|
||||
"while true; do cat /etc/shadow > /dev/null; sleep 5; ln -s /etc/shadow shadow; rm shadow; sleep 5; done"
|
||||
]
|
||||
```
|
||||
|
||||
> `./docker-compose.yaml` mentions the `falcosidekick:latest` docker image, that must be locally available before
|
||||
> bringing up the stack. You can build it from source by cloning the repository and issuing the building commands:
|
||||
> ```shell
|
||||
> git clone https://github.com/falcosecurity/falcosidekick.git
|
||||
> cd falcosidekick
|
||||
> go build . && docker build . -t falcosidekick:latest
|
||||
> ```
|
||||
|
||||
- `./config.yaml`: minimal OTEL collector configuration
|
||||
|
||||
```yaml
|
||||
---
|
||||
receivers:
|
||||
otlp:
|
||||
protocols:
|
||||
grpc:
|
||||
endpoint: "0.0.0.0:4317"
|
||||
|
||||
exporters:
|
||||
prometheus:
|
||||
endpoint: "0.0.0.0:9090"
|
||||
|
||||
service:
|
||||
pipelines:
|
||||
metrics:
|
||||
receivers: [otlp]
|
||||
processors: []
|
||||
exporters: [prometheus]
|
||||
```
|
||||
|
||||
- `./prometheus.yml`: minimal prometheus configuration
|
||||
|
||||
```yaml
|
||||
global:
|
||||
scrape_interval: 5s
|
||||
|
||||
scrape_configs:
|
||||
- job_name: 'otel-collector'
|
||||
static_configs:
|
||||
- targets: ['otel-collector:9090']
|
||||
```
|
||||
|
||||
### Run it
|
||||
|
||||
To bring up the stack, and see the results on prometheus expression browser:
|
||||
|
||||
1. Bring up the stack
|
||||
|
||||
```shell
|
||||
docker compose up
|
||||
```
|
||||
|
||||
2. Navigate to <http://localhost:9090/graph> to start browsing the local prometheus expression browser
|
||||
|
||||
3. Navigate to the `Graph` tab and adjust the time interval to be comparable to the stack uptime (e.g.: 15 minutes)
|
||||
|
||||
4. To get information regarding the `falcosecurity_falco_rules_matches_total` metric, you can enter a simple query like
|
||||
`falcosecurity_falco_rules_matches_total` or `sum by (rule) (falcosecurity_falco_rules_matches_total)` and press
|
||||
`Execute`
|
||||
|
||||
5. Explore the obtained results
|
||||

|
||||
|
||||
6. Bring down the stack
|
||||
|
||||
```shell
|
||||
docker compose down
|
||||
```
|
|
@ -11,31 +11,38 @@
|
|||
- [Example of config.yaml](#example-of-configyaml)
|
||||
- [Additional info](#additional-info)
|
||||
- [Running a whole stack with docker-compose](#running-a-whole-stack-with-docker-compose)
|
||||
- [Requirements](#requirements)
|
||||
- [Configuration files](#configuration-files)
|
||||
- [Run it](#run-it)
|
||||
|
||||
## Configuration
|
||||
|
||||
| Setting | Env var | Default value | Description |
|
||||
| ----------------------------- | ----------------------------- | ------------------ | ----------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `otlp.traces.endpoint` | `OTLP_TRACES_ENDPOINT` | | OTLP endpoint in the form of http://{domain or ip}:4318/v1/traces |
|
||||
| `otlp.traces.protocol` | `OTLP_TRACES_PROTOCOL` | `http` (from SDK) | OTLP Protocol |
|
||||
| `otlp.traces.timeout` | `OTLP_TRACES_TIMEOUT` | `10000` (from SDK) | Timeout value in milliseconds |
|
||||
| `otlp.traces.headers` | `OTLP_TRACES_HEADERS` | | List of headers to apply to all outgoing traces in the form of "some-key=some-value,other-key=other-value" |
|
||||
| `otlp.traces.synced` | `OTLP_TRACES_SYNCED` | `false` | Set to `true` if you want traces to be sent synchronously |
|
||||
| `otlp.traces.minimumpriority` | `OTLP_TRACES_MINIMUMPRIORITY` | `""` (=`debug`) | minimum priority of event for using this output, order is `emergency,alert,critical,error,warning,notice,informational,debug or ""` |
|
||||
| `otlp.traces.checkcert` | `OTLP_TRACES_CHECKCERT` | `false` | Set if you want to skip TLS certificate validation |
|
||||
| `otlp.traces.duration` | `OTLP_TRACES_DURATION` | `1000` | Artificial span duration in milliseconds (as Falco doesn't provide an ending timestamp) |
|
||||
| `otlp.traces.extraenvvars` | `OTLP_TRACES_EXTRAENVVARS` | | Extra env vars (override the other settings) |
|
||||
| Setting | Env var | Default value | Description |
|
||||
| ----------------------------- | ----------------------------- | -------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `otlp.traces.endpoint` | `OTLP_TRACES_ENDPOINT` | | OTLP endpoint in the form of http(s)://{domain or ip}:4318(/v1/traces) |
|
||||
| `otlp.traces.protocol` | `OTLP_TRACES_PROTOCOL` | `http/protobuf` (from SDK) | OTLP Protocol: `http/protobuf`, `grpc` |
|
||||
| `otlp.traces.timeout` | `OTLP_TRACES_TIMEOUT` | `10000` (from SDK) | Timeout value in milliseconds |
|
||||
| `otlp.traces.headers` | `OTLP_TRACES_HEADERS` | | List of headers to apply to all outgoing traces in the form of "some-key=some-value,other-key=other-value" |
|
||||
| `otlp.traces.synced` | `OTLP_TRACES_SYNCED` | `false` | Set to `true` if you want traces to be sent synchronously |
|
||||
| `otlp.traces.minimumpriority` | `OTLP_TRACES_MINIMUMPRIORITY` | `""` (=`debug`) | minimum priority of event for using this output, order is `emergency,alert,critical,error,warning,notice,informational,debug or ""` |
|
||||
| `otlp.traces.checkcert` | `OTLP_TRACES_CHECKCERT` | `false` | Set if you want to skip TLS certificate validation |
|
||||
| `otlp.traces.duration` | `OTLP_TRACES_DURATION` | `1000` | Artificial span duration in milliseconds (as Falco doesn't provide an ending timestamp) |
|
||||
| `otlp.traces.extraenvvars` | `OTLP_TRACES_EXTRAENVVARS` | | Extra env vars (override the other settings) |
|
||||
|
||||
> [!NOTE]
|
||||
For the extra Env Vars values see [standard `OTEL_*` environment variables](https://opentelemetry.io/docs/specs/otel/configuration/sdk-environment-variables/):
|
||||
For the extra Env Vars values see [standard `OTEL_*` environment variables](https://opentelemetry.io/docs/specs/otel/configuration/sdk-environment-variables/)
|
||||
|
||||
> [!WARNING]
|
||||
If you use `grpc`, the endpoint format must be `http(s)://{domain or ip}:4317`
|
||||
If you use `http/protobuf`, the endpoint format must be `http(s)://{domain or ip}:4318/v1/traces`
|
||||
|
||||
## Example of config.yaml
|
||||
|
||||
```yaml
|
||||
otlp:
|
||||
traces:
|
||||
# endpoint: "" # OTLP endpoint in the form of http://{domain or ip}:4318/v1/traces
|
||||
# protocol: "" # OTLP protocol http/json, http/protobuf, grpc (default: "" which uses SDK default: http/json)
|
||||
# endpoint: "" # OTLP endpoint in the form of http(s)://{domain or ip}:4318(/v1/traces), if not empty, OTLP Traces output is enabled
|
||||
# protocol: "" # OTLP protocol: http/protobuf, grpc (default: "" which uses SDK default: "http/protobuf")
|
||||
# timeout: "" # OTLP timeout: timeout value in milliseconds (default: "" which uses SDK default: 10000)
|
||||
# headers: "" # OTLP headers: list of headers to apply to all outgoing traces in the form of "some-key=some-value,other-key=other-value" (default: "")
|
||||
# synced: false # Set to true if you want traces to be sent synchronously (default: false)
|
||||
|
@ -52,6 +59,10 @@ otlp:
|
|||
> [!NOTE]
|
||||
The OTLP Traces are only available for the source: `syscalls`.
|
||||
|
||||
> [!WARNING]
|
||||
Because of the way the OTEL SDK is structured, the OTLP outputs don't appear in the metrics (Prometheus, Statsd, ...)
|
||||
and the error logs just specify `OTEL` as output.
|
||||
|
||||
## Running a whole stack with docker-compose
|
||||
|
||||
Below `docker-compose` file runs a stack of:
|
||||
|
|
|
@ -15,7 +15,7 @@
|
|||
|
||||
| Setting | Env var | Default value | Description |
|
||||
| ------------------------------- | ------------------------------- | ---------------- | -------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `quickwit.hosport` | `QUICKWIT_HOSTPORT` | | http://{domain or ip}:{port}, if not empty, Quickwit output is **enabled** |
|
||||
| `quickwit.hostport` | `QUICKWIT_HOSTPORT` | | http://{domain or ip}:{port}, if not empty, Quickwit output is **enabled** |
|
||||
| `quickwit.apiendpoint` | `QUICKWIT_APIENDPOINT` | `api/v1` | API endpoint (containing the API version, overridable in case of quickwit behind a reverse proxy with URL rewriting) |
|
||||
| `quickwit.index` | `QUICKWIT_INDEX` | `falco` | Index |
|
||||
| `quickwit.version` | `QUICKWIT_VERSION` | `0.7` | Version of quickwit |
|
||||
|
|
|
@ -14,13 +14,14 @@
|
|||
|
||||
## Configuration
|
||||
|
||||
| Setting | Env var | Default value | Description |
|
||||
| ---------------------- | ---------------------- | ---------------- | ----------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `stan.hostport` | `STAN_HOSTPORT` | | stan://{domain or ip}:{port}, if not empty, STAN output is **enabled** |
|
||||
| `stan.clusterid` | `STAN_CLUSTERID` | | Cluster name (mandatory) |
|
||||
| `stan.clientid` | `STAN_CLIENTID` | | Client ID (mandatory) |
|
||||
| `stan.checkcert` | `STAN_CHECKCERT` | `true` | Check if ssl certificate of the output is valid |
|
||||
| `stan.minimumpriority` | `STAN_MINIMUMPRIORITY` | `""` (= `debug`) | Minimum priority of event for using this output, order is `emergency,alert,critical,error,warning,notice,informational,debug or ""` |
|
||||
| Setting | Env var | Default value | Description |
|
||||
| ---------------------- | ---------------------- | ------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `stan.hostport` | `STAN_HOSTPORT` | | stan://{domain or ip}:{port}, if not empty, STAN output is **enabled** |
|
||||
| `stan.subjecttemplate` | `STAN_SUBJECTTEMPLATE` | `falco.<priority>.<rule>` | Template for the subject, tokens <priority> and <rule> will be automatically replaced |
|
||||
| `stan.clusterid` | `STAN_CLUSTERID` | | Cluster name (mandatory) |
|
||||
| `stan.clientid` | `STAN_CLIENTID` | | Client ID (mandatory) |
|
||||
| `stan.checkcert` | `STAN_CHECKCERT` | `true` | Check if ssl certificate of the output is valid |
|
||||
| `stan.minimumpriority` | `STAN_MINIMUMPRIORITY` | `""` (= `debug`) | Minimum priority of event for using this output, order is `emergency,alert,critical,error,warning,notice,informational,debug or ""` |
|
||||
|
||||
> [!NOTE]
|
||||
The Env var values override the settings from yaml file.
|
||||
|
@ -32,6 +33,7 @@ stan:
|
|||
hostport: "" # stan://{domain or ip}:{port}, if not empty, STAN output is enabled
|
||||
clusterid: "" # Cluster name (mandatory)
|
||||
clientid: "" # Client ID (mandatory)
|
||||
# subjecttemplate: "falco.<priority>.<rule>" # template for the subject, tokens <priority> and <rule> will be automatically replaced (default: falco.<priority>.<rule>)
|
||||
# minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
|
||||
# mutualtls: false # if true, checkcert flag will be ignored (server cert will always be checked)
|
||||
# checkcert: true # check if ssl certificate of the output is valid (default: true)
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
| -------------------------- | -------------------------- | ---------------- | ----------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `telegram.chatid` | `TELEGRAM_CHATID` | | Telegram Identifier of the shared chat, if not empty, Telegram is **enabled** |
|
||||
| `telegram.token` | `TELEGRAM_TOKEN` | | Telegram bot authentication token |
|
||||
| `telegram.message_thread_id` | `TELEGRAM_MESSAGE_THREAD_ID` | | Telegram individual chats within the group |
|
||||
| `telegram.minimumpriority` | `TELEGRAM_MINIMUMPRIORITY` | `""` (= `debug`) | Minimum priority of event for using this output, order is `emergency,alert,critical,error,warning,notice,informational,debug or ""` |
|
||||
|
||||
> [!NOTE]
|
||||
|
@ -29,6 +30,7 @@ The Env var values override the settings from yaml file.
|
|||
telegram:
|
||||
chatid: "" # Telegram Identifier of the shared chat, if not empty, Telegram is enabled
|
||||
token: "" # Telegram bot authentication token
|
||||
# message_thread_id: "" # Telegram individual chats within the group
|
||||
# minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
|
||||
```
|
||||
|
||||
|
|
|
@ -0,0 +1,35 @@
|
|||
# Webex
|
||||
|
||||
- **Category**: Chat/Messaging
|
||||
- **Website**: https://webex.com
|
||||
|
||||
## Table of content
|
||||
|
||||
- [Webex](#webex)
|
||||
- [Table of content](#table-of-content)
|
||||
- [Configuration](#configuration)
|
||||
- [Example of config.yaml](#example-of-configyaml)
|
||||
- [Screenshots](#screenshots)
|
||||
|
||||
## Configuration
|
||||
|
||||
| Setting | Env var | Default value | Description |
|
||||
| ----------------------- | ----------------------- | --------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `webex.webhookurl` | `WEBEX_WEBHOOKURL` | | Webex WebhookURL, if not empty, Webex output is **enabled** |
|
||||
| `webex.minimumpriority` | `WEBEX_MINIMUMPRIORITY` | `""` (= `debug`) | Minimum priority of event for using this output, order is `emergency,alert,critical,error,warning,notice,informational,debug or ""` |
|
||||
|
||||
|
||||
> [!NOTE]
|
||||
The Env var values override the settings from yaml file.
|
||||
|
||||
## Example of config.yaml
|
||||
|
||||
```yaml
|
||||
webex:
|
||||
webhookurl: "" # Webex WebhookURL, if not empty, Webex output is enabled
|
||||
# minimumpriority: "debug" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
|
||||
```
|
||||
## Screenshots
|
||||
|
||||

|
||||
|
237
go.mod
237
go.mod
|
@ -1,160 +1,211 @@
|
|||
module github.com/falcosecurity/falcosidekick
|
||||
|
||||
go 1.22.0
|
||||
go 1.23.0
|
||||
|
||||
toolchain go1.23.4
|
||||
|
||||
require (
|
||||
cloud.google.com/go/functions v1.16.3
|
||||
cloud.google.com/go/pubsub v1.40.0
|
||||
cloud.google.com/go/storage v1.42.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.2.1
|
||||
cloud.google.com/go/functions v1.19.6
|
||||
cloud.google.com/go/pubsub v1.49.0
|
||||
cloud.google.com/go/storage v1.56.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1
|
||||
github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.3.2
|
||||
github.com/DataDog/datadog-go v4.8.3+incompatible
|
||||
github.com/PagerDuty/go-pagerduty v1.8.0
|
||||
github.com/alecthomas/kingpin/v2 v2.4.0
|
||||
github.com/aws/aws-sdk-go v1.54.11
|
||||
github.com/aws/aws-sdk-go-v2 v1.37.1
|
||||
github.com/aws/aws-sdk-go-v2/config v1.25.3
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.16.2
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.4
|
||||
github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.47.3
|
||||
github.com/aws/aws-sdk-go-v2/service/kinesis v1.35.4
|
||||
github.com/aws/aws-sdk-go-v2/service/lambda v1.74.1
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.83.0
|
||||
github.com/aws/aws-sdk-go-v2/service/sns v1.17.4
|
||||
github.com/aws/aws-sdk-go-v2/service/sqs v1.18.3
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.33.19
|
||||
github.com/cloudevents/sdk-go/v2 v2.15.2
|
||||
github.com/eclipse/paho.mqtt.golang v1.4.3
|
||||
github.com/eclipse/paho.mqtt.golang v1.5.0
|
||||
github.com/embano1/memlog v0.4.6
|
||||
github.com/emersion/go-sasl v0.0.0-20231106173351-e73c9f7bad43
|
||||
github.com/emersion/go-smtp v0.21.2
|
||||
github.com/emersion/go-sasl v0.0.0-20241020182733-b788ff22d5a6
|
||||
github.com/emersion/go-smtp v0.22.0
|
||||
github.com/google/go-cmp v0.7.0
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/googleapis/gax-go/v2 v2.12.5
|
||||
github.com/jackc/pgx/v5 v5.6.0
|
||||
github.com/nats-io/nats.go v1.36.0
|
||||
github.com/googleapis/gax-go/v2 v2.15.0
|
||||
github.com/jackc/pgx/v5 v5.7.5
|
||||
github.com/nats-io/nats.go v1.39.1
|
||||
github.com/nats-io/stan.go v0.10.4
|
||||
github.com/prometheus/client_golang v1.19.1
|
||||
github.com/prometheus/client_golang v1.22.0
|
||||
github.com/rabbitmq/amqp091-go v1.10.0
|
||||
github.com/redis/go-redis/v9 v9.5.3
|
||||
github.com/segmentio/kafka-go v0.4.47
|
||||
github.com/redis/go-redis/v9 v9.8.0
|
||||
github.com/segmentio/kafka-go v0.4.48
|
||||
github.com/spf13/viper v1.19.0
|
||||
github.com/stretchr/testify v1.9.0
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/wavefronthq/wavefront-sdk-go v0.15.0
|
||||
github.com/xitongsys/parquet-go v1.6.2
|
||||
github.com/xitongsys/parquet-go-source v0.0.0-20240122235623-d6294584ab18
|
||||
go.opentelemetry.io/otel v1.27.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0
|
||||
go.opentelemetry.io/otel/sdk v1.27.0
|
||||
go.opentelemetry.io/otel/trace v1.27.0
|
||||
golang.org/x/oauth2 v0.21.0
|
||||
golang.org/x/text v0.16.0
|
||||
google.golang.org/api v0.186.0
|
||||
google.golang.org/genproto v0.0.0-20240617180043-68d350f18fd4
|
||||
k8s.io/api v0.30.2
|
||||
k8s.io/apimachinery v0.30.2
|
||||
k8s.io/client-go v0.30.2
|
||||
github.com/xitongsys/parquet-go-source v0.0.0-20241021075129-b732d2ac9c9b
|
||||
go.opentelemetry.io/contrib/bridges/otelslog v0.10.0
|
||||
go.opentelemetry.io/otel v1.37.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.13.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.10.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.36.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0
|
||||
go.opentelemetry.io/otel/metric v1.37.0
|
||||
go.opentelemetry.io/otel/sdk v1.37.0
|
||||
go.opentelemetry.io/otel/sdk/log v0.13.0
|
||||
go.opentelemetry.io/otel/sdk/metric v1.36.0
|
||||
go.opentelemetry.io/otel/trace v1.37.0
|
||||
golang.org/x/oauth2 v0.30.0
|
||||
golang.org/x/sync v0.16.0
|
||||
golang.org/x/text v0.27.0
|
||||
google.golang.org/api v0.243.0
|
||||
google.golang.org/genproto v0.0.0-20250603155806-513f23925822
|
||||
k8s.io/api v0.32.3
|
||||
k8s.io/apimachinery v0.32.3
|
||||
k8s.io/client-go v0.32.3
|
||||
sigs.k8s.io/wg-policy-prototypes v0.0.0-20240327135653-0fc2ddc5d3e3
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.115.0 // indirect
|
||||
cloud.google.com/go/auth v0.6.0 // indirect
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.3.0 // indirect
|
||||
cloud.google.com/go/iam v1.1.8 // indirect
|
||||
cloud.google.com/go/longrunning v0.5.7 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect
|
||||
github.com/Azure/go-amqp v1.0.5 // indirect
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
|
||||
cel.dev/expr v0.24.0 // indirect
|
||||
cloud.google.com/go v0.121.4 // indirect
|
||||
cloud.google.com/go/auth v0.16.3 // indirect
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.7.0 // indirect
|
||||
cloud.google.com/go/iam v1.5.2 // indirect
|
||||
cloud.google.com/go/longrunning v0.6.7 // indirect
|
||||
cloud.google.com/go/monitoring v1.24.2 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect
|
||||
github.com/Azure/go-amqp v1.4.0 // indirect
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect
|
||||
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect
|
||||
github.com/apache/arrow/go/arrow v0.0.0-20211112161151-bc219186db40 // indirect
|
||||
github.com/apache/thrift v0.20.0 // indirect
|
||||
github.com/apache/thrift v0.21.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.36 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.17.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.20.0 // indirect
|
||||
github.com/aws/smithy-go v1.22.5 // indirect
|
||||
github.com/benbjohnson/clock v1.3.5 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/caio/go-tdigest/v4 v4.0.1 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
|
||||
github.com/cenkalti/backoff/v5 v5.0.2 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.12.0 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.12.1 // indirect
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect
|
||||
github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
||||
github.com/go-logr/logr v1.4.1 // indirect
|
||||
github.com/fsnotify/fsnotify v1.8.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
||||
github.com/go-jose/go-jose/v4 v4.0.5 // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||
github.com/go-openapi/jsonreference v0.21.0 // indirect
|
||||
github.com/go-openapi/swag v0.23.0 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/golang/snappy v0.0.4 // indirect
|
||||
github.com/google/gnostic-models v0.6.8 // indirect
|
||||
github.com/google/gnostic-models v0.6.9 // indirect
|
||||
github.com/google/go-querystring v1.1.0 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/s2a-go v0.1.7 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
|
||||
github.com/gorilla/websocket v1.5.1 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
|
||||
github.com/google/s2a-go v0.1.9 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
|
||||
github.com/gorilla/websocket v1.5.3 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 // indirect
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
github.com/imdario/mergo v1.0.0 // indirect
|
||||
github.com/jackc/pgpassfile v1.0.0 // indirect
|
||||
github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 // indirect
|
||||
github.com/jackc/puddle/v2 v2.2.1 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
|
||||
github.com/jackc/puddle/v2 v2.2.2 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/compress v1.17.8 // indirect
|
||||
github.com/klauspost/compress v1.18.0 // indirect
|
||||
github.com/kylelemons/godebug v1.1.0 // indirect
|
||||
github.com/magiconair/properties v1.8.7 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/magiconair/properties v1.8.9 // indirect
|
||||
github.com/mailru/easyjson v0.9.0 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/nats-io/nats-server/v2 v2.9.23 // indirect
|
||||
github.com/nats-io/nats-streaming-server v0.24.3 // indirect
|
||||
github.com/nats-io/nkeys v0.4.7 // indirect
|
||||
github.com/nats-io/nats-server/v2 v2.10.27 // indirect
|
||||
github.com/nats-io/nats-streaming-server v0.24.6 // indirect
|
||||
github.com/nats-io/nkeys v0.4.10 // indirect
|
||||
github.com/nats-io/nuid v1.0.1 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
|
||||
github.com/pierrec/lz4/v4 v4.1.21 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
|
||||
github.com/pierrec/lz4/v4 v4.1.22 // indirect
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
github.com/prometheus/common v0.53.0 // indirect
|
||||
github.com/prometheus/procfs v0.14.0 // indirect
|
||||
github.com/sagikazarmark/locafero v0.4.0 // indirect
|
||||
github.com/prometheus/common v0.62.0 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/sagikazarmark/locafero v0.7.0 // indirect
|
||||
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
|
||||
github.com/sourcegraph/conc v0.3.0 // indirect
|
||||
github.com/spf13/afero v1.11.0 // indirect
|
||||
github.com/spf13/cast v1.6.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/spf13/afero v1.12.0 // indirect
|
||||
github.com/spf13/cast v1.7.1 // indirect
|
||||
github.com/spf13/pflag v1.0.6 // indirect
|
||||
github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect
|
||||
github.com/subosito/gotenv v1.6.0 // indirect
|
||||
github.com/telkomdev/go-stash v1.0.6
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/xdg-go/pbkdf2 v1.0.0 // indirect
|
||||
github.com/xdg-go/scram v1.1.2 // indirect
|
||||
github.com/xdg-go/stringprep v1.0.4 // indirect
|
||||
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
|
||||
github.com/zeebo/errs v1.4.0 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.50.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.27.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.2.0 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.36.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect
|
||||
go.opentelemetry.io/otel/log v0.13.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.7.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.27.0 // indirect
|
||||
golang.org/x/crypto v0.24.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f // indirect
|
||||
golang.org/x/net v0.26.0 // indirect
|
||||
golang.org/x/sync v0.7.0 // indirect
|
||||
golang.org/x/sys v0.21.0 // indirect
|
||||
golang.org/x/term v0.21.0 // indirect
|
||||
golang.org/x/time v0.5.0 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240617180043-68d350f18fd4 // indirect
|
||||
google.golang.org/grpc v1.64.0 // indirect
|
||||
google.golang.org/protobuf v1.34.2 // indirect
|
||||
golang.org/x/crypto v0.40.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c // indirect
|
||||
golang.org/x/net v0.42.0 // indirect
|
||||
golang.org/x/sys v0.34.0 // indirect
|
||||
golang.org/x/term v0.33.0 // indirect
|
||||
golang.org/x/time v0.12.0 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250721164621-a45f3dfb1074 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250721164621-a45f3dfb1074 // indirect
|
||||
google.golang.org/grpc v1.74.2 // indirect
|
||||
google.golang.org/protobuf v1.36.6 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/klog/v2 v2.120.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20240423202451-8948a665c108 // indirect
|
||||
k8s.io/utils v0.0.0-20240423183400-0849a56e8f22 // indirect
|
||||
sigs.k8s.io/controller-runtime v0.17.3 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
|
||||
k8s.io/klog/v2 v2.130.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 // indirect
|
||||
k8s.io/utils v0.0.0-20241210054802-24370beab758 // indirect
|
||||
sigs.k8s.io/controller-runtime v0.20.1 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.5.0 // indirect
|
||||
sigs.k8s.io/yaml v1.4.0 // indirect
|
||||
)
|
||||
|
||||
|
|
105
handlers.go
105
handlers.go
|
@ -7,17 +7,20 @@ import (
|
|||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
"github.com/google/uuid"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"golang.org/x/text/cases"
|
||||
"golang.org/x/text/language"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -35,6 +38,8 @@ func mainHandler(w http.ResponseWriter, r *http.Request) {
|
|||
http.Error(w, "Please send a valid request body", http.StatusBadRequest)
|
||||
stats.Requests.Add("rejected", 1)
|
||||
promStats.Inputs.With(map[string]string{"source": "requests", "status": "rejected"}).Inc()
|
||||
otlpMetrics.Inputs.With(attribute.String("source", "requests"),
|
||||
attribute.String("status", "rejected")).Inc()
|
||||
nullClient.CountMetric("inputs.requests.rejected", 1, []string{"error:nobody"})
|
||||
|
||||
return
|
||||
|
@ -44,6 +49,8 @@ func mainHandler(w http.ResponseWriter, r *http.Request) {
|
|||
http.Error(w, "Please send with post http method", http.StatusBadRequest)
|
||||
stats.Requests.Add("rejected", 1)
|
||||
promStats.Inputs.With(map[string]string{"source": "requests", "status": "rejected"}).Inc()
|
||||
otlpMetrics.Inputs.With(attribute.String("source", "requests"),
|
||||
attribute.String("status", "rejected")).Inc()
|
||||
nullClient.CountMetric("inputs.requests.rejected", 1, []string{"error:nobody"})
|
||||
|
||||
return
|
||||
|
@ -54,6 +61,8 @@ func mainHandler(w http.ResponseWriter, r *http.Request) {
|
|||
http.Error(w, "Please send a valid request body", http.StatusBadRequest)
|
||||
stats.Requests.Add("rejected", 1)
|
||||
promStats.Inputs.With(map[string]string{"source": "requests", "status": "rejected"}).Inc()
|
||||
otlpMetrics.Inputs.With(attribute.String("source", "requests"),
|
||||
attribute.String("status", "rejected")).Inc()
|
||||
nullClient.CountMetric("inputs.requests.rejected", 1, []string{"error:invalidjson"})
|
||||
|
||||
return
|
||||
|
@ -62,6 +71,8 @@ func mainHandler(w http.ResponseWriter, r *http.Request) {
|
|||
nullClient.CountMetric("inputs.requests.accepted", 1, []string{})
|
||||
stats.Requests.Add("accepted", 1)
|
||||
promStats.Inputs.With(map[string]string{"source": "requests", "status": "accepted"}).Inc()
|
||||
otlpMetrics.Inputs.With(attribute.String("source", "requests"),
|
||||
attribute.String("status", "accepted")).Inc()
|
||||
forwardEvent(falcopayload)
|
||||
}
|
||||
|
||||
|
@ -80,7 +91,7 @@ func healthHandler(w http.ResponseWriter, r *http.Request) {
|
|||
|
||||
// testHandler sends a test event to all enabled outputs.
|
||||
func testHandler(w http.ResponseWriter, r *http.Request) {
|
||||
r.Body = io.NopCloser(bytes.NewReader([]byte(`{"output":"This is a test from falcosidekick","priority":"Debug","hostname": "falcosidekick", "rule":"Test rule", "time":"` + time.Now().UTC().Format(time.RFC3339) + `","output_fields": {"proc.name":"falcosidekick","user.name":"falcosidekick"}, "tags":["test","example"]}`)))
|
||||
r.Body = io.NopCloser(bytes.NewReader([]byte(`{"output":"This is a test from falcosidekick","source":"debug","priority":"Debug","hostname":"falcosidekick", "rule":"Test rule","time":"` + time.Now().UTC().Format(time.RFC3339) + `","output_fields":{"proc.name":"falcosidekick","user.name":"falcosidekick"},"tags":["test","example"]}`)))
|
||||
mainHandler(w, r)
|
||||
}
|
||||
|
||||
|
@ -106,6 +117,8 @@ func newFalcoPayload(payload io.Reader) (types.FalcoPayload, error) {
|
|||
}
|
||||
}
|
||||
|
||||
falcopayload.Tags = append(falcopayload.Tags, config.Customtags...)
|
||||
|
||||
if falcopayload.Rule == "Test rule" {
|
||||
falcopayload.Source = "internal"
|
||||
}
|
||||
|
@ -136,12 +149,12 @@ func newFalcoPayload(payload io.Reader) (types.FalcoPayload, error) {
|
|||
for key, value := range config.Templatedfields {
|
||||
tmpl, err := template.New("").Parse(value)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : Parsing error for templated field '%v': %v\n", key, err)
|
||||
utils.Log(utils.ErrorLvl, "", fmt.Sprintf("Parsing error for templated field '%v': %v", key, err))
|
||||
continue
|
||||
}
|
||||
v := new(bytes.Buffer)
|
||||
if err := tmpl.Execute(v, falcopayload.OutputFields); err != nil {
|
||||
log.Printf("[ERROR] : Parsing error for templated field '%v': %v\n", key, err)
|
||||
utils.Log(utils.ErrorLvl, "", fmt.Sprintf("Parsing error for templated field '%v': %v", key, err))
|
||||
}
|
||||
templatedFields += key + "=" + v.String() + " "
|
||||
falcopayload.OutputFields[key] = v.String()
|
||||
|
@ -154,7 +167,14 @@ func newFalcoPayload(payload io.Reader) (types.FalcoPayload, error) {
|
|||
|
||||
nullClient.CountMetric("falco.accepted", 1, []string{"priority:" + falcopayload.Priority.String()})
|
||||
stats.Falco.Add(strings.ToLower(falcopayload.Priority.String()), 1)
|
||||
promLabels := map[string]string{"rule": falcopayload.Rule, "priority": falcopayload.Priority.String(), "source": falcopayload.Source, "k8s_ns_name": kn, "k8s_pod_name": kp}
|
||||
promLabels := map[string]string{
|
||||
"rule": falcopayload.Rule,
|
||||
"priority_raw": strings.ToLower(falcopayload.Priority.String()),
|
||||
"priority": strconv.Itoa(int(falcopayload.Priority)),
|
||||
"source": falcopayload.Source,
|
||||
"k8s_ns_name": kn,
|
||||
"k8s_pod_name": kp,
|
||||
}
|
||||
if falcopayload.Hostname != "" {
|
||||
promLabels["hostname"] = falcopayload.Hostname
|
||||
} else {
|
||||
|
@ -166,6 +186,11 @@ func newFalcoPayload(payload io.Reader) (types.FalcoPayload, error) {
|
|||
promLabels[key] = value
|
||||
}
|
||||
}
|
||||
for key := range config.Templatedfields {
|
||||
if regPromLabels.MatchString(strings.ReplaceAll(key, ".", "_")) {
|
||||
promLabels[key] = fmt.Sprintf("%v", falcopayload.OutputFields[key])
|
||||
}
|
||||
}
|
||||
for _, i := range config.Prometheus.ExtraLabelsList {
|
||||
promLabels[strings.ReplaceAll(i, ".", "_")] = ""
|
||||
for key, value := range falcopayload.OutputFields {
|
||||
|
@ -181,6 +206,43 @@ func newFalcoPayload(payload io.Reader) (types.FalcoPayload, error) {
|
|||
}
|
||||
promStats.Falco.With(promLabels).Inc()
|
||||
|
||||
// Falco OTLP metric
|
||||
hostname := falcopayload.Hostname
|
||||
if hostname == "" {
|
||||
hostname = "unknown"
|
||||
}
|
||||
attrs := []attribute.KeyValue{
|
||||
attribute.String("source", falcopayload.Source),
|
||||
attribute.String("priority", falcopayload.Priority.String()),
|
||||
attribute.String("rule", falcopayload.Rule),
|
||||
attribute.String("hostname", hostname),
|
||||
attribute.StringSlice("tags", falcopayload.Tags),
|
||||
}
|
||||
|
||||
for key, value := range config.Customfields {
|
||||
if regOTLPMetricsAttributes.MatchString(key) {
|
||||
attrs = append(attrs, attribute.String(key, value))
|
||||
}
|
||||
}
|
||||
for _, attr := range config.OTLP.Metrics.ExtraAttributesList {
|
||||
attrName := strings.ReplaceAll(attr, ".", "_")
|
||||
attrValue := ""
|
||||
for key, val := range falcopayload.OutputFields {
|
||||
if key != attr {
|
||||
continue
|
||||
}
|
||||
if keyName := strings.ReplaceAll(key, ".", "_"); !regOTLPMetricsAttributes.MatchString(keyName) {
|
||||
continue
|
||||
}
|
||||
// Notice: Don't remove the _ for the second return value, otherwise will panic if it can convert the value
|
||||
// to string
|
||||
attrValue, _ = val.(string)
|
||||
break
|
||||
}
|
||||
attrs = append(attrs, attribute.String(attrName, attrValue))
|
||||
}
|
||||
otlpMetrics.Falco.With(attrs...).Inc()
|
||||
|
||||
if config.BracketReplacer != "" {
|
||||
for i, j := range falcopayload.OutputFields {
|
||||
if strings.Contains(i, "[") {
|
||||
|
@ -202,6 +264,7 @@ func newFalcoPayload(payload io.Reader) (types.FalcoPayload, error) {
|
|||
n = strings.ReplaceAll(n, "<output>", o)
|
||||
n = strings.ReplaceAll(n, "<custom_fields>", strings.TrimSuffix(customFields, " "))
|
||||
n = strings.ReplaceAll(n, "<templated_fields>", strings.TrimSuffix(templatedFields, " "))
|
||||
n = strings.ReplaceAll(n, "<tags>", strings.Join(falcopayload.Tags, ","))
|
||||
n = strings.TrimSuffix(n, " ")
|
||||
n = strings.TrimSuffix(n, "( )")
|
||||
n = strings.TrimSuffix(n, "()")
|
||||
|
@ -212,9 +275,9 @@ func newFalcoPayload(payload io.Reader) (types.FalcoPayload, error) {
|
|||
|
||||
if len(falcopayload.String()) > 4096 {
|
||||
for i, j := range falcopayload.OutputFields {
|
||||
switch j.(type) {
|
||||
switch l := j.(type) {
|
||||
case string:
|
||||
if len(j.(string)) > 512 {
|
||||
if len(l) > 512 {
|
||||
k := j.(string)[:507] + "[...]"
|
||||
falcopayload.Output = strings.ReplaceAll(falcopayload.Output, j.(string), k)
|
||||
falcopayload.OutputFields[i] = k
|
||||
|
@ -224,7 +287,7 @@ func newFalcoPayload(payload io.Reader) (types.FalcoPayload, error) {
|
|||
}
|
||||
|
||||
if config.Debug {
|
||||
log.Printf("[DEBUG] : Falco's payload : %v\n", falcopayload.String())
|
||||
utils.Log(utils.DebugLvl, "", fmt.Sprintf("Falco's payload : %v", falcopayload.String()))
|
||||
}
|
||||
|
||||
return falcopayload, nil
|
||||
|
@ -251,16 +314,26 @@ func forwardEvent(falcopayload types.FalcoPayload) {
|
|||
go teamsClient.TeamsPost(falcopayload)
|
||||
}
|
||||
|
||||
if config.Webex.WebhookURL != "" && (falcopayload.Priority >= types.Priority(config.Webex.MinimumPriority) || falcopayload.Rule == testRule) {
|
||||
go webexClient.WebexPost(falcopayload)
|
||||
}
|
||||
|
||||
if config.Datadog.APIKey != "" && (falcopayload.Priority >= types.Priority(config.Datadog.MinimumPriority) || falcopayload.Rule == testRule) {
|
||||
go datadogClient.DatadogPost(falcopayload)
|
||||
}
|
||||
|
||||
if config.DatadogLogs.APIKey != "" && (falcopayload.Priority >= types.Priority(config.DatadogLogs.MinimumPriority) || falcopayload.Rule == testRule) {
|
||||
go datadogLogsClient.DatadogLogsPost(falcopayload)
|
||||
}
|
||||
|
||||
if config.Discord.WebhookURL != "" && (falcopayload.Priority >= types.Priority(config.Discord.MinimumPriority) || falcopayload.Rule == testRule) {
|
||||
go discordClient.DiscordPost(falcopayload)
|
||||
}
|
||||
|
||||
if config.Alertmanager.HostPort != "" && (falcopayload.Priority >= types.Priority(config.Alertmanager.MinimumPriority) || falcopayload.Rule == testRule) {
|
||||
go alertmanagerClient.AlertmanagerPost(falcopayload)
|
||||
if len(config.Alertmanager.HostPort) != 0 && (falcopayload.Priority >= types.Priority(config.Alertmanager.MinimumPriority) || falcopayload.Rule == testRule) {
|
||||
for _, i := range alertmanagerClients {
|
||||
go i.AlertmanagerPost(falcopayload)
|
||||
}
|
||||
}
|
||||
|
||||
if config.Elasticsearch.HostPort != "" && (falcopayload.Priority >= types.Priority(config.Elasticsearch.MinimumPriority) || falcopayload.Rule == testRule) {
|
||||
|
@ -469,10 +542,18 @@ func forwardEvent(falcopayload types.FalcoPayload) {
|
|||
}
|
||||
|
||||
if config.OTLP.Traces.Endpoint != "" && (falcopayload.Priority >= types.Priority(config.OTLP.Traces.MinimumPriority)) && (falcopayload.Source == syscall || falcopayload.Source == syscalls) {
|
||||
go otlpClient.OTLPTracesPost(falcopayload)
|
||||
go otlpTracesClient.OTLPTracesPost(falcopayload)
|
||||
}
|
||||
|
||||
if config.OTLP.Logs.Endpoint != "" && (falcopayload.Priority >= types.Priority(config.OTLP.Logs.MinimumPriority)) {
|
||||
go otlpLogsClient.OTLPLogsPost(falcopayload)
|
||||
}
|
||||
|
||||
if config.Talon.Address != "" && (falcopayload.Priority >= types.Priority(config.Talon.MinimumPriority) || falcopayload.Rule == testRule) {
|
||||
go talonClient.TalonPost(falcopayload)
|
||||
}
|
||||
|
||||
if config.Logstash.Address != "" && (falcopayload.Priority >= types.Priority(config.Logstash.MinimumPriority) || falcopayload.Rule == testRule) {
|
||||
go logstashClient.LogstashPost(falcopayload)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,132 @@
|
|||
// SPDX-License-Identifier: MIT OR Apache-2.0
|
||||
|
||||
package batcher
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultBatchSize = 5 * 1024 * 1024 // max batch size in bytes, 5MB by default
|
||||
defaultFlushInterval = time.Second
|
||||
)
|
||||
|
||||
type CallbackFunc func(falcoPayloads []types.FalcoPayload, serialized []byte)
|
||||
|
||||
type OptionFunc func(b *Batcher)
|
||||
|
||||
// MarshalFunc is a callback that allows the user of the batcher to overwrite the default JSON marshalling
|
||||
type MarshalFunc func(payload types.FalcoPayload) ([]byte, error)
|
||||
|
||||
// Batcher A simple generic implementation of Falco payloads batching
|
||||
// Batching can be configured by the batchSize which is a max number of payloads in the batch or the flushInterval.
|
||||
// The callback function is called when the number of payloads reaches the batchSize or upon the flushInterval
|
||||
type Batcher struct {
|
||||
batchSize int
|
||||
flushInterval time.Duration
|
||||
|
||||
callbackFn CallbackFunc
|
||||
marshalFn MarshalFunc
|
||||
|
||||
mx sync.Mutex
|
||||
|
||||
pending bytes.Buffer
|
||||
// Keeping the original payloads for errors resolution
|
||||
pendingPayloads []types.FalcoPayload
|
||||
|
||||
curTimer *time.Timer
|
||||
}
|
||||
|
||||
func New(opts ...OptionFunc) *Batcher {
|
||||
b := &Batcher{
|
||||
batchSize: defaultBatchSize,
|
||||
flushInterval: defaultFlushInterval,
|
||||
callbackFn: func(falcoPayloads []types.FalcoPayload, batch []byte) {},
|
||||
marshalFn: jsonMarshal,
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(b)
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
func WithBatchSize(sz int) OptionFunc {
|
||||
return func(b *Batcher) {
|
||||
b.batchSize = sz
|
||||
}
|
||||
}
|
||||
|
||||
func WithFlushInterval(interval time.Duration) OptionFunc {
|
||||
return func(b *Batcher) {
|
||||
b.flushInterval = interval
|
||||
}
|
||||
}
|
||||
|
||||
func WithCallback(cb CallbackFunc) OptionFunc {
|
||||
return func(b *Batcher) {
|
||||
b.callbackFn = cb
|
||||
}
|
||||
}
|
||||
|
||||
func WithMarshal(fn MarshalFunc) OptionFunc {
|
||||
return func(b *Batcher) {
|
||||
b.marshalFn = fn
|
||||
}
|
||||
}
|
||||
|
||||
func (b *Batcher) Push(falcopayload types.FalcoPayload) error {
|
||||
b.mx.Lock()
|
||||
defer b.mx.Unlock()
|
||||
|
||||
data, err := b.marshalFn(falcopayload)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if b.pending.Len() == 0 {
|
||||
b.scheduleFlushInterval()
|
||||
} else if b.pending.Len()+len(data) > b.batchSize {
|
||||
b.flush()
|
||||
b.scheduleFlushInterval()
|
||||
}
|
||||
_, _ = b.pending.Write(data)
|
||||
b.pendingPayloads = append(b.pendingPayloads, falcopayload)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *Batcher) scheduleFlushInterval() {
|
||||
if b.curTimer != nil {
|
||||
b.curTimer.Stop()
|
||||
}
|
||||
b.curTimer = time.AfterFunc(b.flushInterval, b.flushOnTimer)
|
||||
}
|
||||
|
||||
func (b *Batcher) flushOnTimer() {
|
||||
b.mx.Lock()
|
||||
defer b.mx.Unlock()
|
||||
b.flush()
|
||||
}
|
||||
|
||||
func (b *Batcher) flush() {
|
||||
if b.pending.Len() == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
serialized := b.pending.Bytes()
|
||||
falcoPayloads := b.pendingPayloads
|
||||
|
||||
b.pending = bytes.Buffer{}
|
||||
b.pendingPayloads = nil
|
||||
b.callbackFn(falcoPayloads, serialized)
|
||||
}
|
||||
|
||||
// jsonMarshal default marshal function
|
||||
func jsonMarshal(payload types.FalcoPayload) ([]byte, error) {
|
||||
return json.Marshal(payload)
|
||||
}
|
|
@ -0,0 +1,79 @@
|
|||
// SPDX-License-Identifier: MIT OR Apache-2.0
|
||||
|
||||
package batcher
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/google/uuid"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
func TestElasticsearchBatcher(t *testing.T) {
|
||||
const (
|
||||
batchSize = 1234
|
||||
testCount = 100
|
||||
flushInterval = 300 * time.Millisecond
|
||||
)
|
||||
|
||||
// Just to emulated similar payload for testing, not strictly needed
|
||||
type eSPayload struct {
|
||||
types.FalcoPayload
|
||||
Timestamp time.Time `json:"@timestamp"`
|
||||
}
|
||||
|
||||
marshalFunc := func(payload types.FalcoPayload) ([]byte, error) {
|
||||
return json.Marshal(eSPayload{FalcoPayload: payload, Timestamp: payload.Time})
|
||||
}
|
||||
|
||||
var wantBatches, gotBatches [][]byte
|
||||
|
||||
var mx sync.Mutex
|
||||
batcher := New(
|
||||
WithBatchSize(batchSize),
|
||||
WithFlushInterval(500*time.Millisecond),
|
||||
WithMarshal(marshalFunc),
|
||||
WithCallback(func(falcoPayloads []types.FalcoPayload, data []byte) {
|
||||
mx.Lock()
|
||||
defer mx.Unlock()
|
||||
gotBatches = append(gotBatches, data)
|
||||
}))
|
||||
|
||||
var currentBatch []byte
|
||||
for i := 0; i < testCount; i++ {
|
||||
payload := types.FalcoPayload{UUID: uuid.Must(uuid.NewV7()).String()}
|
||||
data, err := marshalFunc(payload)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(currentBatch)+len(data) > batchSize {
|
||||
wantBatches = append(wantBatches, currentBatch)
|
||||
currentBatch = nil
|
||||
}
|
||||
|
||||
currentBatch = append(currentBatch, data...)
|
||||
|
||||
err = batcher.Push(payload)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
wantBatches = append(wantBatches, currentBatch)
|
||||
|
||||
// give it time to flush
|
||||
time.Sleep(flushInterval * 2)
|
||||
|
||||
mx.Lock()
|
||||
defer mx.Unlock()
|
||||
diff := cmp.Diff(wantBatches, gotBatches)
|
||||
if diff != "" {
|
||||
t.Fatal(diff)
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,37 @@
|
|||
// SPDX-License-Identifier: MIT OR Apache-2.0
|
||||
|
||||
package utils
|
||||
|
||||
import "log"
|
||||
|
||||
const (
|
||||
InfoLvl string = "info"
|
||||
InfoPrefix string = "[INFO] "
|
||||
ErrorLvl string = "error"
|
||||
ErrorPrefix string = "[ERROR]"
|
||||
DebugLvl string = "debug"
|
||||
DebugPrefix string = "[DEBUG]"
|
||||
WarningLvl string = "warning"
|
||||
WarningPrefix string = "[WARN] "
|
||||
FatalLvl string = "fatal"
|
||||
FatalPrefix string = "[FATAL]"
|
||||
)
|
||||
|
||||
func Log(level, output, msg string) {
|
||||
var prefix string
|
||||
switch level {
|
||||
case InfoLvl:
|
||||
prefix = InfoPrefix
|
||||
case ErrorLvl:
|
||||
prefix = ErrorPrefix
|
||||
case DebugLvl:
|
||||
prefix = DebugPrefix
|
||||
case WarningLvl:
|
||||
prefix = WarningPrefix
|
||||
}
|
||||
if output != "" {
|
||||
log.Printf("%v : %v - %v", prefix, output, msg)
|
||||
} else {
|
||||
log.Printf("%v : %v", prefix, msg)
|
||||
}
|
||||
}
|
260
main.go
260
main.go
|
@ -7,7 +7,6 @@ import (
|
|||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
|
@ -17,10 +16,11 @@ import (
|
|||
|
||||
"github.com/DataDog/datadog-go/statsd"
|
||||
"github.com/embano1/memlog"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
"github.com/falcosecurity/falcosidekick/outputs"
|
||||
otlpmetrics "github.com/falcosecurity/falcosidekick/outputs/otlp_metrics"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
|
@ -32,9 +32,11 @@ var (
|
|||
rocketchatClient *outputs.Client
|
||||
mattermostClient *outputs.Client
|
||||
teamsClient *outputs.Client
|
||||
webexClient *outputs.Client
|
||||
datadogClient *outputs.Client
|
||||
datadogLogsClient *outputs.Client
|
||||
discordClient *outputs.Client
|
||||
alertmanagerClient *outputs.Client
|
||||
alertmanagerClients []*outputs.Client
|
||||
elasticsearchClient *outputs.Client
|
||||
quickwitClient *outputs.Client
|
||||
influxdbClient *outputs.Client
|
||||
|
@ -77,21 +79,27 @@ var (
|
|||
n8nClient *outputs.Client
|
||||
openObserveClient *outputs.Client
|
||||
dynatraceClient *outputs.Client
|
||||
otlpClient *outputs.Client
|
||||
otlpTracesClient *outputs.Client
|
||||
otlpLogsClient *outputs.Client
|
||||
talonClient *outputs.Client
|
||||
logstashClient *outputs.Client
|
||||
|
||||
statsdClient, dogstatsdClient *statsd.Client
|
||||
config *types.Configuration
|
||||
stats *types.Statistics
|
||||
promStats *types.PromStatistics
|
||||
otlpMetrics *otlpmetrics.OTLPMetrics
|
||||
initClientArgs *types.InitClientArgs
|
||||
|
||||
regPromLabels *regexp.Regexp
|
||||
regOutputFormat *regexp.Regexp
|
||||
shutDownFuncs []func()
|
||||
regPromLabels *regexp.Regexp
|
||||
regOTLPMetricsAttributes *regexp.Regexp
|
||||
regOutputFormat *regexp.Regexp
|
||||
shutDownFuncs []func()
|
||||
)
|
||||
|
||||
func init() {
|
||||
utils.Log(utils.InfoLvl, "", fmt.Sprintf("Falcosidekick version: %s", GetVersionInfo().GitVersion))
|
||||
|
||||
// detect unit testing and skip init.
|
||||
// see: https://github.com/alecthomas/kingpin/issues/187
|
||||
testing := (strings.HasSuffix(os.Args[0], ".test") ||
|
||||
|
@ -101,17 +109,21 @@ func init() {
|
|||
}
|
||||
|
||||
regPromLabels, _ = regexp.Compile("^[a-zA-Z_:][a-zA-Z0-9_:]*$")
|
||||
// TODO: replace the following regex if something more appropriate is found
|
||||
regOTLPMetricsAttributes = regexp.MustCompile("^[a-zA-Z_:][a-zA-Z0-9_:]*$")
|
||||
regOutputFormat, _ = regexp.Compile(`(?i)[0-9:]+\.[0-9]+: (Debug|Informational|Notice|Warning|Error|Critical|Alert|Emergency) .*`)
|
||||
|
||||
config = getConfig()
|
||||
stats = getInitStats()
|
||||
promStats = getInitPromStats(config)
|
||||
otlpMetrics = newOTLPMetrics(config)
|
||||
|
||||
nullClient = &outputs.Client{
|
||||
OutputType: "null",
|
||||
Config: config,
|
||||
Stats: stats,
|
||||
PromStats: promStats,
|
||||
OTLPMetrics: otlpMetrics,
|
||||
StatsdClient: statsdClient,
|
||||
DogstatsdClient: dogstatsdClient,
|
||||
}
|
||||
|
@ -121,6 +133,7 @@ func init() {
|
|||
Stats: stats,
|
||||
DogstatsdClient: dogstatsdClient,
|
||||
PromStats: promStats,
|
||||
OTLPMetrics: otlpMetrics,
|
||||
}
|
||||
|
||||
if config.Statsd.Forwarder != "" {
|
||||
|
@ -147,7 +160,7 @@ func init() {
|
|||
|
||||
if config.Slack.WebhookURL != "" {
|
||||
var err error
|
||||
slackClient, err = outputs.NewClient("Slack", config.Slack.WebhookURL, config.Slack.MutualTLS, config.Slack.CheckCert, *initClientArgs)
|
||||
slackClient, err = outputs.NewClient("Slack", config.Slack.WebhookURL, config.Slack.CommonConfig, *initClientArgs)
|
||||
if err != nil {
|
||||
config.Slack.WebhookURL = ""
|
||||
} else {
|
||||
|
@ -157,7 +170,7 @@ func init() {
|
|||
|
||||
if config.Cliq.WebhookURL != "" {
|
||||
var err error
|
||||
cliqClient, err = outputs.NewClient("Cliq", config.Cliq.WebhookURL, config.Cliq.MutualTLS, config.Cliq.CheckCert, *initClientArgs)
|
||||
cliqClient, err = outputs.NewClient("Cliq", config.Cliq.WebhookURL, config.Cliq.CommonConfig, *initClientArgs)
|
||||
if err != nil {
|
||||
config.Cliq.WebhookURL = ""
|
||||
} else {
|
||||
|
@ -167,7 +180,7 @@ func init() {
|
|||
|
||||
if config.Rocketchat.WebhookURL != "" {
|
||||
var err error
|
||||
rocketchatClient, err = outputs.NewClient("Rocketchat", config.Rocketchat.WebhookURL, config.Rocketchat.MutualTLS, config.Rocketchat.CheckCert, *initClientArgs)
|
||||
rocketchatClient, err = outputs.NewClient("Rocketchat", config.Rocketchat.WebhookURL, config.Rocketchat.CommonConfig, *initClientArgs)
|
||||
if err != nil {
|
||||
config.Rocketchat.WebhookURL = ""
|
||||
} else {
|
||||
|
@ -177,7 +190,7 @@ func init() {
|
|||
|
||||
if config.Mattermost.WebhookURL != "" {
|
||||
var err error
|
||||
mattermostClient, err = outputs.NewClient("Mattermost", config.Mattermost.WebhookURL, config.Mattermost.MutualTLS, config.Mattermost.CheckCert, *initClientArgs)
|
||||
mattermostClient, err = outputs.NewClient("Mattermost", config.Mattermost.WebhookURL, config.Mattermost.CommonConfig, *initClientArgs)
|
||||
if err != nil {
|
||||
config.Mattermost.WebhookURL = ""
|
||||
} else {
|
||||
|
@ -187,7 +200,7 @@ func init() {
|
|||
|
||||
if config.Teams.WebhookURL != "" {
|
||||
var err error
|
||||
teamsClient, err = outputs.NewClient("Teams", config.Teams.WebhookURL, config.Teams.MutualTLS, config.Teams.CheckCert, *initClientArgs)
|
||||
teamsClient, err = outputs.NewClient("Teams", config.Teams.WebhookURL, config.Teams.CommonConfig, *initClientArgs)
|
||||
if err != nil {
|
||||
config.Teams.WebhookURL = ""
|
||||
} else {
|
||||
|
@ -195,10 +208,20 @@ func init() {
|
|||
}
|
||||
}
|
||||
|
||||
if config.Webex.WebhookURL != "" {
|
||||
var err error
|
||||
webexClient, err = outputs.NewClient("Webex", config.Webex.WebhookURL, config.Webex.CommonConfig, *initClientArgs)
|
||||
if err != nil {
|
||||
config.Webex.WebhookURL = ""
|
||||
} else {
|
||||
outputs.EnabledOutputs = append(outputs.EnabledOutputs, "Webex")
|
||||
}
|
||||
}
|
||||
|
||||
if config.Datadog.APIKey != "" {
|
||||
var err error
|
||||
endpointUrl := fmt.Sprintf("%s?api_key=%s", config.Datadog.Host+outputs.DatadogPath, config.Datadog.APIKey)
|
||||
datadogClient, err = outputs.NewClient("Datadog", endpointUrl, config.Datadog.MutualTLS, config.Datadog.CheckCert, *initClientArgs)
|
||||
datadogClient, err = outputs.NewClient("Datadog", endpointUrl, config.Datadog.CommonConfig, *initClientArgs)
|
||||
if err != nil {
|
||||
config.Datadog.APIKey = ""
|
||||
} else {
|
||||
|
@ -206,9 +229,20 @@ func init() {
|
|||
}
|
||||
}
|
||||
|
||||
if config.DatadogLogs.APIKey != "" {
|
||||
var err error
|
||||
endpointUrl := config.DatadogLogs.Host + outputs.DatadogLogsPath
|
||||
datadogLogsClient, err = outputs.NewClient("DatadogLogs", endpointUrl, config.DatadogLogs.CommonConfig, *initClientArgs)
|
||||
if err != nil {
|
||||
config.DatadogLogs.APIKey = ""
|
||||
} else {
|
||||
outputs.EnabledOutputs = append(outputs.EnabledOutputs, "DatadogLogs")
|
||||
}
|
||||
}
|
||||
|
||||
if config.Discord.WebhookURL != "" {
|
||||
var err error
|
||||
discordClient, err = outputs.NewClient("Discord", config.Discord.WebhookURL, config.Discord.MutualTLS, config.Discord.CheckCert, *initClientArgs)
|
||||
discordClient, err = outputs.NewClient("Discord", config.Discord.WebhookURL, config.Discord.CommonConfig, *initClientArgs)
|
||||
if err != nil {
|
||||
config.Discord.WebhookURL = ""
|
||||
} else {
|
||||
|
@ -216,12 +250,11 @@ func init() {
|
|||
}
|
||||
}
|
||||
|
||||
if config.Alertmanager.HostPort != "" {
|
||||
if len(config.Alertmanager.HostPort) != 0 {
|
||||
var err error
|
||||
endpointUrl := fmt.Sprintf("%s%s", config.Alertmanager.HostPort, config.Alertmanager.Endpoint)
|
||||
alertmanagerClient, err = outputs.NewClient("AlertManager", endpointUrl, config.Alertmanager.MutualTLS, config.Alertmanager.CheckCert, *initClientArgs)
|
||||
alertmanagerClients, err = outputs.NewAlertManagerClient(config.Alertmanager.HostPort, config.Alertmanager.Endpoint, config.Alertmanager.CommonConfig, *initClientArgs)
|
||||
if err != nil {
|
||||
config.Alertmanager.HostPort = ""
|
||||
config.Alertmanager.HostPort = []string{}
|
||||
} else {
|
||||
outputs.EnabledOutputs = append(outputs.EnabledOutputs, "AlertManager")
|
||||
}
|
||||
|
@ -229,8 +262,7 @@ func init() {
|
|||
|
||||
if config.Elasticsearch.HostPort != "" {
|
||||
var err error
|
||||
endpointUrl := fmt.Sprintf("%s/%s/%s", config.Elasticsearch.HostPort, config.Elasticsearch.Index, config.Elasticsearch.Type)
|
||||
elasticsearchClient, err = outputs.NewClient("Elasticsearch", endpointUrl, config.Elasticsearch.MutualTLS, config.Elasticsearch.CheckCert, *initClientArgs)
|
||||
elasticsearchClient, err = outputs.NewElasticsearchClient(*initClientArgs)
|
||||
if err != nil {
|
||||
config.Elasticsearch.HostPort = ""
|
||||
} else {
|
||||
|
@ -251,7 +283,7 @@ func init() {
|
|||
var err error
|
||||
|
||||
endpointUrl := fmt.Sprintf("%s/%s/%s/ingest", config.Quickwit.HostPort, config.Quickwit.ApiEndpoint, config.Quickwit.Index)
|
||||
quickwitClient, err = outputs.NewClient("Quickwit", endpointUrl, config.Quickwit.MutualTLS, config.Quickwit.CheckCert, *initClientArgs)
|
||||
quickwitClient, err = outputs.NewClient("Quickwit", endpointUrl, config.Quickwit.CommonConfig, *initClientArgs)
|
||||
if err == nil && config.Quickwit.AutoCreateIndex {
|
||||
err = quickwitClient.AutoCreateQuickwitIndex(*initClientArgs)
|
||||
}
|
||||
|
@ -265,7 +297,7 @@ func init() {
|
|||
|
||||
if config.Loki.HostPort != "" {
|
||||
var err error
|
||||
lokiClient, err = outputs.NewClient("Loki", config.Loki.HostPort+config.Loki.Endpoint, config.Loki.MutualTLS, config.Loki.CheckCert, *initClientArgs)
|
||||
lokiClient, err = outputs.NewClient("Loki", config.Loki.HostPort+config.Loki.Endpoint, config.Loki.CommonConfig, *initClientArgs)
|
||||
if err != nil {
|
||||
config.Loki.HostPort = ""
|
||||
} else {
|
||||
|
@ -275,7 +307,7 @@ func init() {
|
|||
|
||||
if config.SumoLogic.ReceiverURL != "" {
|
||||
var err error
|
||||
sumologicClient, err = outputs.NewClient("SumoLogic", config.SumoLogic.ReceiverURL, false, config.SumoLogic.CheckCert, *initClientArgs)
|
||||
sumologicClient, err = outputs.NewClient("SumoLogic", config.SumoLogic.ReceiverURL, config.SumoLogic.CommonConfig, *initClientArgs)
|
||||
if err != nil {
|
||||
config.SumoLogic.ReceiverURL = ""
|
||||
} else {
|
||||
|
@ -285,7 +317,7 @@ func init() {
|
|||
|
||||
if config.Nats.HostPort != "" {
|
||||
var err error
|
||||
natsClient, err = outputs.NewClient("NATS", config.Nats.HostPort, config.Nats.MutualTLS, config.Nats.CheckCert, *initClientArgs)
|
||||
natsClient, err = outputs.NewClient("NATS", config.Nats.HostPort, config.Nats.CommonConfig, *initClientArgs)
|
||||
if err != nil {
|
||||
config.Nats.HostPort = ""
|
||||
} else {
|
||||
|
@ -295,7 +327,7 @@ func init() {
|
|||
|
||||
if config.Stan.HostPort != "" && config.Stan.ClusterID != "" && config.Stan.ClientID != "" {
|
||||
var err error
|
||||
stanClient, err = outputs.NewClient("STAN", config.Stan.HostPort, config.Stan.MutualTLS, config.Stan.CheckCert, *initClientArgs)
|
||||
stanClient, err = outputs.NewClient("STAN", config.Stan.HostPort, config.Stan.CommonConfig, *initClientArgs)
|
||||
if err != nil {
|
||||
config.Stan.HostPort = ""
|
||||
config.Stan.ClusterID = ""
|
||||
|
@ -320,7 +352,7 @@ func init() {
|
|||
}
|
||||
|
||||
var err error
|
||||
influxdbClient, err = outputs.NewClient("Influxdb", url, config.Influxdb.MutualTLS, config.Influxdb.CheckCert, *initClientArgs)
|
||||
influxdbClient, err = outputs.NewClient("Influxdb", url, config.Influxdb.CommonConfig, *initClientArgs)
|
||||
if err != nil {
|
||||
config.Influxdb.HostPort = ""
|
||||
} else {
|
||||
|
@ -332,7 +364,7 @@ func init() {
|
|||
config.AWS.SNS.TopicArn != "" || config.AWS.CloudWatchLogs.LogGroup != "" || config.AWS.S3.Bucket != "" ||
|
||||
config.AWS.Kinesis.StreamName != "" || (config.AWS.SecurityLake.Bucket != "" && config.AWS.SecurityLake.Region != "" && config.AWS.SecurityLake.AccountID != "") {
|
||||
var err error
|
||||
awsClient, err = outputs.NewAWSClient(config, stats, promStats, statsdClient, dogstatsdClient)
|
||||
awsClient, err = outputs.NewAWSClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
|
||||
if err != nil {
|
||||
config.AWS.AccessKeyID = ""
|
||||
config.AWS.SecretAccessKey = ""
|
||||
|
@ -388,7 +420,7 @@ func init() {
|
|||
|
||||
if config.SMTP.HostPort != "" && config.SMTP.From != "" && config.SMTP.To != "" {
|
||||
var err error
|
||||
smtpClient, err = outputs.NewSMTPClient(config, stats, promStats, statsdClient, dogstatsdClient)
|
||||
smtpClient, err = outputs.NewSMTPClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
|
||||
if err != nil {
|
||||
config.SMTP.HostPort = ""
|
||||
} else {
|
||||
|
@ -402,7 +434,7 @@ func init() {
|
|||
if strings.ToLower(config.Opsgenie.Region) == "eu" {
|
||||
url = "https://api.eu.opsgenie.com/v2/alerts"
|
||||
}
|
||||
opsgenieClient, err = outputs.NewClient("Opsgenie", url, config.Opsgenie.MutualTLS, config.Opsgenie.CheckCert, *initClientArgs)
|
||||
opsgenieClient, err = outputs.NewClient("Opsgenie", url, config.Opsgenie.CommonConfig, *initClientArgs)
|
||||
if err != nil {
|
||||
config.Opsgenie.APIKey = ""
|
||||
} else {
|
||||
|
@ -412,7 +444,7 @@ func init() {
|
|||
|
||||
if config.Webhook.Address != "" {
|
||||
var err error
|
||||
webhookClient, err = outputs.NewClient("Webhook", config.Webhook.Address, config.Webhook.MutualTLS, config.Webhook.CheckCert, *initClientArgs)
|
||||
webhookClient, err = outputs.NewClient("Webhook", config.Webhook.Address, config.Webhook.CommonConfig, *initClientArgs)
|
||||
if err != nil {
|
||||
config.Webhook.Address = ""
|
||||
} else {
|
||||
|
@ -422,7 +454,7 @@ func init() {
|
|||
|
||||
if config.NodeRed.Address != "" {
|
||||
var err error
|
||||
noderedClient, err = outputs.NewClient("NodeRed", config.NodeRed.Address, false, config.NodeRed.CheckCert, *initClientArgs)
|
||||
noderedClient, err = outputs.NewClient("NodeRed", config.NodeRed.Address, config.NodeRed.CommonConfig, *initClientArgs)
|
||||
if err != nil {
|
||||
config.NodeRed.Address = ""
|
||||
} else {
|
||||
|
@ -432,7 +464,7 @@ func init() {
|
|||
|
||||
if config.CloudEvents.Address != "" {
|
||||
var err error
|
||||
cloudeventsClient, err = outputs.NewClient("CloudEvents", config.CloudEvents.Address, config.CloudEvents.MutualTLS, config.CloudEvents.CheckCert, *initClientArgs)
|
||||
cloudeventsClient, err = outputs.NewClient("CloudEvents", config.CloudEvents.Address, config.CloudEvents.CommonConfig, *initClientArgs)
|
||||
if err != nil {
|
||||
config.CloudEvents.Address = ""
|
||||
} else {
|
||||
|
@ -442,7 +474,7 @@ func init() {
|
|||
|
||||
if config.Azure.EventHub.Name != "" {
|
||||
var err error
|
||||
azureClient, err = outputs.NewEventHubClient(config, stats, promStats, statsdClient, dogstatsdClient)
|
||||
azureClient, err = outputs.NewEventHubClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
|
||||
if err != nil {
|
||||
config.Azure.EventHub.Name = ""
|
||||
config.Azure.EventHub.Namespace = ""
|
||||
|
@ -455,7 +487,7 @@ func init() {
|
|||
|
||||
if (config.GCP.PubSub.ProjectID != "" && config.GCP.PubSub.Topic != "") || config.GCP.Storage.Bucket != "" || config.GCP.CloudFunctions.Name != "" {
|
||||
var err error
|
||||
gcpClient, err = outputs.NewGCPClient(config, stats, promStats, statsdClient, dogstatsdClient)
|
||||
gcpClient, err = outputs.NewGCPClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
|
||||
if err != nil {
|
||||
config.GCP.PubSub.ProjectID = ""
|
||||
config.GCP.PubSub.Topic = ""
|
||||
|
@ -478,7 +510,7 @@ func init() {
|
|||
var err error
|
||||
var outputName = "GCPCloudRun"
|
||||
|
||||
gcpCloudRunClient, err = outputs.NewClient(outputName, config.GCP.CloudRun.Endpoint, false, false, *initClientArgs)
|
||||
gcpCloudRunClient, err = outputs.NewClient(outputName, config.GCP.CloudRun.Endpoint, types.CommonConfig{}, *initClientArgs)
|
||||
|
||||
if err != nil {
|
||||
config.GCP.CloudRun.Endpoint = ""
|
||||
|
@ -489,7 +521,7 @@ func init() {
|
|||
|
||||
if config.Googlechat.WebhookURL != "" {
|
||||
var err error
|
||||
googleChatClient, err = outputs.NewClient("Googlechat", config.Googlechat.WebhookURL, config.Googlechat.MutualTLS, config.Googlechat.CheckCert, *initClientArgs)
|
||||
googleChatClient, err = outputs.NewClient("Googlechat", config.Googlechat.WebhookURL, config.Googlechat.CommonConfig, *initClientArgs)
|
||||
if err != nil {
|
||||
config.Googlechat.WebhookURL = ""
|
||||
} else {
|
||||
|
@ -499,7 +531,7 @@ func init() {
|
|||
|
||||
if config.Kafka.HostPort != "" && config.Kafka.Topic != "" {
|
||||
var err error
|
||||
kafkaClient, err = outputs.NewKafkaClient(config, stats, promStats, statsdClient, dogstatsdClient)
|
||||
kafkaClient, err = outputs.NewKafkaClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
|
||||
if err != nil {
|
||||
config.Kafka.HostPort = ""
|
||||
} else {
|
||||
|
@ -509,7 +541,7 @@ func init() {
|
|||
|
||||
if config.KafkaRest.Address != "" {
|
||||
var err error
|
||||
kafkaRestClient, err = outputs.NewClient("KafkaRest", config.KafkaRest.Address, config.KafkaRest.MutualTLS, config.KafkaRest.CheckCert, *initClientArgs)
|
||||
kafkaRestClient, err = outputs.NewClient("KafkaRest", config.KafkaRest.Address, config.KafkaRest.CommonConfig, *initClientArgs)
|
||||
if err != nil {
|
||||
config.KafkaRest.Address = ""
|
||||
} else {
|
||||
|
@ -522,7 +554,7 @@ func init() {
|
|||
var url = "https://events.pagerduty.com/v2/enqueue"
|
||||
var outputName = "Pagerduty"
|
||||
|
||||
pagerdutyClient, err = outputs.NewClient(outputName, url, config.Pagerduty.MutualTLS, config.Pagerduty.CheckCert, *initClientArgs)
|
||||
pagerdutyClient, err = outputs.NewClient(outputName, url, config.Pagerduty.CommonConfig, *initClientArgs)
|
||||
|
||||
if err != nil {
|
||||
config.Pagerduty.RoutingKey = ""
|
||||
|
@ -533,9 +565,9 @@ func init() {
|
|||
|
||||
if config.Kubeless.Namespace != "" && config.Kubeless.Function != "" {
|
||||
var err error
|
||||
kubelessClient, err = outputs.NewKubelessClient(config, stats, promStats, statsdClient, dogstatsdClient)
|
||||
kubelessClient, err = outputs.NewKubelessClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : Kubeless - %v\n", err)
|
||||
utils.Log(utils.ErrorLvl, kubelessClient.OutputType, err.Error())
|
||||
config.Kubeless.Namespace = ""
|
||||
config.Kubeless.Function = ""
|
||||
} else {
|
||||
|
@ -545,7 +577,7 @@ func init() {
|
|||
|
||||
if config.WebUI.URL != "" {
|
||||
var err error
|
||||
webUIClient, err = outputs.NewClient("WebUI", config.WebUI.URL, config.WebUI.MutualTLS, config.WebUI.CheckCert, *initClientArgs)
|
||||
webUIClient, err = outputs.NewClient("WebUI", config.WebUI.URL, config.WebUI.CommonConfig, *initClientArgs)
|
||||
if err != nil {
|
||||
config.WebUI.URL = ""
|
||||
} else {
|
||||
|
@ -555,7 +587,7 @@ func init() {
|
|||
|
||||
if config.PolicyReport.Enabled {
|
||||
var err error
|
||||
policyReportClient, err = outputs.NewPolicyReportClient(config, stats, promStats, statsdClient, dogstatsdClient)
|
||||
policyReportClient, err = outputs.NewPolicyReportClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
|
||||
if err != nil {
|
||||
config.PolicyReport.Enabled = false
|
||||
} else {
|
||||
|
@ -565,9 +597,9 @@ func init() {
|
|||
|
||||
if config.Openfaas.FunctionName != "" {
|
||||
var err error
|
||||
openfaasClient, err = outputs.NewOpenfaasClient(config, stats, promStats, statsdClient, dogstatsdClient)
|
||||
openfaasClient, err = outputs.NewOpenfaasClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : OpenFaaS - %v\n", err)
|
||||
utils.Log(utils.ErrorLvl, openfaasClient.OutputType, err.Error())
|
||||
} else {
|
||||
outputs.EnabledOutputs = append(outputs.EnabledOutputs, "OpenFaaS")
|
||||
}
|
||||
|
@ -575,9 +607,9 @@ func init() {
|
|||
|
||||
if config.Tekton.EventListener != "" {
|
||||
var err error
|
||||
tektonClient, err = outputs.NewClient("Tekton", config.Tekton.EventListener, config.Tekton.MutualTLS, config.Tekton.CheckCert, *initClientArgs)
|
||||
tektonClient, err = outputs.NewClient("Tekton", config.Tekton.EventListener, config.Tekton.CommonConfig, *initClientArgs)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : Tekton - %v\n", err)
|
||||
utils.Log(utils.ErrorLvl, tektonClient.OutputType, err.Error())
|
||||
} else {
|
||||
outputs.EnabledOutputs = append(outputs.EnabledOutputs, "Tekton")
|
||||
}
|
||||
|
@ -585,7 +617,7 @@ func init() {
|
|||
|
||||
if config.Rabbitmq.URL != "" && config.Rabbitmq.Queue != "" {
|
||||
var err error
|
||||
rabbitmqClient, err = outputs.NewRabbitmqClient(config, stats, promStats, statsdClient, dogstatsdClient)
|
||||
rabbitmqClient, err = outputs.NewRabbitmqClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
|
||||
if err != nil {
|
||||
config.Rabbitmq.URL = ""
|
||||
} else {
|
||||
|
@ -595,9 +627,9 @@ func init() {
|
|||
|
||||
if config.Wavefront.EndpointType != "" && config.Wavefront.EndpointHost != "" {
|
||||
var err error
|
||||
wavefrontClient, err = outputs.NewWavefrontClient(config, stats, promStats, statsdClient, dogstatsdClient)
|
||||
wavefrontClient, err = outputs.NewWavefrontClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : Wavefront - %v\n", err)
|
||||
utils.Log(utils.ErrorLvl, wavefrontClient.OutputType, err.Error())
|
||||
config.Wavefront.EndpointHost = ""
|
||||
} else {
|
||||
outputs.EnabledOutputs = append(outputs.EnabledOutputs, "Wavefront")
|
||||
|
@ -606,9 +638,9 @@ func init() {
|
|||
|
||||
if config.Fission.Function != "" {
|
||||
var err error
|
||||
fissionClient, err = outputs.NewFissionClient(config, stats, promStats, statsdClient, dogstatsdClient)
|
||||
fissionClient, err = outputs.NewFissionClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : Fission - %v\n", err)
|
||||
utils.Log(utils.ErrorLvl, fissionClient.OutputType, err.Error())
|
||||
} else {
|
||||
outputs.EnabledOutputs = append(outputs.EnabledOutputs, outputs.Fission)
|
||||
}
|
||||
|
@ -618,7 +650,7 @@ func init() {
|
|||
var err error
|
||||
var outputName = "Grafana"
|
||||
endpointUrl := fmt.Sprintf("%s/api/annotations", config.Grafana.HostPort)
|
||||
grafanaClient, err = outputs.NewClient(outputName, endpointUrl, config.Grafana.MutualTLS, config.Grafana.CheckCert, *initClientArgs)
|
||||
grafanaClient, err = outputs.NewClient(outputName, endpointUrl, config.Grafana.CommonConfig, *initClientArgs)
|
||||
if err != nil {
|
||||
config.Grafana.HostPort = ""
|
||||
config.Grafana.APIKey = ""
|
||||
|
@ -630,7 +662,7 @@ func init() {
|
|||
if config.GrafanaOnCall.WebhookURL != "" {
|
||||
var err error
|
||||
var outputName = "GrafanaOnCall"
|
||||
grafanaOnCallClient, err = outputs.NewClient(outputName, config.GrafanaOnCall.WebhookURL, config.GrafanaOnCall.MutualTLS, config.GrafanaOnCall.CheckCert, *initClientArgs)
|
||||
grafanaOnCallClient, err = outputs.NewClient(outputName, config.GrafanaOnCall.WebhookURL, config.GrafanaOnCall.CommonConfig, *initClientArgs)
|
||||
if err != nil {
|
||||
config.GrafanaOnCall.WebhookURL = ""
|
||||
} else {
|
||||
|
@ -640,10 +672,10 @@ func init() {
|
|||
|
||||
if config.Yandex.S3.Bucket != "" {
|
||||
var err error
|
||||
yandexClient, err = outputs.NewYandexClient(config, stats, promStats, statsdClient, dogstatsdClient)
|
||||
yandexClient, err = outputs.NewYandexClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
|
||||
if err != nil {
|
||||
config.Yandex.S3.Bucket = ""
|
||||
log.Printf("[ERROR] : Yandex - %v\n", err)
|
||||
utils.Log(utils.ErrorLvl, yandexClient.OutputType, err.Error())
|
||||
} else {
|
||||
if config.Yandex.S3.Bucket != "" {
|
||||
outputs.EnabledOutputs = append(outputs.EnabledOutputs, "YandexS3")
|
||||
|
@ -653,10 +685,10 @@ func init() {
|
|||
|
||||
if config.Yandex.DataStreams.StreamName != "" {
|
||||
var err error
|
||||
yandexClient, err = outputs.NewYandexClient(config, stats, promStats, statsdClient, dogstatsdClient)
|
||||
yandexClient, err = outputs.NewYandexClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
|
||||
if err != nil {
|
||||
config.Yandex.DataStreams.StreamName = ""
|
||||
log.Printf("[ERROR] : Yandex - %v\n", err)
|
||||
utils.Log(utils.ErrorLvl, yandexClient.OutputType, err.Error())
|
||||
} else {
|
||||
if config.Yandex.DataStreams.StreamName != "" {
|
||||
outputs.EnabledOutputs = append(outputs.EnabledOutputs, "YandexDataStreams")
|
||||
|
@ -666,10 +698,10 @@ func init() {
|
|||
|
||||
if config.Syslog.Host != "" {
|
||||
var err error
|
||||
syslogClient, err = outputs.NewSyslogClient(config, stats, promStats, statsdClient, dogstatsdClient)
|
||||
syslogClient, err = outputs.NewSyslogClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
|
||||
if err != nil {
|
||||
config.Syslog.Host = ""
|
||||
log.Printf("[ERROR] : Syslog - %v\n", err)
|
||||
utils.Log(utils.ErrorLvl, syslogClient.OutputType, err.Error())
|
||||
} else {
|
||||
outputs.EnabledOutputs = append(outputs.EnabledOutputs, "Syslog")
|
||||
}
|
||||
|
@ -677,10 +709,10 @@ func init() {
|
|||
|
||||
if config.MQTT.Broker != "" {
|
||||
var err error
|
||||
mqttClient, err = outputs.NewMQTTClient(config, stats, promStats, statsdClient, dogstatsdClient)
|
||||
mqttClient, err = outputs.NewMQTTClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
|
||||
if err != nil {
|
||||
config.MQTT.Broker = ""
|
||||
log.Printf("[ERROR] : MQTT - %v\n", err)
|
||||
utils.Log(utils.ErrorLvl, mqttClient.OutputType, err.Error())
|
||||
} else {
|
||||
outputs.EnabledOutputs = append(outputs.EnabledOutputs, "MQTT")
|
||||
}
|
||||
|
@ -689,7 +721,7 @@ func init() {
|
|||
if config.Zincsearch.HostPort != "" {
|
||||
var err error
|
||||
endpointUrl := fmt.Sprintf("%s/api/%s/_doc", config.Zincsearch.HostPort, config.Zincsearch.Index)
|
||||
zincsearchClient, err = outputs.NewClient("Zincsearch", endpointUrl, false, config.Zincsearch.CheckCert, *initClientArgs)
|
||||
zincsearchClient, err = outputs.NewClient("Zincsearch", endpointUrl, types.CommonConfig{CheckCert: config.Zincsearch.CheckCert}, *initClientArgs)
|
||||
if err != nil {
|
||||
config.Zincsearch.HostPort = ""
|
||||
} else {
|
||||
|
@ -700,7 +732,7 @@ func init() {
|
|||
if config.Gotify.HostPort != "" {
|
||||
var err error
|
||||
endpointUrl := fmt.Sprintf("%s/message", config.Gotify.HostPort)
|
||||
gotifyClient, err = outputs.NewClient("Gotify", endpointUrl, false, config.Gotify.CheckCert, *initClientArgs)
|
||||
gotifyClient, err = outputs.NewClient("Gotify", endpointUrl, types.CommonConfig{CheckCert: config.Gotify.CheckCert}, *initClientArgs)
|
||||
if err != nil {
|
||||
config.Gotify.HostPort = ""
|
||||
} else {
|
||||
|
@ -710,10 +742,10 @@ func init() {
|
|||
|
||||
if config.Spyderbat.OrgUID != "" {
|
||||
var err error
|
||||
spyderbatClient, err = outputs.NewSpyderbatClient(config, stats, promStats, statsdClient, dogstatsdClient)
|
||||
spyderbatClient, err = outputs.NewSpyderbatClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
|
||||
if err != nil {
|
||||
config.Spyderbat.OrgUID = ""
|
||||
log.Printf("[ERROR] : Spyderbat - %v\n", err)
|
||||
utils.Log(utils.ErrorLvl, spyderbatClient.OutputType, err.Error())
|
||||
} else {
|
||||
outputs.EnabledOutputs = append(outputs.EnabledOutputs, "Spyderbat")
|
||||
}
|
||||
|
@ -721,10 +753,10 @@ func init() {
|
|||
|
||||
if config.TimescaleDB.Host != "" {
|
||||
var err error
|
||||
timescaleDBClient, err = outputs.NewTimescaleDBClient(config, stats, promStats, statsdClient, dogstatsdClient)
|
||||
timescaleDBClient, err = outputs.NewTimescaleDBClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
|
||||
if err != nil {
|
||||
config.TimescaleDB.Host = ""
|
||||
log.Printf("[ERROR] : TimescaleDB - %v\n", err)
|
||||
utils.Log(utils.ErrorLvl, timescaleDBClient.OutputType, err.Error())
|
||||
} else {
|
||||
outputs.EnabledOutputs = append(outputs.EnabledOutputs, "TimescaleDB")
|
||||
}
|
||||
|
@ -732,7 +764,7 @@ func init() {
|
|||
|
||||
if config.Redis.Address != "" {
|
||||
var err error
|
||||
redisClient, err = outputs.NewRedisClient(config, stats, promStats, statsdClient, dogstatsdClient)
|
||||
redisClient, err = outputs.NewRedisClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
|
||||
if err != nil {
|
||||
config.Redis.Address = ""
|
||||
} else {
|
||||
|
@ -740,17 +772,27 @@ func init() {
|
|||
}
|
||||
}
|
||||
|
||||
if config.Logstash.Address != "" {
|
||||
var err error
|
||||
logstashClient, err = outputs.NewLogstashClient(config, stats, promStats, statsdClient, dogstatsdClient)
|
||||
if err != nil {
|
||||
config.Logstash.Address = ""
|
||||
} else {
|
||||
outputs.EnabledOutputs = append(outputs.EnabledOutputs, "Logstash")
|
||||
}
|
||||
}
|
||||
|
||||
if config.Telegram.ChatID != "" && config.Telegram.Token != "" {
|
||||
var err error
|
||||
var urlFormat = "https://api.telegram.org/bot%s/sendMessage"
|
||||
|
||||
telegramClient, err = outputs.NewClient("Telegram", fmt.Sprintf(urlFormat, config.Telegram.Token), false, config.Telegram.CheckCert, *initClientArgs)
|
||||
telegramClient, err = outputs.NewClient("Telegram", fmt.Sprintf(urlFormat, config.Telegram.Token), types.CommonConfig{CheckCert: config.Telegram.CheckCert}, *initClientArgs)
|
||||
|
||||
if err != nil {
|
||||
config.Telegram.ChatID = ""
|
||||
config.Telegram.Token = ""
|
||||
|
||||
log.Printf("[ERROR] : Telegram - %v\n", err)
|
||||
utils.Log(utils.ErrorLvl, telegramClient.OutputType, err.Error())
|
||||
} else {
|
||||
outputs.EnabledOutputs = append(outputs.EnabledOutputs, "Telegram")
|
||||
}
|
||||
|
@ -758,7 +800,7 @@ func init() {
|
|||
|
||||
if config.N8N.Address != "" {
|
||||
var err error
|
||||
n8nClient, err = outputs.NewClient("n8n", config.N8N.Address, false, config.N8N.CheckCert, *initClientArgs)
|
||||
n8nClient, err = outputs.NewClient("n8n", config.N8N.Address, types.CommonConfig{CheckCert: config.N8N.CheckCert}, *initClientArgs)
|
||||
if err != nil {
|
||||
config.N8N.Address = ""
|
||||
} else {
|
||||
|
@ -769,7 +811,7 @@ func init() {
|
|||
if config.OpenObserve.HostPort != "" {
|
||||
var err error
|
||||
endpointUrl := fmt.Sprintf("%s/api/%s/%s/_multi", config.OpenObserve.HostPort, config.OpenObserve.OrganizationName, config.OpenObserve.StreamName)
|
||||
openObserveClient, err = outputs.NewClient("OpenObserve", endpointUrl, config.OpenObserve.MutualTLS, config.OpenObserve.CheckCert, *initClientArgs)
|
||||
openObserveClient, err = outputs.NewClient("OpenObserve", endpointUrl, config.OpenObserve.CommonConfig, *initClientArgs)
|
||||
if err != nil {
|
||||
config.OpenObserve.HostPort = ""
|
||||
} else {
|
||||
|
@ -780,7 +822,7 @@ func init() {
|
|||
if config.Dynatrace.APIToken != "" && config.Dynatrace.APIUrl != "" {
|
||||
var err error
|
||||
dynatraceApiUrl := strings.TrimRight(config.Dynatrace.APIUrl, "/") + "/v2/logs/ingest"
|
||||
dynatraceClient, err = outputs.NewClient("Dynatrace", dynatraceApiUrl, false, config.Dynatrace.CheckCert, *initClientArgs)
|
||||
dynatraceClient, err = outputs.NewClient("Dynatrace,", dynatraceApiUrl, types.CommonConfig{CheckCert: config.Dynatrace.CheckCert}, *initClientArgs)
|
||||
if err != nil {
|
||||
config.Dynatrace.APIToken = ""
|
||||
config.Dynatrace.APIUrl = ""
|
||||
|
@ -791,18 +833,45 @@ func init() {
|
|||
|
||||
if config.OTLP.Traces.Endpoint != "" {
|
||||
var err error
|
||||
otlpClient, err = outputs.NewOtlpTracesClient(config, stats, promStats, statsdClient, dogstatsdClient)
|
||||
otlpTracesClient, err = outputs.NewOtlpTracesClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
|
||||
if err != nil {
|
||||
config.OTLP.Traces.Endpoint = ""
|
||||
} else {
|
||||
outputs.EnabledOutputs = append(outputs.EnabledOutputs, "OTLPTraces")
|
||||
shutDownFuncs = append(shutDownFuncs, otlpClient.ShutDownFunc)
|
||||
shutDownFuncs = append(shutDownFuncs, otlpTracesClient.ShutDownFunc)
|
||||
}
|
||||
}
|
||||
|
||||
if config.OTLP.Logs.Endpoint != "" {
|
||||
var err error
|
||||
otlpLogsClient, err = outputs.NewOtlpLogsClient(config, stats, promStats, otlpMetrics, statsdClient, dogstatsdClient)
|
||||
if err != nil {
|
||||
config.OTLP.Logs.Endpoint = ""
|
||||
} else {
|
||||
outputs.EnabledOutputs = append(outputs.EnabledOutputs, "OTLPLogs")
|
||||
shutDownFuncs = append(shutDownFuncs, otlpLogsClient.ShutDownFunc)
|
||||
}
|
||||
}
|
||||
|
||||
if config.OTLP.Metrics.Endpoint != "" {
|
||||
shutDownFunc, err := otlpmetrics.InitProvider(context.Background(), &config.OTLP.Metrics)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
config.OTLP.Logs.Endpoint = ""
|
||||
} else {
|
||||
outputs.EnabledOutputs = append(outputs.EnabledOutputs, "OTLPMetrics")
|
||||
fn := func() {
|
||||
if err := shutDownFunc(context.TODO()); err != nil {
|
||||
utils.Log(utils.ErrorLvl, "OTLP Metrics", err.Error())
|
||||
}
|
||||
}
|
||||
shutDownFuncs = append(shutDownFuncs, fn)
|
||||
}
|
||||
}
|
||||
|
||||
if config.Talon.Address != "" {
|
||||
var err error
|
||||
talonClient, err = outputs.NewClient("Talon", config.Talon.Address, false, config.Talon.CheckCert, *initClientArgs)
|
||||
talonClient, err = outputs.NewClient("Talon", config.Talon.Address, types.CommonConfig{CheckCert: config.Talon.CheckCert}, *initClientArgs)
|
||||
if err != nil {
|
||||
config.Talon.Address = ""
|
||||
} else {
|
||||
|
@ -810,9 +879,7 @@ func init() {
|
|||
}
|
||||
}
|
||||
|
||||
log.Printf("[INFO] : Falco Sidekick version: %s\n", GetVersionInfo().GitVersion)
|
||||
log.Printf("[INFO] : Enabled Outputs : %s\n", outputs.EnabledOutputs)
|
||||
|
||||
utils.Log(utils.InfoLvl, "", fmt.Sprintf("Enabled Outputs: %s", outputs.EnabledOutputs))
|
||||
}
|
||||
|
||||
func main() {
|
||||
|
@ -820,7 +887,7 @@ func main() {
|
|||
defer shutdown()
|
||||
}
|
||||
if config.Debug {
|
||||
log.Printf("[INFO] : Debug mode : %v", config.Debug)
|
||||
utils.Log(utils.InfoPrefix, "", fmt.Sprintf("Debug mode: %v", config.Debug))
|
||||
}
|
||||
|
||||
routes := map[string]http.Handler{
|
||||
|
@ -842,11 +909,11 @@ func main() {
|
|||
if ok {
|
||||
delete(routes, r)
|
||||
if config.Debug {
|
||||
log.Printf("[DEBUG] : %s is served on http", r)
|
||||
utils.Log(utils.DebugLvl, "", fmt.Sprintf("%s is served on http", r))
|
||||
}
|
||||
HTTPServeMux.Handle(r, handler)
|
||||
} else {
|
||||
log.Printf("[WARN] : tlsserver.notlspaths has unknown path '%s'", r)
|
||||
utils.Log(utils.WarningLvl, "", fmt.Sprintf("tlsserver.notlspaths has unknown path '%s'", r))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -869,12 +936,12 @@ func main() {
|
|||
if config.TLSServer.Deploy {
|
||||
if config.TLSServer.MutualTLS {
|
||||
if config.Debug {
|
||||
log.Printf("[DEBUG] : running mTLS server")
|
||||
utils.Log(utils.DebugLvl, "", "running mTLS server")
|
||||
}
|
||||
|
||||
caCert, err := os.ReadFile(config.TLSServer.CaCertFile)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : %v\n", err.Error())
|
||||
utils.Log(utils.ErrorLvl, "", err.Error())
|
||||
}
|
||||
caCertPool := x509.NewCertPool()
|
||||
caCertPool.AppendCertsFromPEM(caCert)
|
||||
|
@ -888,16 +955,16 @@ func main() {
|
|||
}
|
||||
|
||||
if config.Debug && !config.TLSServer.MutualTLS {
|
||||
log.Printf("[DEBUG] : running TLS server")
|
||||
utils.Log(utils.DebugLvl, "", "running TLS server")
|
||||
}
|
||||
|
||||
if len(config.TLSServer.NoTLSPaths) == 0 {
|
||||
log.Printf("[WARN] : tlsserver.deploy is true but tlsserver.notlspaths is empty, change tlsserver.deploy to true to deploy two servers, at least for /ping endpoint")
|
||||
utils.Log(utils.WarningLvl, "", "tlsserver.deploy is true but tlsserver.notlspaths is empty, change tlsserver.deploy to true to deploy two servers, at least for /ping endpoint")
|
||||
}
|
||||
|
||||
if len(config.TLSServer.NoTLSPaths) != 0 {
|
||||
if config.Debug {
|
||||
log.Printf("[DEBUG] : running HTTP server for endpoints defined in tlsserver.notlspaths")
|
||||
utils.Log(utils.DebugLvl, "", "running HTTP server for endpoints defined in tlsserver.notlspaths")
|
||||
}
|
||||
|
||||
httpServer := &http.Server{
|
||||
|
@ -909,30 +976,31 @@ func main() {
|
|||
WriteTimeout: 60 * time.Second,
|
||||
IdleTimeout: 60 * time.Second,
|
||||
}
|
||||
log.Printf("[INFO] : Falcosidekick is up and listening on %s:%d for TLS and %s:%d for non-TLS", config.ListenAddress, config.ListenPort, config.ListenAddress, config.TLSServer.NoTLSPort)
|
||||
utils.Log(utils.InfoLvl, "", fmt.Sprintf("Falcosidekick is up and listening on %s:%d for TLS and %s:%d for non-TLS", config.ListenAddress, config.ListenPort, config.ListenAddress, config.TLSServer.NoTLSPort))
|
||||
|
||||
errs := make(chan error, 1)
|
||||
go serveTLS(server, errs)
|
||||
go serveHTTP(httpServer, errs)
|
||||
log.Fatal(<-errs)
|
||||
err := <-errs
|
||||
utils.Log(utils.FatalLvl, "", err.Error())
|
||||
} else {
|
||||
log.Printf("[INFO] : Falcosidekick is up and listening on %s:%d", config.ListenAddress, config.ListenPort)
|
||||
utils.Log(utils.InfoLvl, "", fmt.Sprintf("Falcosidekick is up and listening on %s:%d", config.ListenAddress, config.ListenPort))
|
||||
if err := server.ListenAndServeTLS(config.TLSServer.CertFile, config.TLSServer.KeyFile); err != nil {
|
||||
log.Fatalf("[ERROR] : %v", err.Error())
|
||||
utils.Log(utils.FatalLvl, "", err.Error())
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if config.Debug {
|
||||
log.Printf("[DEBUG] : running HTTP server")
|
||||
utils.Log(utils.DebugLvl, "", "running HTTP server")
|
||||
}
|
||||
|
||||
if config.TLSServer.MutualTLS {
|
||||
log.Printf("[WARN] : tlsserver.deploy is false but tlsserver.mutualtls is true, change tlsserver.deploy to true to use mTLS")
|
||||
utils.Log(utils.WarningLvl, "", "tlsserver.deploy is false but tlsserver.mutualtls is true, change tlsserver.deploy to true to use mTLS")
|
||||
}
|
||||
|
||||
log.Printf("[INFO] : Falcosidekick is up and listening on %s:%d", config.ListenAddress, config.ListenPort)
|
||||
utils.Log(utils.InfoLvl, "", fmt.Sprintf("Falcosidekick is up and listening on %s:%d", config.ListenAddress, config.ListenPort))
|
||||
if err := server.ListenAndServe(); err != nil {
|
||||
log.Fatalf("[ERROR] : %v", err.Error())
|
||||
utils.Log(utils.FatalLvl, "", err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,70 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
otlpmetrics "github.com/falcosecurity/falcosidekick/outputs/otlp_metrics"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
func newOTLPMetrics(config *types.Configuration) *otlpmetrics.OTLPMetrics {
|
||||
otlpMetrics = &otlpmetrics.OTLPMetrics{
|
||||
Falco: newOTLPFalcoMatchesCounter(config),
|
||||
Inputs: newOTLPInputsCounter(),
|
||||
Outputs: newOTLPOutputsCounter(),
|
||||
}
|
||||
return otlpMetrics
|
||||
}
|
||||
|
||||
func newOTLPInputsCounter() otlpmetrics.Counter {
|
||||
supportedAttributes := []string{"source", "status"}
|
||||
name := "falcosecurity_falcosidekick_inputs"
|
||||
description := "Number of times an input is received"
|
||||
counter := otlpmetrics.NewCounter(name, description, supportedAttributes)
|
||||
return counter
|
||||
}
|
||||
|
||||
func newOTLPOutputsCounter() otlpmetrics.Counter {
|
||||
name := "falcosecurity_falcosidekick_outputs"
|
||||
description := "Number of times an output is generated"
|
||||
supportedAttributes := []string{"destination", "status"}
|
||||
counter := otlpmetrics.NewCounter(name, description, supportedAttributes)
|
||||
return counter
|
||||
}
|
||||
|
||||
func newOTLPFalcoMatchesCounter(config *types.Configuration) otlpmetrics.Counter {
|
||||
regOTLPLabels, _ := regexp.Compile("^[a-zA-Z_:][a-zA-Z0-9_:]*$")
|
||||
|
||||
supportedAttributes := []string{
|
||||
"source",
|
||||
"priority",
|
||||
"rule",
|
||||
"hostname",
|
||||
"tags",
|
||||
"k8s_ns_name",
|
||||
"k8s_pod_name",
|
||||
}
|
||||
for i := range config.Customfields {
|
||||
if !regOTLPLabels.MatchString(i) {
|
||||
utils.Log(utils.ErrorLvl, "", fmt.Sprintf("Custom field '%v' is not a valid OTLP metric attribute name", i))
|
||||
continue
|
||||
}
|
||||
supportedAttributes = append(supportedAttributes, i)
|
||||
}
|
||||
|
||||
for _, i := range config.OTLP.Metrics.ExtraAttributesList {
|
||||
if !regOTLPLabels.MatchString(strings.ReplaceAll(i, ".", "_")) {
|
||||
utils.Log(utils.ErrorLvl, "", fmt.Sprintf("Extra field '%v' is not a valid OTLP metric attribute name", i))
|
||||
continue
|
||||
}
|
||||
supportedAttributes = append(supportedAttributes, strings.ReplaceAll(i, ".", "_"))
|
||||
}
|
||||
|
||||
name := "falcosecurity_falco_rules_matches_total"
|
||||
description := "Number of times rules match"
|
||||
counter := otlpmetrics.NewCounter(name, description, supportedAttributes)
|
||||
return counter
|
||||
}
|
|
@ -4,13 +4,17 @@ package outputs
|
|||
|
||||
import (
|
||||
"encoding/json"
|
||||
"log"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
|
@ -36,6 +40,28 @@ var (
|
|||
reg = regexp.MustCompile("[^a-zA-Z0-9_]")
|
||||
)
|
||||
|
||||
func NewAlertManagerClient(hostPorts []string, endpoint string, cfg types.CommonConfig, params types.InitClientArgs) ([]*Client, error) {
|
||||
clients := make([]*Client, 0)
|
||||
if len(hostPorts) == 1 {
|
||||
endpointUrl := fmt.Sprintf("%s%s", hostPorts[0], endpoint)
|
||||
c, err := NewClient("AlertManager", endpointUrl, cfg, params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
clients = append(clients, c)
|
||||
} else {
|
||||
for i, j := range hostPorts {
|
||||
endpointUrl := fmt.Sprintf("%s%s", j, endpoint)
|
||||
c, err := NewClient(fmt.Sprintf("AlertManager_%v", i), endpointUrl, cfg, params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
clients = append(clients, c)
|
||||
}
|
||||
}
|
||||
return clients, nil
|
||||
}
|
||||
|
||||
func newAlertmanagerPayload(falcopayload types.FalcoPayload, config *types.Configuration) []alertmanagerPayload {
|
||||
var amPayload alertmanagerPayload
|
||||
amPayload.Labels = make(map[string]string)
|
||||
|
@ -130,24 +156,27 @@ func newAlertmanagerPayload(falcopayload types.FalcoPayload, config *types.Confi
|
|||
// AlertmanagerPost posts event to AlertManager
|
||||
func (c *Client) AlertmanagerPost(falcopayload types.FalcoPayload) {
|
||||
c.Stats.Alertmanager.Add(Total, 1)
|
||||
c.httpClientLock.Lock()
|
||||
defer c.httpClientLock.Unlock()
|
||||
for i, j := range c.Config.Alertmanager.CustomHeaders {
|
||||
c.AddHeader(i, j)
|
||||
}
|
||||
|
||||
err := c.Post(newAlertmanagerPayload(falcopayload, c.Config))
|
||||
err := c.Post(newAlertmanagerPayload(falcopayload, c.Config), func(req *http.Request) {
|
||||
for i, j := range c.Config.Alertmanager.CustomHeaders {
|
||||
req.Header.Set(i, j)
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
go c.CountMetric(Outputs, 1, []string{"output:alertmanager", "status:error"})
|
||||
c.Stats.Alertmanager.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "alertmanager", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : AlertManager - %v\n", err)
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "alertmanager"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
go c.CountMetric(Outputs, 1, []string{"output:alertmanager", "status:ok"})
|
||||
c.Stats.Alertmanager.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "alertmanager", "status": OK}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "alertmanager"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
||||
func alertmanagerSafeLabel(label string) string {
|
||||
|
|
213
outputs/aws.go
213
outputs/aws.go
|
@ -4,36 +4,44 @@ package outputs
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/DataDog/datadog-go/statsd"
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/ec2metadata"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
|
||||
"github.com/aws/aws-sdk-go/service/kinesis"
|
||||
"github.com/aws/aws-sdk-go/service/lambda"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
"github.com/aws/aws-sdk-go/service/sns"
|
||||
"github.com/aws/aws-sdk-go/service/sqs"
|
||||
"github.com/aws/aws-sdk-go/service/sts"
|
||||
"github.com/aws/aws-sdk-go-v2/aws"
|
||||
awsconfig "github.com/aws/aws-sdk-go-v2/config"
|
||||
"github.com/aws/aws-sdk-go-v2/credentials"
|
||||
"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
|
||||
"github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs"
|
||||
cloudwatchlogstypes "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs/types"
|
||||
"github.com/aws/aws-sdk-go-v2/service/kinesis"
|
||||
"github.com/aws/aws-sdk-go-v2/service/lambda"
|
||||
lambdatypes "github.com/aws/aws-sdk-go-v2/service/lambda/types"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
s3types "github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
"github.com/aws/aws-sdk-go-v2/service/sns"
|
||||
snstypes "github.com/aws/aws-sdk-go-v2/service/sns/types"
|
||||
"github.com/aws/aws-sdk-go-v2/service/sqs"
|
||||
"github.com/aws/aws-sdk-go-v2/service/sts"
|
||||
"github.com/google/uuid"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
otlpmetrics "github.com/falcosecurity/falcosidekick/outputs/otlp_metrics"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
// NewAWSClient returns a new output.Client for accessing the AWS API.
|
||||
func NewAWSClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
|
||||
func NewAWSClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics,
|
||||
otlpMetrics *otlpmetrics.OTLPMetrics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
|
||||
var region string
|
||||
if config.AWS.Region != "" {
|
||||
region = config.AWS.Region
|
||||
|
@ -42,15 +50,28 @@ func NewAWSClient(config *types.Configuration, stats *types.Statistics, promStat
|
|||
} else if os.Getenv("AWS_DEFAULT_REGION") != "" {
|
||||
region = os.Getenv("AWS_DEFAULT_REGION")
|
||||
} else {
|
||||
metaSession := session.Must(session.NewSession())
|
||||
metaClient := ec2metadata.New(metaSession)
|
||||
|
||||
var err error
|
||||
region, err = metaClient.Region()
|
||||
cfg, err := awsconfig.LoadDefaultConfig(context.TODO())
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : AWS - Error while getting region from Metadata AWS Session: %v\n", err.Error())
|
||||
return nil, err
|
||||
}
|
||||
metaClient := imds.NewFromConfig(cfg)
|
||||
|
||||
getMetadataOutput, err := metaClient.GetMetadata(context.TODO(), &imds.GetMetadataInput{Path: "placement/region"})
|
||||
if err != nil {
|
||||
utils.Log(utils.ErrorLvl, "AWS", fmt.Sprintf("Error while calling from Metadata AWS: %v", err.Error()))
|
||||
return nil, errors.New("error calling to get metadata")
|
||||
}
|
||||
|
||||
defer getMetadataOutput.Content.Close()
|
||||
regionBytes, err := io.ReadAll(getMetadataOutput.Content)
|
||||
if err != nil {
|
||||
utils.Log(utils.ErrorLvl, "AWS", fmt.Sprintf("Error while getting region from Metadata AWS Session: %v", err.Error()))
|
||||
return nil, errors.New("error getting region from metadata")
|
||||
}
|
||||
|
||||
region = string(regionBytes)
|
||||
utils.Log(utils.InfoLvl, "AWS", fmt.Sprintf("region from metadata: %s", region))
|
||||
}
|
||||
|
||||
if config.AWS.AccessKeyID != "" && config.AWS.SecretAccessKey != "" && region != "" {
|
||||
|
@ -58,52 +79,45 @@ func NewAWSClient(config *types.Configuration, stats *types.Statistics, promStat
|
|||
err2 := os.Setenv("AWS_SECRET_ACCESS_KEY", config.AWS.SecretAccessKey)
|
||||
err3 := os.Setenv("AWS_DEFAULT_REGION", region)
|
||||
if err1 != nil || err2 != nil || err3 != nil {
|
||||
log.Println("[ERROR] : AWS - Error setting AWS env vars")
|
||||
utils.Log(utils.ErrorLvl, "AWS", "Error setting AWS env vars")
|
||||
return nil, errors.New("error setting AWS env vars")
|
||||
}
|
||||
}
|
||||
|
||||
awscfg := &aws.Config{Region: aws.String(region)}
|
||||
awscfg := &aws.Config{Region: region}
|
||||
|
||||
if config.AWS.RoleARN != "" {
|
||||
baseSess := session.Must(session.NewSession(awscfg))
|
||||
stsSvc := sts.New(baseSess)
|
||||
stsSvc := sts.NewFromConfig(*awscfg)
|
||||
stsArIn := new(sts.AssumeRoleInput)
|
||||
stsArIn.RoleArn = aws.String(config.AWS.RoleARN)
|
||||
stsArIn.RoleSessionName = aws.String(fmt.Sprintf("session-%v", uuid.New().String()))
|
||||
if config.AWS.ExternalID != "" {
|
||||
stsArIn.ExternalId = aws.String(config.AWS.ExternalID)
|
||||
}
|
||||
assumedRole, err := stsSvc.AssumeRole(stsArIn)
|
||||
assumedRole, err := stsSvc.AssumeRole(context.Background(), stsArIn)
|
||||
if err != nil {
|
||||
log.Println("[ERROR] : AWS - Error while Assuming Role")
|
||||
utils.Log(utils.ErrorLvl, "AWS", "Error while Assuming Role")
|
||||
return nil, errors.New("error while assuming role")
|
||||
}
|
||||
awscfg.Credentials = credentials.NewStaticCredentials(
|
||||
awscfg.Credentials = aws.NewCredentialsCache(credentials.NewStaticCredentialsProvider(
|
||||
*assumedRole.Credentials.AccessKeyId,
|
||||
*assumedRole.Credentials.SecretAccessKey,
|
||||
*assumedRole.Credentials.SessionToken,
|
||||
)
|
||||
}
|
||||
|
||||
sess, err := session.NewSession(awscfg)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : AWS - Error while creating AWS Session: %v\n", err.Error())
|
||||
return nil, errors.New("error while creating AWS Session")
|
||||
))
|
||||
}
|
||||
|
||||
if config.AWS.CheckIdentity {
|
||||
_, err = sts.New(sess).GetCallerIdentity(&sts.GetCallerIdentityInput{})
|
||||
_, err := sts.NewFromConfig(*awscfg).GetCallerIdentity(context.Background(), &sts.GetCallerIdentityInput{})
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : AWS - Error while getting AWS Token: %v\n", err.Error())
|
||||
utils.Log(utils.ErrorLvl, "AWS", fmt.Sprintf("Error while getting AWS Token: %v", err.Error()))
|
||||
return nil, errors.New("error while getting AWS Token")
|
||||
}
|
||||
}
|
||||
|
||||
var endpointURL *url.URL
|
||||
endpointURL, err = url.Parse(config.AWS.SQS.URL)
|
||||
endpointURL, err := url.Parse(config.AWS.SQS.URL)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : AWS SQS - %v\n", err.Error())
|
||||
utils.Log(utils.ErrorLvl, "AWS SQS", err.Error())
|
||||
return nil, ErrClientCreation
|
||||
}
|
||||
|
||||
|
@ -111,9 +125,10 @@ func NewAWSClient(config *types.Configuration, stats *types.Statistics, promStat
|
|||
OutputType: "AWS",
|
||||
EndpointURL: endpointURL,
|
||||
Config: config,
|
||||
AWSSession: sess,
|
||||
AWSConfig: awscfg,
|
||||
Stats: stats,
|
||||
PromStats: promStats,
|
||||
OTLPMetrics: otlpMetrics,
|
||||
StatsdClient: statsdClient,
|
||||
DogstatsdClient: dogstatsdClient,
|
||||
}, nil
|
||||
|
@ -121,42 +136,46 @@ func NewAWSClient(config *types.Configuration, stats *types.Statistics, promStat
|
|||
|
||||
// InvokeLambda invokes a lambda function
|
||||
func (c *Client) InvokeLambda(falcopayload types.FalcoPayload) {
|
||||
svc := lambda.New(c.AWSSession)
|
||||
svc := lambda.NewFromConfig(*c.AWSConfig)
|
||||
|
||||
f, _ := json.Marshal(falcopayload)
|
||||
|
||||
input := &lambda.InvokeInput{
|
||||
FunctionName: aws.String(c.Config.AWS.Lambda.FunctionName),
|
||||
InvocationType: aws.String(c.Config.AWS.Lambda.InvocationType),
|
||||
LogType: aws.String(c.Config.AWS.Lambda.LogType),
|
||||
InvocationType: lambdatypes.InvocationType(c.Config.AWS.Lambda.InvocationType),
|
||||
LogType: lambdatypes.LogType(c.Config.AWS.Lambda.LogType),
|
||||
Payload: f,
|
||||
}
|
||||
|
||||
c.Stats.AWSLambda.Add("total", 1)
|
||||
|
||||
resp, err := svc.Invoke(input)
|
||||
resp, err := svc.Invoke(context.Background(), input)
|
||||
if err != nil {
|
||||
go c.CountMetric("outputs", 1, []string{"output:awslambda", "status:error"})
|
||||
c.Stats.AWSLambda.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "awslambda", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : %v Lambda - %v\n", c.OutputType, err.Error())
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "awslambda"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType+" Lambda", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if c.Config.Debug {
|
||||
r, _ := base64.StdEncoding.DecodeString(*resp.LogResult)
|
||||
log.Printf("[DEBUG] : %v Lambda result : %v\n", c.OutputType, string(r))
|
||||
utils.Log(utils.DebugLvl, c.OutputType+" Lambda", fmt.Sprintf("result : %v", string(r)))
|
||||
}
|
||||
|
||||
log.Printf("[INFO] : %v Lambda - Invoke OK (%v)\n", c.OutputType, *resp.StatusCode)
|
||||
utils.Log(utils.InfoLvl, c.OutputType+" Lambda", fmt.Sprintf("Invoke OK (%v)", resp.StatusCode))
|
||||
go c.CountMetric("outputs", 1, []string{"output:awslambda", "status:ok"})
|
||||
c.Stats.AWSLambda.Add("ok", 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "awslambda", "status": "ok"}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "awslambda"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
||||
// SendMessage sends a message to SQS Queue
|
||||
func (c *Client) SendMessage(falcopayload types.FalcoPayload) {
|
||||
svc := sqs.New(c.AWSSession)
|
||||
svc := sqs.NewFromConfig(*c.AWSConfig)
|
||||
|
||||
f, _ := json.Marshal(falcopayload)
|
||||
|
||||
|
@ -167,23 +186,27 @@ func (c *Client) SendMessage(falcopayload types.FalcoPayload) {
|
|||
|
||||
c.Stats.AWSSQS.Add("total", 1)
|
||||
|
||||
resp, err := svc.SendMessage(input)
|
||||
resp, err := svc.SendMessage(context.Background(), input)
|
||||
if err != nil {
|
||||
go c.CountMetric("outputs", 1, []string{"output:awssqs", "status:error"})
|
||||
c.Stats.AWSSQS.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "awssqs", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : %v SQS - %v\n", c.OutputType, err.Error())
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "awssqs"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType+" SQS", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if c.Config.Debug {
|
||||
log.Printf("[DEBUG] : %v SQS - MD5OfMessageBody : %v\n", c.OutputType, *resp.MD5OfMessageBody)
|
||||
utils.Log(utils.DebugLvl, c.OutputType+" SQS", fmt.Sprintf("MD5OfMessageBody : %v", *resp.MD5OfMessageBody))
|
||||
}
|
||||
|
||||
log.Printf("[INFO] : %v SQS - Send Message OK (%v)\n", c.OutputType, *resp.MessageId)
|
||||
utils.Log(utils.InfoLvl, c.OutputType+" SQS", fmt.Sprintf("Send Message OK (%v)", *resp.MessageId))
|
||||
go c.CountMetric("outputs", 1, []string{"output:awssqs", "status:ok"})
|
||||
c.Stats.AWSSQS.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "awssqs", "status": "ok"}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "awssqs"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
||||
// UploadS3 upload payload to S3
|
||||
|
@ -198,35 +221,42 @@ func (c *Client) UploadS3(falcopayload types.FalcoPayload) {
|
|||
|
||||
key := fmt.Sprintf("%s/%s/%s.json", prefix, t.Format("2006-01-02"), t.Format(time.RFC3339Nano))
|
||||
awsConfig := aws.NewConfig()
|
||||
var client s3.Client
|
||||
if c.Config.AWS.S3.Endpoint != "" {
|
||||
awsConfig = awsConfig.WithEndpoint(c.Config.AWS.S3.Endpoint)
|
||||
s3.NewFromConfig(*awsConfig, s3.WithEndpointResolver(s3.EndpointResolverFromURL(c.Config.AWS.S3.Endpoint)))
|
||||
} else {
|
||||
client = *s3.NewFromConfig(*awsConfig)
|
||||
}
|
||||
resp, err := s3.New(c.AWSSession, awsConfig).PutObject(&s3.PutObjectInput{
|
||||
resp, err := client.PutObject(context.Background(), &s3.PutObjectInput{
|
||||
Bucket: aws.String(c.Config.AWS.S3.Bucket),
|
||||
Key: aws.String(key),
|
||||
Body: bytes.NewReader(f),
|
||||
ACL: aws.String(c.Config.AWS.S3.ObjectCannedACL),
|
||||
ACL: s3types.ObjectCannedACL(c.Config.AWS.S3.ObjectCannedACL),
|
||||
})
|
||||
if err != nil {
|
||||
go c.CountMetric("outputs", 1, []string{"output:awss3", "status:error"})
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "awss3", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : %v S3 - %v\n", c.OutputType, err.Error())
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "awss3"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType+" S3", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if resp.SSECustomerAlgorithm != nil {
|
||||
log.Printf("[INFO] : %v S3 - Upload payload OK (%v)\n", c.OutputType, *resp.SSECustomerKeyMD5)
|
||||
utils.Log(utils.InfoLvl, c.OutputType+" S3", fmt.Sprintf("Upload payload OK (%v)", *resp.SSECustomerKeyMD5))
|
||||
} else {
|
||||
log.Printf("[INFO] : %v S3 - Upload payload OK\n", c.OutputType)
|
||||
utils.Log(utils.InfoLvl, c.OutputType+" S3", "Upload payload OK")
|
||||
}
|
||||
|
||||
go c.CountMetric("outputs", 1, []string{"output:awss3", "status:ok"})
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "awss3", "status": "ok"}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "awss3"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
||||
// PublishTopic sends a message to a SNS Topic
|
||||
func (c *Client) PublishTopic(falcopayload types.FalcoPayload) {
|
||||
svc := sns.New(c.AWSSession)
|
||||
svc := sns.NewFromConfig(*c.AWSConfig)
|
||||
|
||||
var msg *sns.PublishInput
|
||||
|
||||
|
@ -239,7 +269,7 @@ func (c *Client) PublishTopic(falcopayload types.FalcoPayload) {
|
|||
} else {
|
||||
msg = &sns.PublishInput{
|
||||
Message: aws.String(falcopayload.Output),
|
||||
MessageAttributes: map[string]*sns.MessageAttributeValue{
|
||||
MessageAttributes: map[string]snstypes.MessageAttributeValue{
|
||||
"priority": {
|
||||
DataType: aws.String("String"),
|
||||
StringValue: aws.String(falcopayload.Priority.String()),
|
||||
|
@ -257,13 +287,13 @@ func (c *Client) PublishTopic(falcopayload types.FalcoPayload) {
|
|||
}
|
||||
|
||||
if len(falcopayload.Tags) != 0 {
|
||||
msg.MessageAttributes["tags"] = &sns.MessageAttributeValue{
|
||||
msg.MessageAttributes["tags"] = snstypes.MessageAttributeValue{
|
||||
DataType: aws.String("String"),
|
||||
StringValue: aws.String(strings.Join(falcopayload.Tags, ",")),
|
||||
}
|
||||
}
|
||||
if falcopayload.Hostname != "" {
|
||||
msg.MessageAttributes[Hostname] = &sns.MessageAttributeValue{
|
||||
msg.MessageAttributes[Hostname] = snstypes.MessageAttributeValue{
|
||||
DataType: aws.String("String"),
|
||||
StringValue: aws.String(falcopayload.Hostname),
|
||||
}
|
||||
|
@ -272,12 +302,12 @@ func (c *Client) PublishTopic(falcopayload types.FalcoPayload) {
|
|||
m := strings.ReplaceAll(strings.ReplaceAll(i, "]", ""), "[", ".")
|
||||
switch j.(type) {
|
||||
case string:
|
||||
msg.MessageAttributes[m] = &sns.MessageAttributeValue{
|
||||
msg.MessageAttributes[m] = snstypes.MessageAttributeValue{
|
||||
DataType: aws.String("String"),
|
||||
StringValue: aws.String(fmt.Sprintf("%v", j)),
|
||||
}
|
||||
case json.Number:
|
||||
msg.MessageAttributes[m] = &sns.MessageAttributeValue{
|
||||
msg.MessageAttributes[m] = snstypes.MessageAttributeValue{
|
||||
DataType: aws.String("Number"),
|
||||
StringValue: aws.String(fmt.Sprintf("%v", j)),
|
||||
}
|
||||
|
@ -289,28 +319,32 @@ func (c *Client) PublishTopic(falcopayload types.FalcoPayload) {
|
|||
|
||||
if c.Config.Debug {
|
||||
p, _ := json.Marshal(msg)
|
||||
log.Printf("[DEBUG] : %v SNS - Message : %v\n", c.OutputType, string(p))
|
||||
utils.Log(utils.DebugLvl, c.OutputType+" SNS", fmt.Sprintf("Message : %v", string(p)))
|
||||
}
|
||||
|
||||
c.Stats.AWSSNS.Add("total", 1)
|
||||
resp, err := svc.Publish(msg)
|
||||
resp, err := svc.Publish(context.TODO(), msg)
|
||||
if err != nil {
|
||||
go c.CountMetric("outputs", 1, []string{"output:awssns", "status:error"})
|
||||
c.Stats.AWSSNS.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "awssns", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : %v SNS - %v\n", c.OutputType, err.Error())
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "awssns"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType+" SNS", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
log.Printf("[INFO] : %v SNS - Send to topic OK (%v)\n", c.OutputType, *resp.MessageId)
|
||||
utils.Log(utils.DebugLvl, c.OutputType+" SNS", fmt.Sprintf("Send to topic OK (%v)", *resp.MessageId))
|
||||
go c.CountMetric("outputs", 1, []string{"output:awssns", "status:ok"})
|
||||
c.Stats.AWSSNS.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "awssns", "status": OK}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "awssns"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
||||
// SendCloudWatchLog sends a message to CloudWatch Log
|
||||
func (c *Client) SendCloudWatchLog(falcopayload types.FalcoPayload) {
|
||||
svc := cloudwatchlogs.New(c.AWSSession)
|
||||
svc := cloudwatchlogs.NewFromConfig(*c.AWSConfig)
|
||||
|
||||
f, _ := json.Marshal(falcopayload)
|
||||
|
||||
|
@ -318,21 +352,24 @@ func (c *Client) SendCloudWatchLog(falcopayload types.FalcoPayload) {
|
|||
|
||||
if c.Config.AWS.CloudWatchLogs.LogStream == "" {
|
||||
streamName := "falcosidekick-logstream"
|
||||
log.Printf("[INFO] : %v CloudWatchLogs - Log Stream not configured creating one called %s\n", c.OutputType, streamName)
|
||||
utils.Log(utils.InfoLvl, c.OutputType+" CloudWatchLogs", fmt.Sprintf("Log Stream not configured creating one called %s", streamName))
|
||||
inputLogStream := &cloudwatchlogs.CreateLogStreamInput{
|
||||
LogGroupName: aws.String(c.Config.AWS.CloudWatchLogs.LogGroup),
|
||||
LogStreamName: aws.String(streamName),
|
||||
}
|
||||
|
||||
_, err := svc.CreateLogStream(inputLogStream)
|
||||
_, err := svc.CreateLogStream(context.Background(), inputLogStream)
|
||||
if err != nil {
|
||||
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == cloudwatchlogs.ErrCodeResourceAlreadyExistsException {
|
||||
log.Printf("[INFO] : %v CloudWatchLogs - Log Stream %s already exist, reusing...\n", c.OutputType, streamName)
|
||||
var rae *cloudwatchlogstypes.ResourceAlreadyExistsException
|
||||
if errors.As(err, &rae) {
|
||||
utils.Log(utils.InfoLvl, c.OutputType+" CloudWatchLogs", fmt.Sprintf("Log Stream %s already exist, reusing...", streamName))
|
||||
} else {
|
||||
go c.CountMetric("outputs", 1, []string{"output:awscloudwatchlogs", "status:error"})
|
||||
c.Stats.AWSCloudWatchLogs.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "awscloudwatchlogs", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : %v CloudWatchLogs - %v\n", c.OutputType, err.Error())
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "awscloudwatchlogs"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType+" CloudWatchLogs", err.Error())
|
||||
return
|
||||
}
|
||||
}
|
||||
|
@ -340,13 +377,13 @@ func (c *Client) SendCloudWatchLog(falcopayload types.FalcoPayload) {
|
|||
c.Config.AWS.CloudWatchLogs.LogStream = streamName
|
||||
}
|
||||
|
||||
logevent := &cloudwatchlogs.InputLogEvent{
|
||||
logevent := cloudwatchlogstypes.InputLogEvent{
|
||||
Message: aws.String(string(f)),
|
||||
Timestamp: aws.Int64(falcopayload.Time.UnixNano() / int64(time.Millisecond)),
|
||||
}
|
||||
|
||||
input := &cloudwatchlogs.PutLogEventsInput{
|
||||
LogEvents: []*cloudwatchlogs.InputLogEvent{logevent},
|
||||
LogEvents: []cloudwatchlogstypes.InputLogEvent{logevent},
|
||||
LogGroupName: aws.String(c.Config.AWS.CloudWatchLogs.LogGroup),
|
||||
LogStreamName: aws.String(c.Config.AWS.CloudWatchLogs.LogStream),
|
||||
}
|
||||
|
@ -357,22 +394,26 @@ func (c *Client) SendCloudWatchLog(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric("outputs", 1, []string{"output:awscloudwatchlogs", "status:error"})
|
||||
c.Stats.AWSCloudWatchLogs.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "awscloudwatchlogs", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : %v CloudWatchLogs - %v\n", c.OutputType, err.Error())
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "awscloudwatchlogs"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType+" CloudWatchLogs", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
log.Printf("[INFO] : %v CloudWatchLogs - Send Log OK (%v)\n", c.OutputType, resp.String())
|
||||
utils.Log(utils.InfoLvl, c.OutputType+" CloudWatchLogs", fmt.Sprintf("Send Log OK (%v)", resp.ResultMetadata))
|
||||
go c.CountMetric("outputs", 1, []string{"output:awscloudwatchlogs", "status:ok"})
|
||||
c.Stats.AWSCloudWatchLogs.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "awscloudwatchlogs", "status": OK}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "awscloudwatchlogs"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
||||
// PutLogEvents will attempt to execute and handle invalid tokens.
|
||||
func (c *Client) putLogEvents(svc *cloudwatchlogs.CloudWatchLogs, input *cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) {
|
||||
resp, err := svc.PutLogEvents(input)
|
||||
func (c *Client) putLogEvents(svc *cloudwatchlogs.Client, input *cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) {
|
||||
resp, err := svc.PutLogEvents(context.Background(), input)
|
||||
if err != nil {
|
||||
if exception, ok := err.(*cloudwatchlogs.InvalidSequenceTokenException); ok {
|
||||
log.Printf("[INFO] : %v Refreshing token for LogGroup: %s LogStream: %s", c.OutputType, *input.LogGroupName, *input.LogStreamName)
|
||||
if exception, ok := err.(*cloudwatchlogstypes.InvalidSequenceTokenException); ok {
|
||||
utils.Log(utils.InfoLvl, c.OutputType+" CloudWatchLogs", fmt.Sprintf("Refreshing token for LogGroup: %s LogStream: %s", *input.LogGroupName, *input.LogStreamName))
|
||||
input.SequenceToken = exception.ExpectedSequenceToken
|
||||
|
||||
return c.putLogEvents(svc, input)
|
||||
|
@ -386,7 +427,7 @@ func (c *Client) putLogEvents(svc *cloudwatchlogs.CloudWatchLogs, input *cloudwa
|
|||
|
||||
// PutRecord puts a record in Kinesis
|
||||
func (c *Client) PutRecord(falcoPayLoad types.FalcoPayload) {
|
||||
svc := kinesis.New(c.AWSSession)
|
||||
svc := kinesis.NewFromConfig(*c.AWSConfig)
|
||||
|
||||
c.Stats.AWSKinesis.Add(Total, 1)
|
||||
|
||||
|
@ -397,17 +438,21 @@ func (c *Client) PutRecord(falcoPayLoad types.FalcoPayload) {
|
|||
StreamName: aws.String(c.Config.AWS.Kinesis.StreamName),
|
||||
}
|
||||
|
||||
resp, err := svc.PutRecord(input)
|
||||
resp, err := svc.PutRecord(context.Background(), input)
|
||||
if err != nil {
|
||||
go c.CountMetric("outputs", 1, []string{"output:awskinesis", "status:error"})
|
||||
c.Stats.AWSKinesis.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "awskinesis", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : %v Kinesis - %v\n", c.OutputType, err.Error())
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "awskinesis"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType+" Kinesis", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
log.Printf("[INFO] : %v Kinesis - Put Record OK (%v)\n", c.OutputType, resp.SequenceNumber)
|
||||
utils.Log(utils.InfoLvl, c.OutputType+" Kinesis", fmt.Sprintf("Put Record OK (%v)", resp.SequenceNumber))
|
||||
go c.CountMetric("outputs", 1, []string{"output:awskinesis", "status:ok"})
|
||||
c.Stats.AWSKinesis.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "awskinesis", "status": "ok"}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "awskinesis"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
|
|
@ -8,16 +8,18 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
"github.com/aws/aws-sdk-go-v2/aws"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
s3types "github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
"github.com/embano1/memlog"
|
||||
"github.com/google/uuid"
|
||||
"github.com/xitongsys/parquet-go-source/mem"
|
||||
"github.com/xitongsys/parquet-go/writer"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
|
@ -203,10 +205,12 @@ func (c *Client) EnqueueSecurityLake(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:awssecuritylake.", "status:error"})
|
||||
c.Stats.AWSSecurityLake.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "awssecuritylake.", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : %v SecurityLake - %v\n", c.OutputType, err)
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "awssecuritylake"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType+" SecurityLake", err.Error())
|
||||
return
|
||||
}
|
||||
log.Printf("[INFO] : %v SecurityLake - Event queued (%v)\n", c.OutputType, falcopayload.UUID)
|
||||
utils.Log(utils.InfoLvl, c.OutputType+" SecurityLake", fmt.Sprintf("Event queued (%v)", falcopayload.UUID))
|
||||
*c.Config.AWS.SecurityLake.WriteOffset = offset
|
||||
}
|
||||
|
||||
|
@ -217,7 +221,7 @@ func (c *Client) StartSecurityLakeWorker() {
|
|||
continue
|
||||
}
|
||||
|
||||
time.Sleep(time.Duration(c.Config.AWS.SecurityLake.Interval) * time.Minute)
|
||||
time.Sleep(time.Duration(c.Config.AWS.SecurityLake.Interval) * time.Minute) //nolint:gosec // disable G115
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -233,7 +237,9 @@ func (c *Client) processNextBatch() error {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:awssecuritylake.", "status:error"})
|
||||
c.Stats.AWSSecurityLake.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "awssecuritylake.", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : %v SecurityLake - %v\n", c.OutputType, err)
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "awssecuritylake"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType+" SecurityLake", err.Error())
|
||||
// ctx currently not handled in main
|
||||
// https://github.com/falcosecurity/falcosidekick/pull/390#discussion_r1081690326
|
||||
return err
|
||||
|
@ -245,6 +251,8 @@ func (c *Client) processNextBatch() error {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:awssecuritylake.", "status:error"})
|
||||
c.Stats.AWSSecurityLake.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "awssecuritylake.", "status": Error}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "awssecuritylake"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
|
||||
earliest = earliest - 1 // to ensure next batch includes earliest as we read from ReadOffset+1
|
||||
msg := fmt.Errorf("slow batch reader: resetting read offset from %d to %d: %v",
|
||||
|
@ -252,7 +260,7 @@ func (c *Client) processNextBatch() error {
|
|||
earliest,
|
||||
err,
|
||||
)
|
||||
log.Printf("[ERROR] : %v SecurityLake - %v\n", c.OutputType, msg)
|
||||
utils.Log(utils.ErrorLvl, c.OutputType+" SecurityLake", msg.Error())
|
||||
awslake.ReadOffset = &earliest
|
||||
return err
|
||||
}
|
||||
|
@ -262,7 +270,9 @@ func (c *Client) processNextBatch() error {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:awssecuritylake.", "status:error"})
|
||||
c.Stats.AWSSecurityLake.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "awssecuritylake.", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : %v SecurityLake - %v\n", c.OutputType, err)
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "awssecuritylake"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType+" SecurityLake", err.Error())
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -274,6 +284,8 @@ func (c *Client) processNextBatch() error {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:awssecuritylake.", "status:error"})
|
||||
c.Stats.AWSSecurityLake.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "awssecuritylake.", "status": Error}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "awssecuritylake"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
// we don't update ReadOffset to retry and not skip records
|
||||
return err
|
||||
}
|
||||
|
@ -281,6 +293,8 @@ func (c *Client) processNextBatch() error {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:awssecuritylake.", "status:ok"})
|
||||
c.Stats.AWSSecurityLake.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "awssecuritylake.", "status": "ok"}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "awssecuritylake"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
|
||||
// update offset
|
||||
*awslake.ReadOffset = batch[count-1].Metadata.Offset
|
||||
|
@ -295,50 +309,51 @@ func (c *Client) writeParquet(uid string, records []memlog.Record) error {
|
|||
key := fmt.Sprintf("/%s/region=%s/accountId=%s/eventDay=%s/%s.parquet", c.Config.AWS.SecurityLake.Prefix, c.Config.AWS.SecurityLake.Region, c.Config.AWS.SecurityLake.AccountID, t.Format("20060102"), uid)
|
||||
ctx, cancelFn := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancelFn()
|
||||
resp, err := s3.New(c.AWSSession).PutObjectWithContext(ctx, &s3.PutObjectInput{
|
||||
|
||||
resp, err := s3.NewFromConfig(*c.AWSConfig).PutObject(ctx, &s3.PutObjectInput{
|
||||
Bucket: aws.String(c.Config.AWS.SecurityLake.Bucket),
|
||||
Key: aws.String(key),
|
||||
Body: aws.ReadSeekCloser(r),
|
||||
Body: r,
|
||||
ContentType: aws.String("Apache Parquet"),
|
||||
ACL: aws.String(s3.ObjectCannedACLBucketOwnerFullControl),
|
||||
ACL: s3types.ObjectCannedACLBucketOwnerFullControl,
|
||||
})
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : %v SecurityLake - Upload parquet file %s.parquet Failed: %v\n", c.OutputType, uid, err)
|
||||
utils.Log(utils.ErrorLvl, c.OutputType+" SecurityLake", fmt.Sprintf("Upload parquet file %s.parquet Failed: %v", uid, err))
|
||||
return err
|
||||
}
|
||||
if resp.SSECustomerAlgorithm != nil {
|
||||
log.Printf("[INFO] : %v SecurityLake - Upload parquet file %s.parquet OK (%v) (%v events) \n", c.OutputType, uid, *resp.SSECustomerKeyMD5, len(records))
|
||||
utils.Log(utils.InfoLvl, c.OutputType+" SecurityLake", fmt.Sprintf("Upload parquet file %s.parquet OK (%v) (%v events)", uid, *resp.SSECustomerKeyMD5, len(records)))
|
||||
} else {
|
||||
log.Printf("[INFO] : %v SecurityLake - Upload parquet file %s.parquet OK (%v events)\n", c.OutputType, uid, len(records))
|
||||
utils.Log(utils.InfoLvl, c.OutputType+" SecurityLake", fmt.Sprintf("Upload parquet file %s.parquet OK (%v events)\n", uid, len(records)))
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : %v SecurityLake - Can't create the parquet file %s.parquet: %v\n", c.OutputType, uid, err)
|
||||
utils.Log(utils.ErrorLvl, c.OutputType+" SecurityLake", fmt.Sprintf("Can't create the parquet file %s.parquet: %v", uid, err))
|
||||
return err
|
||||
}
|
||||
pw, err := writer.NewParquetWriter(fw, new(OCSFSecurityFinding), 10)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : %v SecurityLake - Can't create the parquet writer: %v\n", c.OutputType, err)
|
||||
utils.Log(utils.ErrorLvl, c.OutputType+" SecurityLake", fmt.Sprintf("Can't create the parquet writer: %v", err))
|
||||
return err
|
||||
}
|
||||
for _, i := range records {
|
||||
var f types.FalcoPayload
|
||||
if err := json.Unmarshal(i.Data, &f); err != nil {
|
||||
log.Printf("[ERROR] : %v SecurityLake - Unmarshalling error: %v\n", c.OutputType, err)
|
||||
utils.Log(utils.ErrorLvl, c.OutputType+" SecurityLake", fmt.Sprintf("Unmarshalling error: %v", err))
|
||||
continue
|
||||
}
|
||||
o := NewOCSFSecurityFinding(f)
|
||||
if err = pw.Write(o); err != nil {
|
||||
log.Printf("[ERROR] : %v SecurityLake - Parquet writer error: %v\n", c.OutputType, err)
|
||||
utils.Log(utils.ErrorLvl, c.OutputType+" SecurityLake", fmt.Sprintf("Parquet writer error: %v", err))
|
||||
continue
|
||||
}
|
||||
}
|
||||
if err = pw.WriteStop(); err != nil {
|
||||
log.Printf("[ERROR] : %v SecurityLake - Can't stop the parquet writer: %v\n", c.OutputType, err)
|
||||
utils.Log(utils.ErrorLvl, c.OutputType+" SecurityLake", fmt.Sprintf("Can't stop the parquet writer: %v", err))
|
||||
}
|
||||
if err = fw.Close(); err != nil {
|
||||
log.Printf("[ERROR] : %v SecurityLake - Can't close the parquet file %s.parquet: %v\n", c.OutputType, uid, err)
|
||||
utils.Log(utils.ErrorLvl, c.OutputType+" SecurityLake", fmt.Sprintf("Can't close the parquet file %s.parquet: %v", uid, err))
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
|
|
@ -5,23 +5,28 @@ package outputs
|
|||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"log"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
|
||||
azeventhubs "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs"
|
||||
"github.com/DataDog/datadog-go/statsd"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
otlpmetrics "github.com/falcosecurity/falcosidekick/outputs/otlp_metrics"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
// NewEventHubClient returns a new output.Client for accessing the Azure Event Hub.
|
||||
func NewEventHubClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
|
||||
func NewEventHubClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics,
|
||||
otlpMetrics *otlpmetrics.OTLPMetrics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
|
||||
return &Client{
|
||||
OutputType: "AzureEventHub",
|
||||
Config: config,
|
||||
Stats: stats,
|
||||
PromStats: promStats,
|
||||
OTLPMetrics: otlpMetrics,
|
||||
StatsdClient: statsdClient,
|
||||
DogstatsdClient: dogstatsdClient,
|
||||
}, nil
|
||||
|
@ -34,48 +39,48 @@ func (c *Client) EventHubPost(falcopayload types.FalcoPayload) {
|
|||
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
|
||||
defer cancel()
|
||||
|
||||
log.Printf("[INFO] : %v EventHub - Try sending event", c.OutputType)
|
||||
utils.Log(utils.InfoLvl, c.OutputType+" EventHub", "Try sending event")
|
||||
defaultAzureCred, err := azidentity.NewDefaultAzureCredential(nil)
|
||||
if err != nil {
|
||||
c.setEventHubErrorMetrics()
|
||||
log.Printf("[ERROR] : %v EventHub - %v\n", c.OutputType, err.Error())
|
||||
utils.Log(utils.ErrorLvl, c.OutputType+" EventHub", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
producerClient, err := azeventhubs.NewProducerClient(c.Config.Azure.EventHub.Namespace, c.Config.Azure.EventHub.Name, defaultAzureCred, nil)
|
||||
if err != nil {
|
||||
c.setEventHubErrorMetrics()
|
||||
log.Printf("[ERROR] : %v EventHub - %v\n", c.OutputType, err.Error())
|
||||
utils.Log(utils.ErrorLvl, c.OutputType+" EventHub", err.Error())
|
||||
return
|
||||
}
|
||||
defer producerClient.Close(ctx)
|
||||
|
||||
log.Printf("[INFO] : %v EventHub - Hub client created\n", c.OutputType)
|
||||
utils.Log(utils.InfoLvl, c.OutputType+" EventHub", "Hub client created")
|
||||
|
||||
data, err := json.Marshal(falcopayload)
|
||||
if err != nil {
|
||||
c.setEventHubErrorMetrics()
|
||||
log.Printf("[ERROR] : Cannot marshal payload: %v", err.Error())
|
||||
utils.Log(utils.ErrorLvl, c.OutputType+" EventHub", fmt.Sprintf("Cannot marshal payload: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
batch, err := producerClient.NewEventDataBatch(ctx, nil)
|
||||
if err != nil {
|
||||
c.setEventHubErrorMetrics()
|
||||
log.Printf("[ERROR] : Cannot marshal payload: %v", err.Error())
|
||||
utils.Log(utils.ErrorLvl, c.OutputType+" EventHub", fmt.Sprintf("Cannot marshal payload: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
if err := batch.AddEventData(&azeventhubs.EventData{Body: data}, nil); err != nil {
|
||||
c.setEventHubErrorMetrics()
|
||||
log.Printf("[ERROR] : Cannot marshal payload: %v", err.Error())
|
||||
utils.Log(utils.ErrorLvl, c.OutputType+" EventHub", fmt.Sprintf("Cannot marshal payload: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
producerClient.SendEventDataBatch(ctx, batch, nil)
|
||||
if err := producerClient.SendEventDataBatch(ctx, batch, nil); err != nil {
|
||||
c.setEventHubErrorMetrics()
|
||||
log.Printf("[ERROR] : %v EventHub - %v\n", c.OutputType, err.Error())
|
||||
utils.Log(utils.ErrorLvl, c.OutputType+" EventHub", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -83,7 +88,9 @@ func (c *Client) EventHubPost(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:azureeventhub", "status:ok"})
|
||||
c.Stats.AzureEventHub.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "azureeventhub", "status": OK}).Inc()
|
||||
log.Printf("[INFO] : %v EventHub - Publish OK", c.OutputType)
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "azureeventhub"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
utils.Log(utils.InfoLvl, c.OutputType+" EventHub", "Publish OK")
|
||||
}
|
||||
|
||||
// setEventHubErrorMetrics set the error stats
|
||||
|
@ -91,4 +98,6 @@ func (c *Client) setEventHubErrorMetrics() {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:azureeventhub", "status:error"})
|
||||
c.Stats.AzureEventHub.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "azureeventhub", "status": Error}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "azureeventhub"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
}
|
||||
|
|
|
@ -5,14 +5,15 @@ package outputs
|
|||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"log/slog"
|
||||
"math"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
|
@ -20,24 +21,26 @@ import (
|
|||
"strings"
|
||||
"sync"
|
||||
|
||||
crdClient "sigs.k8s.io/wg-policy-prototypes/policy-report/pkg/generated/v1alpha2/clientset/versioned"
|
||||
|
||||
gcpfunctions "cloud.google.com/go/functions/apiv1"
|
||||
amqp "github.com/rabbitmq/amqp091-go"
|
||||
wavefront "github.com/wavefronthq/wavefront-sdk-go/senders"
|
||||
|
||||
"cloud.google.com/go/pubsub"
|
||||
"cloud.google.com/go/storage"
|
||||
"github.com/DataDog/datadog-go/statsd"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go-v2/aws"
|
||||
cloudevents "github.com/cloudevents/sdk-go/v2"
|
||||
"github.com/segmentio/kafka-go"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
|
||||
mqtt "github.com/eclipse/paho.mqtt.golang"
|
||||
timescaledb "github.com/jackc/pgx/v5/pgxpool"
|
||||
amqp "github.com/rabbitmq/amqp091-go"
|
||||
redis "github.com/redis/go-redis/v9"
|
||||
"github.com/segmentio/kafka-go"
|
||||
logstash "github.com/telkomdev/go-stash"
|
||||
wavefront "github.com/wavefronthq/wavefront-sdk-go/senders"
|
||||
"golang.org/x/sync/semaphore"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
crdClient "sigs.k8s.io/wg-policy-prototypes/policy-report/pkg/generated/v1alpha2/clientset/versioned"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/batcher"
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
otlpmetrics "github.com/falcosecurity/falcosidekick/outputs/otlp_metrics"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
|
@ -92,31 +95,27 @@ const MutualTLSCacertFilename = "/ca.crt"
|
|||
const HttpPost = "POST"
|
||||
const HttpPut = "PUT"
|
||||
|
||||
// Headers to add to the client before sending the request
|
||||
type Header struct {
|
||||
Key string
|
||||
Value string
|
||||
}
|
||||
// Protocol
|
||||
const GRPC = "grpc"
|
||||
|
||||
// Client communicates with the different API.
|
||||
type Client struct {
|
||||
OutputType string
|
||||
EndpointURL *url.URL
|
||||
MutualTLSEnabled bool
|
||||
CheckCert bool
|
||||
HeaderList []Header
|
||||
OutputType string
|
||||
|
||||
// FIXME: This causes race condition if outputs overwrite this URL during requests from multiple go routines
|
||||
EndpointURL *url.URL
|
||||
|
||||
ContentType string
|
||||
ShutDownFunc func()
|
||||
Config *types.Configuration
|
||||
Stats *types.Statistics
|
||||
PromStats *types.PromStatistics
|
||||
AWSSession *session.Session
|
||||
OTLPMetrics *otlpmetrics.OTLPMetrics
|
||||
AWSConfig *aws.Config
|
||||
StatsdClient *statsd.Client
|
||||
DogstatsdClient *statsd.Client
|
||||
GCPTopicClient *pubsub.Topic
|
||||
GCPCloudFunctionsClient *gcpfunctions.CloudFunctionsClient
|
||||
// FIXME: this lock requires a per-output usage lock currently if headers are used -- needs to be refactored
|
||||
httpClientLock sync.Mutex
|
||||
|
||||
GCSStorageClient *storage.Client
|
||||
KafkaProducer *kafka.Writer
|
||||
|
@ -128,45 +127,99 @@ type Client struct {
|
|||
MQTTClient mqtt.Client
|
||||
TimescaleDBClient *timescaledb.Pool
|
||||
RedisClient *redis.Client
|
||||
OTLPLogsLogger *slog.Logger
|
||||
LogstashClient *logstash.Stash
|
||||
|
||||
// Enable gzip compression
|
||||
EnableCompression bool
|
||||
|
||||
// cached http.Client
|
||||
httpcli *http.Client
|
||||
// lock for http client creation
|
||||
mx sync.Mutex
|
||||
|
||||
// common config
|
||||
cfg types.CommonConfig
|
||||
|
||||
// init once on first request
|
||||
initOnce sync.Once
|
||||
|
||||
// maxconcurrent requests limiter
|
||||
sem *semaphore.Weighted
|
||||
|
||||
// batcher
|
||||
batcher *batcher.Batcher
|
||||
}
|
||||
|
||||
// InitClient returns a new output.Client for accessing the different API.
|
||||
func NewClient(outputType string, defaultEndpointURL string, mutualTLSEnabled bool, checkCert bool, params types.InitClientArgs) (*Client, error) {
|
||||
func NewClient(outputType string, defaultEndpointURL string, cfg types.CommonConfig, params types.InitClientArgs) (*Client, error) {
|
||||
reg := regexp.MustCompile(`(http|nats)(s?)://.*`)
|
||||
if !reg.MatchString(defaultEndpointURL) {
|
||||
log.Printf("[ERROR] : %v - %v\n", outputType, "Bad Endpoint")
|
||||
utils.Log(utils.ErrorLvl, outputType, "Bad Endpoint")
|
||||
return nil, ErrClientCreation
|
||||
}
|
||||
if _, err := url.ParseRequestURI(defaultEndpointURL); err != nil {
|
||||
log.Printf("[ERROR] : %v - %v\n", outputType, err.Error())
|
||||
utils.Log(utils.ErrorLvl, outputType, err.Error())
|
||||
return nil, ErrClientCreation
|
||||
}
|
||||
endpointURL, err := url.Parse(defaultEndpointURL)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : %v - %v\n", outputType, err.Error())
|
||||
utils.Log(utils.ErrorLvl, outputType, err.Error())
|
||||
return nil, ErrClientCreation
|
||||
}
|
||||
return &Client{OutputType: outputType, EndpointURL: endpointURL, MutualTLSEnabled: mutualTLSEnabled, CheckCert: checkCert, HeaderList: []Header{}, ContentType: DefaultContentType, Config: params.Config, Stats: params.Stats, PromStats: params.PromStats, StatsdClient: params.StatsdClient, DogstatsdClient: params.DogstatsdClient}, nil
|
||||
return &Client{
|
||||
cfg: cfg,
|
||||
OutputType: outputType,
|
||||
EndpointURL: endpointURL,
|
||||
ContentType: DefaultContentType,
|
||||
Config: params.Config,
|
||||
Stats: params.Stats,
|
||||
PromStats: params.PromStats,
|
||||
OTLPMetrics: params.OTLPMetrics,
|
||||
StatsdClient: params.StatsdClient,
|
||||
DogstatsdClient: params.DogstatsdClient,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type RequestOptionFunc func(req *http.Request)
|
||||
|
||||
// Get get a payload from Output with GET http method.
|
||||
func (c *Client) Get() error {
|
||||
return c.sendRequest("GET", nil)
|
||||
func (c *Client) Get(opts ...RequestOptionFunc) error {
|
||||
return c.sendRequest("GET", nil, nil, opts...)
|
||||
}
|
||||
|
||||
// Post sends event (payload) to Output with POST http method.
|
||||
func (c *Client) Post(payload interface{}) error {
|
||||
return c.sendRequest("POST", payload)
|
||||
func (c *Client) Post(payload interface{}, opts ...RequestOptionFunc) error {
|
||||
return c.sendRequest("POST", payload, nil, opts...)
|
||||
}
|
||||
|
||||
// PostWithResponse sends event (payload) to Output with POST http method and returns a stringified response body
|
||||
// This is added in order to get the response body and avoid breaking any other code that relies on the Post implmentation
|
||||
func (c *Client) PostWithResponse(payload interface{}, opts ...RequestOptionFunc) (string, error) {
|
||||
var responseBody string
|
||||
|
||||
err := c.sendRequest("POST", payload, &responseBody, opts...)
|
||||
|
||||
return responseBody, err
|
||||
}
|
||||
|
||||
// Put sends event (payload) to Output with PUT http method.
|
||||
func (c *Client) Put(payload interface{}) error {
|
||||
return c.sendRequest("PUT", payload)
|
||||
func (c *Client) Put(payload interface{}, opts ...RequestOptionFunc) error {
|
||||
return c.sendRequest("PUT", payload, nil, opts...)
|
||||
}
|
||||
|
||||
// Get the response body as inlined string
|
||||
func getInlinedBodyAsString(resp *http.Response) string {
|
||||
func (c *Client) getInlinedBodyAsString(resp *http.Response) string {
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
contentEncoding := resp.Header.Get("Content-Encoding")
|
||||
if contentEncoding == "gzip" {
|
||||
dec, err := decompressData(body)
|
||||
if err != nil {
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("Failed to decompress response: %v", err))
|
||||
return ""
|
||||
}
|
||||
body = dec
|
||||
}
|
||||
contentType := resp.Header.Get("Content-Type")
|
||||
if contentType == "application/json" {
|
||||
var compactedBody bytes.Buffer
|
||||
|
@ -179,44 +232,233 @@ func getInlinedBodyAsString(resp *http.Response) string {
|
|||
return string(body)
|
||||
}
|
||||
|
||||
func compressData(reader io.Reader) ([]byte, error) {
|
||||
var compressed bytes.Buffer
|
||||
gw := gzip.NewWriter(&compressed)
|
||||
|
||||
if _, err := io.Copy(gw, reader); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := gw.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return compressed.Bytes(), nil
|
||||
}
|
||||
|
||||
func decompressData(compressed []byte) (data []byte, err error) {
|
||||
gr, err := gzip.NewReader(bytes.NewBuffer(compressed))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
err = errors.Join(err, gr.Close())
|
||||
}()
|
||||
|
||||
data, err = io.ReadAll(gr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Post sends event (payload) to Output.
|
||||
func (c *Client) sendRequest(method string, payload interface{}) error {
|
||||
// Returns stringified response body or error
|
||||
func (c *Client) sendRequest(method string, payload interface{}, responseBody *string, opts ...RequestOptionFunc) error {
|
||||
// Initialize the semaphore once here
|
||||
// because currently there are multiple code paths
|
||||
// where the client is created directly without using NewClient constructor
|
||||
c.initOnce.Do(func() {
|
||||
if c.cfg.MaxConcurrentRequests == 0 {
|
||||
c.sem = semaphore.NewWeighted(math.MaxInt64)
|
||||
utils.Log(utils.InfoLvl, c.OutputType, "Max concurrent requests: unlimited")
|
||||
} else {
|
||||
c.sem = semaphore.NewWeighted(int64(c.cfg.MaxConcurrentRequests))
|
||||
utils.Log(utils.InfoLvl, c.OutputType, fmt.Sprintf("Max concurrent requests: %v", c.cfg.MaxConcurrentRequests))
|
||||
}
|
||||
})
|
||||
|
||||
// defer + recover to catch panic if output doesn't respond
|
||||
defer func(c *Client) {
|
||||
if err := recover(); err != nil {
|
||||
go c.CountMetric("outputs", 1, []string{"output:" + strings.ToLower(c.OutputType), "status:connectionrefused"})
|
||||
log.Printf("[ERROR] : %v - %s", c.OutputType, err)
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprint(err))
|
||||
}
|
||||
}(c)
|
||||
|
||||
body := new(bytes.Buffer)
|
||||
switch payload.(type) {
|
||||
var reader io.Reader = body
|
||||
switch v := payload.(type) {
|
||||
case influxdbPayload:
|
||||
fmt.Fprintf(body, "%v", payload)
|
||||
if c.Config.Debug {
|
||||
log.Printf("[DEBUG] : %v payload : %v\n", c.OutputType, body)
|
||||
utils.Log(utils.DebugLvl, c.OutputType, fmt.Sprintf("payload : %v", body))
|
||||
}
|
||||
case spyderbatPayload:
|
||||
zipper := gzip.NewWriter(body)
|
||||
if err := json.NewEncoder(zipper).Encode(payload); err != nil {
|
||||
log.Printf("[ERROR] : %v - %s", c.OutputType, err)
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
}
|
||||
zipper.Close()
|
||||
if c.Config.Debug {
|
||||
debugBody := new(bytes.Buffer)
|
||||
if err := json.NewEncoder(debugBody).Encode(payload); err == nil {
|
||||
log.Printf("[DEBUG] : %v payload : %v\n", c.OutputType, debugBody)
|
||||
utils.Log(utils.DebugLvl, c.OutputType, fmt.Sprintf("payload : %v", body))
|
||||
}
|
||||
}
|
||||
case io.Reader:
|
||||
reader = v
|
||||
case []byte:
|
||||
reader = bytes.NewBuffer(v)
|
||||
default:
|
||||
if err := json.NewEncoder(body).Encode(payload); err != nil {
|
||||
log.Printf("[ERROR] : %v - %s", c.OutputType, err)
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
}
|
||||
if c.Config.Debug {
|
||||
log.Printf("[DEBUG] : %v payload : %v\n", c.OutputType, body)
|
||||
utils.Log(utils.DebugLvl, c.OutputType, fmt.Sprintf("payload : %v", body))
|
||||
}
|
||||
}
|
||||
|
||||
if c.EnableCompression {
|
||||
data, err := compressData(reader)
|
||||
if err != nil {
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("Failed to compress data: %v", err))
|
||||
return err
|
||||
}
|
||||
reader = bytes.NewBuffer(data)
|
||||
}
|
||||
|
||||
client := c.httpClient()
|
||||
|
||||
var req *http.Request
|
||||
var err error
|
||||
if method == "GET" {
|
||||
req, err = http.NewRequest(method, c.EndpointURL.String(), nil)
|
||||
} else {
|
||||
req, err = http.NewRequest(method, c.EndpointURL.String(), reader)
|
||||
}
|
||||
if err != nil {
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
req.Header.Set(ContentTypeHeaderKey, c.ContentType)
|
||||
req.Header.Set(UserAgentHeaderKey, UserAgentHeaderValue)
|
||||
|
||||
if c.EnableCompression {
|
||||
req.Header.Set("Content-Encoding", "gzip")
|
||||
req.Header.Set("Accept-Encoding", "gzip")
|
||||
}
|
||||
|
||||
// Call request options functions
|
||||
// Allows the clients to adjust request as needed
|
||||
for _, opt := range opts {
|
||||
opt(req)
|
||||
}
|
||||
|
||||
// Using the background context for now
|
||||
// TODO: Eventually pass the proper context to sendRequest, add pass it to NewRequest call as well
|
||||
// in order to make the requests cancellable
|
||||
ctx := context.Background()
|
||||
err = c.sem.Acquire(ctx, 1)
|
||||
if err != nil {
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return err
|
||||
}
|
||||
defer c.sem.Release(1)
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
go c.CountMetric("outputs", 1, []string{"output:" + strings.ToLower(c.OutputType), "status:connectionrefused"})
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
// Clear out headers - they will be set for the next request.
|
||||
go c.CountMetric("outputs", 1, []string{"output:" + strings.ToLower(c.OutputType), "status:" + strings.ToLower(http.StatusText(resp.StatusCode))})
|
||||
|
||||
switch resp.StatusCode {
|
||||
case http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent: //200, 201, 202, 204
|
||||
utils.Log(utils.InfoLvl, c.OutputType, fmt.Sprintf("%v OK (%v)", method, resp.StatusCode))
|
||||
ot := c.OutputType
|
||||
logResponse := ot == Kubeless || ot == Openfaas || ot == Fission
|
||||
if responseBody != nil || logResponse {
|
||||
s := c.getInlinedBodyAsString(resp)
|
||||
if responseBody != nil {
|
||||
// In some cases now we need to capture the response on 200
|
||||
// For example the Elasticsearch output bulk request that returns 200
|
||||
// even when some items in the bulk failed
|
||||
*responseBody = s
|
||||
}
|
||||
if logResponse {
|
||||
utils.Log(utils.InfoLvl, ot, fmt.Sprintf("Function Response : %s", s))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
case http.StatusBadRequest: //400
|
||||
msg := c.getInlinedBodyAsString(resp)
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("%v (%v): %s", ErrHeaderMissing, resp.StatusCode, msg))
|
||||
if msg != "" {
|
||||
return errors.New(msg)
|
||||
}
|
||||
return ErrHeaderMissing
|
||||
case http.StatusUnauthorized: //401
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("%v (%v): %s", ErrClientAuthenticationError, resp.StatusCode, c.getInlinedBodyAsString(resp)))
|
||||
return ErrClientAuthenticationError
|
||||
case http.StatusForbidden: //403
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("%v (%v): %s", ErrForbidden, resp.StatusCode, c.getInlinedBodyAsString(resp)))
|
||||
return ErrForbidden
|
||||
case http.StatusNotFound: //404
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("%v (%v): %s", ErrNotFound, resp.StatusCode, c.getInlinedBodyAsString(resp)))
|
||||
return ErrNotFound
|
||||
case http.StatusUnprocessableEntity: //422
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("%v (%v): %s", ErrUnprocessableEntityError, resp.StatusCode, c.getInlinedBodyAsString(resp)))
|
||||
return ErrUnprocessableEntityError
|
||||
case http.StatusTooManyRequests: //429
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("%v (%v): %s", ErrTooManyRequest, resp.StatusCode, c.getInlinedBodyAsString(resp)))
|
||||
return ErrTooManyRequest
|
||||
case http.StatusInternalServerError: //500
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("%v (%v)", ErrTooManyRequest, resp.StatusCode))
|
||||
return ErrInternalServer
|
||||
case http.StatusBadGateway: //502
|
||||
msg := c.getInlinedBodyAsString(resp)
|
||||
fmt.Println(msg)
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("%v (%v)", ErrTooManyRequest, resp.StatusCode))
|
||||
return ErrBadGateway
|
||||
default:
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("unexpected Response (%v)", resp.StatusCode))
|
||||
return errors.New(resp.Status)
|
||||
}
|
||||
}
|
||||
|
||||
// httpClient returns http client.
|
||||
// It returns the cached client if it was successfully configured before, for compatibility.
|
||||
// It returns misconfigured client as before if some of the configuration steps failed.
|
||||
// It was only logging the failures in it's original implementation, so keeping it the same.
|
||||
func (c *Client) httpClient() *http.Client {
|
||||
c.mx.Lock()
|
||||
defer c.mx.Unlock()
|
||||
if c.httpcli != nil {
|
||||
return c.httpcli
|
||||
}
|
||||
|
||||
customTransport, err := c.configureTransport()
|
||||
client := &http.Client{
|
||||
Transport: customTransport,
|
||||
}
|
||||
if err != nil {
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
} else {
|
||||
c.httpcli = client // cache the client instance for future http calls
|
||||
}
|
||||
|
||||
return client
|
||||
}
|
||||
|
||||
// configureTransport configure http transport
|
||||
// This preserves the previous behavior where it only logged errors, but returned misconfigured transport in case of errors
|
||||
func (c *Client) configureTransport() (*http.Transport, error) {
|
||||
customTransport := http.DefaultTransport.(*http.Transport).Clone()
|
||||
|
||||
if customTransport.TLSClientConfig == nil {
|
||||
|
@ -236,12 +478,12 @@ func (c *Client) sendRequest(method string, payload interface{}) error {
|
|||
if c.Config.TLSClient.CaCertFile != "" {
|
||||
caCert, err := os.ReadFile(c.Config.TLSClient.CaCertFile)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : %v - %v\n", c.OutputType, err.Error())
|
||||
return customTransport, err
|
||||
}
|
||||
customTransport.TLSClientConfig.RootCAs.AppendCertsFromPEM(caCert)
|
||||
}
|
||||
|
||||
if c.MutualTLSEnabled {
|
||||
if c.cfg.MutualTLS {
|
||||
// Load client cert
|
||||
var MutualTLSClientCertPath, MutualTLSClientKeyPath, MutualTLSClientCaCertPath string
|
||||
if c.Config.MutualTLSClient.CertFile != "" {
|
||||
|
@ -261,110 +503,23 @@ func (c *Client) sendRequest(method string, payload interface{}) error {
|
|||
}
|
||||
cert, err := tls.LoadX509KeyPair(MutualTLSClientCertPath, MutualTLSClientKeyPath)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : %v - %v\n", c.OutputType, err.Error())
|
||||
return customTransport, err
|
||||
}
|
||||
|
||||
// Load CA cert
|
||||
caCert, err := os.ReadFile(MutualTLSClientCaCertPath)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : %v - %v\n", c.OutputType, err.Error())
|
||||
return customTransport, err
|
||||
}
|
||||
customTransport.TLSClientConfig.RootCAs.AppendCertsFromPEM(caCert)
|
||||
customTransport.TLSClientConfig.Certificates = []tls.Certificate{cert}
|
||||
} else {
|
||||
// With MutualTLS enabled, the check cert flag is ignored
|
||||
if !c.CheckCert {
|
||||
if !c.cfg.CheckCert {
|
||||
customTransport.TLSClientConfig = &tls.Config{
|
||||
InsecureSkipVerify: true, // #nosec G402 This is only set as a result of explicit configuration
|
||||
}
|
||||
}
|
||||
}
|
||||
client := &http.Client{
|
||||
Transport: customTransport,
|
||||
}
|
||||
req := new(http.Request)
|
||||
var err error
|
||||
if method == "GET" {
|
||||
req, err = http.NewRequest(method, c.EndpointURL.String(), nil)
|
||||
} else {
|
||||
req, err = http.NewRequest(method, c.EndpointURL.String(), body)
|
||||
}
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : %v - %v\n", c.OutputType, err.Error())
|
||||
}
|
||||
|
||||
req.Header.Add(ContentTypeHeaderKey, c.ContentType)
|
||||
req.Header.Add(UserAgentHeaderKey, UserAgentHeaderValue)
|
||||
|
||||
for _, headerObj := range c.HeaderList {
|
||||
req.Header.Set(headerObj.Key, headerObj.Value)
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
c.HeaderList = []Header{}
|
||||
log.Printf("[ERROR] : %v - %v\n", c.OutputType, err.Error())
|
||||
go c.CountMetric("outputs", 1, []string{"output:" + strings.ToLower(c.OutputType), "status:connectionrefused"})
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
// Clear out headers - they will be set for the next request.
|
||||
c.HeaderList = []Header{}
|
||||
go c.CountMetric("outputs", 1, []string{"output:" + strings.ToLower(c.OutputType), "status:" + strings.ToLower(http.StatusText(resp.StatusCode))})
|
||||
|
||||
switch resp.StatusCode {
|
||||
case http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent: //200, 201, 202, 204
|
||||
log.Printf("[INFO] : %v - %v OK (%v)\n", c.OutputType, method, resp.StatusCode)
|
||||
if ot := c.OutputType; ot == Kubeless || ot == Openfaas || ot == Fission {
|
||||
log.Printf("[INFO] : %v - Function Response : %s\n", ot, getInlinedBodyAsString(resp))
|
||||
}
|
||||
return nil
|
||||
case http.StatusBadRequest: //400
|
||||
msg := getInlinedBodyAsString(resp)
|
||||
log.Printf("[ERROR] : %v - %v (%v): %s\n", c.OutputType, ErrHeaderMissing, resp.StatusCode, msg)
|
||||
if msg != "" {
|
||||
return fmt.Errorf(msg)
|
||||
}
|
||||
return ErrHeaderMissing
|
||||
case http.StatusUnauthorized: //401
|
||||
log.Printf("[ERROR] : %v - %v (%v): %s\n", c.OutputType, ErrClientAuthenticationError, resp.StatusCode, getInlinedBodyAsString(resp))
|
||||
return ErrClientAuthenticationError
|
||||
case http.StatusForbidden: //403
|
||||
log.Printf("[ERROR] : %v - %v (%v): %s\n", c.OutputType, ErrForbidden, resp.StatusCode, getInlinedBodyAsString(resp))
|
||||
return ErrForbidden
|
||||
case http.StatusNotFound: //404
|
||||
log.Printf("[ERROR] : %v - %v (%v): %s\n", c.OutputType, ErrNotFound, resp.StatusCode, getInlinedBodyAsString(resp))
|
||||
return ErrNotFound
|
||||
case http.StatusUnprocessableEntity: //422
|
||||
log.Printf("[ERROR] : %v - %v (%v): %s\n", c.OutputType, ErrUnprocessableEntityError, resp.StatusCode, getInlinedBodyAsString(resp))
|
||||
return ErrUnprocessableEntityError
|
||||
case http.StatusTooManyRequests: //429
|
||||
log.Printf("[ERROR] : %v - %v (%v): %s\n", c.OutputType, ErrTooManyRequest, resp.StatusCode, getInlinedBodyAsString(resp))
|
||||
return ErrTooManyRequest
|
||||
case http.StatusInternalServerError: //500
|
||||
log.Printf("[ERROR] : %v - %v (%v)\n", c.OutputType, ErrTooManyRequest, resp.StatusCode)
|
||||
return ErrInternalServer
|
||||
case http.StatusBadGateway: //502
|
||||
log.Printf("[ERROR] : %v - %v (%v)\n", c.OutputType, ErrTooManyRequest, resp.StatusCode)
|
||||
return ErrBadGateway
|
||||
default:
|
||||
log.Printf("[ERROR] : %v - unexpected Response (%v)\n", c.OutputType, resp.StatusCode)
|
||||
return errors.New(resp.Status)
|
||||
}
|
||||
}
|
||||
|
||||
// BasicAuth adds an HTTP Basic Authentication compliant header to the Client.
|
||||
func (c *Client) BasicAuth(username, password string) {
|
||||
// Check out RFC7617 for the specifics on this code.
|
||||
// https://datatracker.ietf.org/doc/html/rfc7617
|
||||
// This might break I18n, but we can cross that bridge when we come to it.
|
||||
userPass := username + ":" + password
|
||||
b64UserPass := base64.StdEncoding.EncodeToString([]byte(userPass))
|
||||
c.AddHeader(AuthorizationHeaderKey, "Basic "+b64UserPass)
|
||||
}
|
||||
|
||||
// AddHeader adds an HTTP Header to the Client.
|
||||
func (c *Client) AddHeader(key, value string) {
|
||||
c.HeaderList = append(c.HeaderList, Header{Key: key, Value: value})
|
||||
return customTransport, nil
|
||||
}
|
||||
|
|
|
@ -43,11 +43,11 @@ func TestNewClient(t *testing.T) {
|
|||
PromStats: promStats,
|
||||
}
|
||||
|
||||
testClientOutput := Client{OutputType: "test", EndpointURL: u, MutualTLSEnabled: false, CheckCert: true, HeaderList: []Header{}, ContentType: "application/json; charset=utf-8", Config: config, Stats: stats, PromStats: promStats}
|
||||
_, err := NewClient("test", "localhost/%*$¨^!/:;", false, true, *initClientArgs)
|
||||
testClientOutput := Client{OutputType: "test", EndpointURL: u, cfg: types.CommonConfig{CheckCert: true}, ContentType: "application/json; charset=utf-8", Config: config, Stats: stats, PromStats: promStats}
|
||||
_, err := NewClient("test", "localhost/%*$¨^!/:;", types.CommonConfig{CheckCert: true}, *initClientArgs)
|
||||
require.NotNil(t, err)
|
||||
|
||||
nc, err := NewClient("test", "http://localhost", false, true, *initClientArgs)
|
||||
nc, err := NewClient("test", "http://localhost", types.CommonConfig{CheckCert: true}, *initClientArgs)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, &testClientOutput, nc)
|
||||
}
|
||||
|
@ -91,7 +91,7 @@ func TestPost(t *testing.T) {
|
|||
Stats: &types.Statistics{},
|
||||
PromStats: &types.PromStatistics{},
|
||||
}
|
||||
nc, err := NewClient("", ts.URL+i, false, true, *initClientArgs)
|
||||
nc, err := NewClient("", ts.URL+i, types.CommonConfig{CheckCert: true}, *initClientArgs)
|
||||
require.Nil(t, err)
|
||||
require.NotEmpty(t, nc)
|
||||
|
||||
|
@ -111,13 +111,13 @@ func TestAddHeader(t *testing.T) {
|
|||
Stats: &types.Statistics{},
|
||||
PromStats: &types.PromStatistics{},
|
||||
}
|
||||
nc, err := NewClient("", ts.URL, false, true, *initClientArgs)
|
||||
nc, err := NewClient("", ts.URL, types.CommonConfig{CheckCert: true}, *initClientArgs)
|
||||
require.Nil(t, err)
|
||||
require.NotEmpty(t, nc)
|
||||
|
||||
nc.AddHeader(headerKey, headerVal)
|
||||
|
||||
nc.Post("")
|
||||
nc.Post("", func(req *http.Request) {
|
||||
req.Header.Set(headerKey, headerVal)
|
||||
})
|
||||
}
|
||||
|
||||
func TestAddBasicAuth(t *testing.T) {
|
||||
|
@ -167,13 +167,13 @@ func TestAddBasicAuth(t *testing.T) {
|
|||
Stats: &types.Statistics{},
|
||||
PromStats: &types.PromStatistics{},
|
||||
}
|
||||
nc, err := NewClient("", ts.URL, false, true, *initClientArgs)
|
||||
nc, err := NewClient("", ts.URL, types.CommonConfig{CheckCert: true}, *initClientArgs)
|
||||
require.Nil(t, err)
|
||||
require.NotEmpty(t, nc)
|
||||
|
||||
nc.BasicAuth(username, password)
|
||||
|
||||
nc.Post("")
|
||||
nc.Post("", func(req *http.Request) {
|
||||
req.SetBasicAuth(username, password)
|
||||
})
|
||||
}
|
||||
|
||||
func TestHeadersResetAfterReq(t *testing.T) {
|
||||
|
@ -188,17 +188,17 @@ func TestHeadersResetAfterReq(t *testing.T) {
|
|||
Stats: &types.Statistics{},
|
||||
PromStats: &types.PromStatistics{},
|
||||
}
|
||||
nc, err := NewClient("", ts.URL, false, true, *initClientArgs)
|
||||
nc, err := NewClient("", ts.URL, types.CommonConfig{CheckCert: true}, *initClientArgs)
|
||||
require.Nil(t, err)
|
||||
require.NotEmpty(t, nc)
|
||||
|
||||
nc.AddHeader(headerKey, headerVal)
|
||||
nc.Post("", func(req *http.Request) {
|
||||
req.Header.Set(headerKey, headerVal)
|
||||
})
|
||||
|
||||
nc.Post("")
|
||||
|
||||
nc.AddHeader(headerKey, headerVal)
|
||||
|
||||
nc.Post("")
|
||||
nc.Post("", func(req *http.Request) {
|
||||
req.Header.Set(headerKey, headerVal)
|
||||
})
|
||||
}
|
||||
|
||||
func TestMutualTlsPost(t *testing.T) {
|
||||
|
@ -239,7 +239,7 @@ func TestMutualTlsPost(t *testing.T) {
|
|||
Stats: &types.Statistics{},
|
||||
PromStats: &types.PromStatistics{},
|
||||
}
|
||||
nc, err := NewClient("", server.URL+Status200, true, true, *initClientArgs)
|
||||
nc, err := NewClient("", server.URL+Status200, types.CommonConfig{MutualTLS: true, CheckCert: true}, *initClientArgs)
|
||||
require.Nil(t, err)
|
||||
require.NotEmpty(t, nc)
|
||||
|
||||
|
|
|
@ -5,8 +5,11 @@ package outputs
|
|||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
|
@ -16,7 +19,7 @@ import (
|
|||
const (
|
||||
tableSlideType = "table"
|
||||
textSlideType = "text"
|
||||
botName = "Falco Sidekick"
|
||||
botName = "Falcosidekick"
|
||||
)
|
||||
|
||||
// Table slide fields
|
||||
|
@ -77,7 +80,7 @@ func newCliqPayload(falcopayload types.FalcoPayload, config *types.Configuration
|
|||
if config.Cliq.MessageFormatTemplate != nil {
|
||||
buf := &bytes.Buffer{}
|
||||
if err := config.Cliq.MessageFormatTemplate.Execute(buf, falcopayload); err != nil {
|
||||
log.Printf("[ERROR] : Cliq - Error expanding Cliq message %v", err)
|
||||
utils.Log(utils.ErrorLvl, "Cliq", fmt.Sprintf("Error expanding Cliq message: %v", err))
|
||||
} else {
|
||||
payload.Text = buf.String()
|
||||
|
||||
|
@ -164,15 +167,16 @@ func newCliqPayload(falcopayload types.FalcoPayload, config *types.Configuration
|
|||
func (c *Client) CliqPost(falcopayload types.FalcoPayload) {
|
||||
c.Stats.Cliq.Add(Total, 1)
|
||||
|
||||
c.httpClientLock.Lock()
|
||||
defer c.httpClientLock.Unlock()
|
||||
c.AddHeader(ContentTypeHeaderKey, "application/json")
|
||||
err := c.Post(newCliqPayload(falcopayload, c.Config))
|
||||
err := c.Post(newCliqPayload(falcopayload, c.Config), func(req *http.Request) {
|
||||
req.Header.Set(ContentTypeHeaderKey, "application/json")
|
||||
})
|
||||
if err != nil {
|
||||
go c.CountMetric(Outputs, 1, []string{"output:cliq", "status:error"})
|
||||
c.Stats.Cliq.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "cliq", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : Cliq - %v\n", err)
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "cliq"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -180,4 +184,5 @@ func (c *Client) CliqPost(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:cliq", "status:ok"})
|
||||
c.Stats.Cliq.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "cliq", "status": OK}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "cliq"), attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
|
|
@ -16,7 +16,7 @@ func TestNewCliqPayload(t *testing.T) {
|
|||
expectedOutput := cliqPayload{
|
||||
Text: "\U000026AA Rule: Test rule Priority: Debug",
|
||||
Bot: cliqBot{
|
||||
Name: "Falco Sidekick",
|
||||
Name: "Falcosidekick",
|
||||
Image: DefaultIconURL,
|
||||
},
|
||||
Slides: []cliqSlide{
|
||||
|
|
|
@ -4,10 +4,13 @@ package outputs
|
|||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"fmt"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
cloudevents "github.com/cloudevents/sdk-go/v2"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
|
@ -19,7 +22,11 @@ func (c *Client) CloudEventsSend(falcopayload types.FalcoPayload) {
|
|||
client, err := cloudevents.NewClientHTTP()
|
||||
if err != nil {
|
||||
go c.CountMetric(Outputs, 1, []string{"output:cloudevents", "status:error"})
|
||||
log.Printf("[ERROR] : CloudEvents - NewDefaultClient : %v\n", err)
|
||||
c.Stats.CloudEvents.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "cloudevents", "status": Error}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "cloudevents"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("NewDefaultClient : %v", err))
|
||||
return
|
||||
}
|
||||
c.CloudEventsClient = client
|
||||
|
@ -33,7 +40,7 @@ func (c *Client) CloudEventsSend(falcopayload types.FalcoPayload) {
|
|||
event.SetType("falco.rule.output.v1")
|
||||
event.SetExtension("priority", falcopayload.Priority.String())
|
||||
event.SetExtension("rule", falcopayload.Rule)
|
||||
event.SetExtension("event_source", falcopayload.Source)
|
||||
event.SetExtension("eventsource", falcopayload.Source)
|
||||
|
||||
if falcopayload.Hostname != "" {
|
||||
event.SetExtension(Hostname, falcopayload.Hostname)
|
||||
|
@ -45,14 +52,16 @@ func (c *Client) CloudEventsSend(falcopayload types.FalcoPayload) {
|
|||
}
|
||||
|
||||
if err := event.SetData(cloudevents.ApplicationJSON, falcopayload); err != nil {
|
||||
log.Printf("[ERROR] : CloudEvents, failed to set data : %v\n", err)
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("failed to set data : %v", err))
|
||||
}
|
||||
|
||||
if result := c.CloudEventsClient.Send(ctx, event); cloudevents.IsUndelivered(result) {
|
||||
go c.CountMetric(Outputs, 1, []string{"output:cloudevents", "status:error"})
|
||||
c.Stats.CloudEvents.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "cloudevents", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : CloudEvents - %v\n", result)
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "cloudevents"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("%v", result))
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -60,5 +69,7 @@ func (c *Client) CloudEventsSend(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:cloudevents", "status:ok"})
|
||||
c.Stats.CloudEvents.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "cloudevents", "status": OK}).Inc()
|
||||
log.Printf("[INFO] : CloudEvents - Send OK\n")
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "cloudevents"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
utils.Log(utils.InfoLvl, c.OutputType, "Send OK")
|
||||
}
|
||||
|
|
|
@ -4,9 +4,11 @@ package outputs
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"sort"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
|
@ -31,7 +33,7 @@ func newDatadogPayload(falcopayload types.FalcoPayload) datadogPayload {
|
|||
tags = append(tags, fmt.Sprintf("%v:%v", i, falcopayload.OutputFields[i]))
|
||||
|
||||
}
|
||||
tags = append(tags, "source:"+falcopayload.Source)
|
||||
tags = append(tags, "source:"+falcopayload.Source, "source:falco")
|
||||
if falcopayload.Hostname != "" {
|
||||
tags = append(tags, Hostname+":"+falcopayload.Hostname)
|
||||
}
|
||||
|
@ -69,11 +71,15 @@ func (c *Client) DatadogPost(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:datadog", "status:error"})
|
||||
c.Stats.Datadog.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "datadog", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : Datadog - %v\n", err)
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "datadog"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
go c.CountMetric(Outputs, 1, []string{"output:datadog", "status:ok"})
|
||||
c.Stats.Datadog.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "datadog", "status": OK}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "datadog"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
|
|
@ -0,0 +1,75 @@
|
|||
// SPDX-License-Identifier: MIT OR Apache-2.0
|
||||
|
||||
package outputs
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
const (
|
||||
// DatadogLogsPath is the path of Datadog's logs API
|
||||
DatadogLogsPath string = "/api/v2/logs"
|
||||
)
|
||||
|
||||
type datadogLogsPayload struct {
|
||||
DDSource string `json:"ddsource,omitempty"`
|
||||
Hostname string `json:"hostname,omitempty"`
|
||||
Service string `json:"service,omitempty"`
|
||||
Message string `json:"message,omitempty"`
|
||||
DDTags string `json:"ddtags,omitempty"`
|
||||
}
|
||||
|
||||
func newDatadogLogsPayload(falcopayload types.FalcoPayload, config *types.Configuration) datadogLogsPayload {
|
||||
var d datadogLogsPayload
|
||||
|
||||
if len(falcopayload.Tags) != 0 {
|
||||
sort.Strings(falcopayload.Tags)
|
||||
d.DDTags = strings.Join(falcopayload.Tags, ",")
|
||||
}
|
||||
|
||||
d.Hostname = falcopayload.Hostname
|
||||
d.DDSource = strings.ToLower(Falco)
|
||||
|
||||
d.Message = falcopayload.String()
|
||||
|
||||
d.Service = config.DatadogLogs.Service
|
||||
|
||||
return d
|
||||
}
|
||||
|
||||
// DatadogLogsPost posts logs to Datadog
|
||||
func (c *Client) DatadogLogsPost(falcopayload types.FalcoPayload) {
|
||||
c.Stats.DatadogLogs.Add(Total, 1)
|
||||
|
||||
reqOpts := []RequestOptionFunc{
|
||||
func(req *http.Request) {
|
||||
if c.Config.DatadogLogs.APIKey != "" {
|
||||
req.Header.Set("DD-API-KEY", c.Config.DatadogLogs.APIKey)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
err := c.Post(newDatadogLogsPayload(falcopayload, c.Config), reqOpts...)
|
||||
if err != nil {
|
||||
go c.CountMetric(Outputs, 1, []string{"output:datadoglogs", "status:error"})
|
||||
c.Stats.DatadogLogs.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "datadoglogs", "status": Error}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "datadoglogs"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
go c.CountMetric(Outputs, 1, []string{"output:datadoglogs", "status:ok"})
|
||||
c.Stats.DatadogLogs.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "datadoglogs", "status": OK}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "datadoglogs"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
}
|
|
@ -0,0 +1,25 @@
|
|||
// SPDX-License-Identifier: MIT OR Apache-2.0
|
||||
|
||||
package outputs
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
func TestNewDatadogLogsPayload(t *testing.T) {
|
||||
expectedOutput := `{"title":"Test rule","text":"This is a test from falcosidekick","alert_type":"info","source_type_name":"falco","tags":["proc.name:falcosidekick", "source:syscalls", "source:falco", "hostname:test-host", "example", "test"]}`
|
||||
var f types.FalcoPayload
|
||||
json.Unmarshal([]byte(falcoTestInput), &f)
|
||||
s, _ := json.Marshal(newDatadogPayload(f))
|
||||
|
||||
var o1, o2 datadogLogsPayload
|
||||
require.Nil(t, json.Unmarshal([]byte(expectedOutput), &o1))
|
||||
require.Nil(t, json.Unmarshal(s, &o2))
|
||||
|
||||
require.Equal(t, o1, o2)
|
||||
}
|
|
@ -12,7 +12,7 @@ import (
|
|||
)
|
||||
|
||||
func TestNewDatadogPayload(t *testing.T) {
|
||||
expectedOutput := `{"title":"Test rule","text":"This is a test from falcosidekick","alert_type":"info","source_type_name":"falco","tags":["proc.name:falcosidekick", "source:syscalls", "hostname:test-host", "example", "test"]}`
|
||||
expectedOutput := `{"title":"Test rule","text":"This is a test from falcosidekick","alert_type":"info","source_type_name":"falco","tags":["proc.name:falcosidekick", "source:syscalls", "source:falco", "hostname:test-host", "example", "test"]}`
|
||||
var f types.FalcoPayload
|
||||
json.Unmarshal([]byte(falcoTestInput), &f)
|
||||
s, _ := json.Marshal(newDatadogPayload(f))
|
||||
|
|
|
@ -4,10 +4,12 @@ package outputs
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
|
@ -105,7 +107,9 @@ func (c *Client) DiscordPost(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:discord", "status:error"})
|
||||
c.Stats.Discord.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "discord", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : Discord - %v\n", err)
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "discord"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -113,4 +117,6 @@ func (c *Client) DiscordPost(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:discord", "status:ok"})
|
||||
c.Stats.Discord.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "discord", "status": OK}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "discord"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
|
|
@ -3,11 +3,14 @@
|
|||
package outputs
|
||||
|
||||
import (
|
||||
"log"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
|
@ -114,20 +117,22 @@ func (c *Client) DynatracePost(falcopayload types.FalcoPayload) {
|
|||
|
||||
c.ContentType = DynatraceContentType
|
||||
|
||||
c.httpClientLock.Lock()
|
||||
defer c.httpClientLock.Unlock()
|
||||
c.AddHeader("Authorization", "Api-Token "+c.Config.Dynatrace.APIToken)
|
||||
|
||||
err := c.Post(newDynatracePayload(falcopayload).Payload)
|
||||
err := c.Post(newDynatracePayload(falcopayload).Payload, func(req *http.Request) {
|
||||
req.Header.Set("Authorization", "Api-Token "+c.Config.Dynatrace.APIToken)
|
||||
})
|
||||
if err != nil {
|
||||
go c.CountMetric(Outputs, 1, []string{"output:dynatrace", "status:error"})
|
||||
c.Stats.Dynatrace.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "dynatrace", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : Dynatrace - %v\n", err)
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "dynatrace"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
go c.CountMetric(Outputs, 1, []string{"output:dynatrace", "status:ok"})
|
||||
c.Stats.Dynatrace.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "dynatrace", "status": OK}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "dynatrace"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
|
|
@ -7,9 +7,9 @@ import (
|
|||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
func TestNewDynatracePayload(t *testing.T) {
|
||||
|
|
|
@ -3,14 +3,19 @@
|
|||
package outputs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/batcher"
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
|
@ -19,111 +24,217 @@ type eSPayload struct {
|
|||
Timestamp time.Time `json:"@timestamp"`
|
||||
}
|
||||
|
||||
type mappingError struct {
|
||||
type esResponse struct {
|
||||
Error struct {
|
||||
RootCause []struct {
|
||||
Type string `json:"type"`
|
||||
Reason string `json:"reason"`
|
||||
} `json:"root_cause"`
|
||||
Type string `json:"type"`
|
||||
Reason string `json:"reason"`
|
||||
} `json:"error"`
|
||||
Status int `json:"status"`
|
||||
}
|
||||
|
||||
// ElasticsearchPost posts event to Elasticsearch
|
||||
func (c *Client) ElasticsearchPost(falcopayload types.FalcoPayload) {
|
||||
c.Stats.Elasticsearch.Add(Total, 1)
|
||||
type esBulkResponse struct {
|
||||
Errors bool `json:"errors"`
|
||||
Items []esItemResponse `json:"items"`
|
||||
}
|
||||
|
||||
type esItemResponse struct {
|
||||
Create esResponse `json:"create"`
|
||||
}
|
||||
|
||||
func NewElasticsearchClient(params types.InitClientArgs) (*Client, error) {
|
||||
esCfg := params.Config.Elasticsearch
|
||||
endpointUrl := fmt.Sprintf("%s/%s/%s", esCfg.HostPort, esCfg.Index, esCfg.Type)
|
||||
c, err := NewClient("Elasticsearch", endpointUrl, esCfg.CommonConfig, params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if esCfg.Batching.Enabled {
|
||||
utils.Log(utils.InfoLvl, c.OutputType, fmt.Sprintf("Batching enabled: %v max bytes, %v interval", esCfg.Batching.BatchSize, esCfg.Batching.FlushInterval))
|
||||
callbackFn := func(falcoPayloads []types.FalcoPayload, data []byte) {
|
||||
go c.elasticsearchPost("", data, falcoPayloads...)
|
||||
}
|
||||
c.batcher = batcher.New(
|
||||
batcher.WithBatchSize(esCfg.Batching.BatchSize),
|
||||
batcher.WithFlushInterval(esCfg.Batching.FlushInterval),
|
||||
batcher.WithMarshal(c.marshalESBulkPayload),
|
||||
batcher.WithCallback(callbackFn),
|
||||
)
|
||||
}
|
||||
if esCfg.EnableCompression {
|
||||
c.EnableCompression = true
|
||||
utils.Log(utils.InfoLvl, c.OutputType, "Compression enabled")
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (c *Client) ElasticsearchPost(falcopayload types.FalcoPayload) {
|
||||
if c.Config.Elasticsearch.Batching.Enabled {
|
||||
c.batcher.Push(falcopayload)
|
||||
return
|
||||
}
|
||||
|
||||
payload, err := c.marshalESPayload(falcopayload)
|
||||
if err != nil {
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("Failed to marshal payload: %v", err))
|
||||
}
|
||||
|
||||
c.elasticsearchPost(c.getIndex(), payload, falcopayload)
|
||||
}
|
||||
|
||||
var esReasonMappingFieldsRegex *regexp.Regexp = regexp.MustCompile(`\[\w+(\.\w+)+\]`)
|
||||
|
||||
// ElasticsearchPost posts event to Elasticsearch
|
||||
func (c *Client) elasticsearchPost(index string, payload []byte, falcoPayloads ...types.FalcoPayload) {
|
||||
sz := int64(len(falcoPayloads))
|
||||
c.Stats.Elasticsearch.Add(Total, sz)
|
||||
|
||||
current := time.Now()
|
||||
var eURL string
|
||||
switch c.Config.Elasticsearch.Suffix {
|
||||
case None:
|
||||
eURL = c.Config.Elasticsearch.HostPort + "/" + c.Config.Elasticsearch.Index + "/" + c.Config.Elasticsearch.Type
|
||||
case "monthly":
|
||||
eURL = c.Config.Elasticsearch.HostPort + "/" + c.Config.Elasticsearch.Index + "-" + current.Format("2006.01") + "/" + c.Config.Elasticsearch.Type
|
||||
case "annually":
|
||||
eURL = c.Config.Elasticsearch.HostPort + "/" + c.Config.Elasticsearch.Index + "-" + current.Format("2006") + "/" + c.Config.Elasticsearch.Type
|
||||
default:
|
||||
eURL = c.Config.Elasticsearch.HostPort + "/" + c.Config.Elasticsearch.Index + "-" + current.Format("2006.01.02") + "/" + c.Config.Elasticsearch.Type
|
||||
if index == "" {
|
||||
eURL = c.Config.Elasticsearch.HostPort + "/_bulk"
|
||||
} else {
|
||||
eURL = c.Config.Elasticsearch.HostPort + "/" + index + "/" + c.Config.Elasticsearch.Type
|
||||
}
|
||||
|
||||
endpointURL, err := url.Parse(eURL)
|
||||
if err != nil {
|
||||
c.setElasticSearchErrorMetrics()
|
||||
log.Printf("[ERROR] : %v - %v\n", c.OutputType, err.Error())
|
||||
c.setElasticSearchErrorMetrics(sz)
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
c.EndpointURL = endpointURL
|
||||
if c.Config.Elasticsearch.Username != "" && c.Config.Elasticsearch.Password != "" {
|
||||
c.httpClientLock.Lock()
|
||||
defer c.httpClientLock.Unlock()
|
||||
c.BasicAuth(c.Config.Elasticsearch.Username, c.Config.Elasticsearch.Password)
|
||||
reqOpts := []RequestOptionFunc{
|
||||
// Set request headers
|
||||
func(req *http.Request) {
|
||||
if c.Config.Elasticsearch.ApiKey != "" {
|
||||
req.Header.Set("Authorization", "APIKey "+c.Config.Elasticsearch.ApiKey)
|
||||
}
|
||||
|
||||
if c.Config.Elasticsearch.Username != "" && c.Config.Elasticsearch.Password != "" {
|
||||
req.SetBasicAuth(c.Config.Elasticsearch.Username, c.Config.Elasticsearch.Password)
|
||||
}
|
||||
|
||||
for i, j := range c.Config.Elasticsearch.CustomHeaders {
|
||||
req.Header.Set(i, j)
|
||||
}
|
||||
},
|
||||
|
||||
// Set the final endpointURL
|
||||
func(req *http.Request) {
|
||||
// Append pipeline parameter to the URL if configured
|
||||
if c.Config.Elasticsearch.Pipeline != "" {
|
||||
query := endpointURL.Query()
|
||||
query.Set("pipeline", c.Config.Elasticsearch.Pipeline)
|
||||
endpointURL.RawQuery = query.Encode()
|
||||
}
|
||||
// Set request URL
|
||||
req.URL = endpointURL
|
||||
},
|
||||
}
|
||||
|
||||
for i, j := range c.Config.Elasticsearch.CustomHeaders {
|
||||
c.AddHeader(i, j)
|
||||
}
|
||||
|
||||
payload := eSPayload{FalcoPayload: falcopayload, Timestamp: falcopayload.Time}
|
||||
if c.Config.Elasticsearch.FlattenFields || c.Config.Elasticsearch.CreateIndexTemplate {
|
||||
for i, j := range payload.OutputFields {
|
||||
payload.OutputFields[strings.ReplaceAll(i, ".", "_")] = j
|
||||
delete(payload.OutputFields, i)
|
||||
var response string
|
||||
if c.Config.Elasticsearch.Batching.Enabled {
|
||||
// Use PostWithResponse call when batching is enabled in order to capture response body on 200
|
||||
res, err := c.PostWithResponse(payload, reqOpts...)
|
||||
if err != nil {
|
||||
response = err.Error()
|
||||
} else {
|
||||
response = res
|
||||
}
|
||||
} else {
|
||||
// Use regular Post call, this avoid parsing response on http status 200
|
||||
err = c.Post(payload, reqOpts...)
|
||||
if err != nil {
|
||||
response = err.Error()
|
||||
}
|
||||
}
|
||||
|
||||
err = c.Post(payload)
|
||||
if err != nil {
|
||||
var mappingErr mappingError
|
||||
if err2 := json.Unmarshal([]byte(err.Error()), &mappingErr); err2 != nil {
|
||||
c.setElasticSearchErrorMetrics()
|
||||
return
|
||||
}
|
||||
if mappingErr.Error.Type == "document_parsing_exception" {
|
||||
reg := regexp.MustCompile(`\[\w+(\.\w+)+\]`)
|
||||
k := reg.FindStringSubmatch(mappingErr.Error.Reason)
|
||||
if len(k) == 0 {
|
||||
c.setElasticSearchErrorMetrics()
|
||||
if response != "" {
|
||||
if c.Config.Elasticsearch.Batching.Enabled {
|
||||
var resp esBulkResponse
|
||||
if err2 := json.Unmarshal([]byte(response), &resp); err2 != nil {
|
||||
c.setElasticSearchErrorMetrics(sz)
|
||||
return
|
||||
}
|
||||
if !strings.Contains(k[0], "output_fields") {
|
||||
c.setElasticSearchErrorMetrics()
|
||||
if len(resp.Items) != len(falcoPayloads) {
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("mismatched %v responses with %v request payloads", len(resp.Items), len(falcoPayloads)))
|
||||
c.setElasticSearchErrorMetrics(sz)
|
||||
return
|
||||
}
|
||||
s := strings.ReplaceAll(k[0], "[output_fields.", "")
|
||||
s = strings.ReplaceAll(s, "]", "")
|
||||
for i := range payload.OutputFields {
|
||||
if strings.HasPrefix(i, s) {
|
||||
delete(payload.OutputFields, i)
|
||||
// Check errors. Not using the mapping errors retry approach for batched/bulk requests
|
||||
// Only mark set the errors and stats
|
||||
if resp.Errors {
|
||||
failed := int64(0)
|
||||
for _, item := range resp.Items {
|
||||
switch item.Create.Status {
|
||||
case http.StatusOK, http.StatusCreated:
|
||||
default:
|
||||
failed++
|
||||
}
|
||||
}
|
||||
c.setElasticSearchErrorMetrics(failed)
|
||||
// Set success sz that is reported at the end of this function
|
||||
sz -= failed
|
||||
}
|
||||
fmt.Println(payload.OutputFields)
|
||||
log.Printf("[INFO] : %v - %v\n", c.OutputType, "attempt to POST again the payload without the wrong field")
|
||||
err = c.Post(payload)
|
||||
if err != nil {
|
||||
c.setElasticSearchErrorMetrics()
|
||||
} else {
|
||||
// Slightly refactored the original approach to mapping errors, but logic is still the same
|
||||
// The Request is retried only once without the field that can't be mapped.
|
||||
// One of the problems with this approach is that if the mapping has two "unmappable" fields
|
||||
// only the first one is returned with the error and removed from the retried request.
|
||||
// Do we need to retry without the field? Do we need to keep retrying and removing fields until it succeeds?
|
||||
var resp esResponse
|
||||
if err2 := json.Unmarshal([]byte(response), &resp); err2 != nil {
|
||||
c.setElasticSearchErrorMetrics(sz)
|
||||
return
|
||||
}
|
||||
|
||||
payload := falcoPayloads[0]
|
||||
|
||||
if resp.Error.Type == "document_parsing_exception" {
|
||||
k := esReasonMappingFieldsRegex.FindStringSubmatch(resp.Error.Reason)
|
||||
if len(k) == 0 {
|
||||
c.setElasticSearchErrorMetrics(sz)
|
||||
return
|
||||
}
|
||||
if !strings.Contains(k[0], "output_fields") {
|
||||
c.setElasticSearchErrorMetrics(sz)
|
||||
return
|
||||
}
|
||||
s := strings.ReplaceAll(k[0], "[output_fields.", "")
|
||||
s = strings.ReplaceAll(s, "]", "")
|
||||
for i := range payload.OutputFields {
|
||||
if strings.HasPrefix(i, s) {
|
||||
delete(payload.OutputFields, i)
|
||||
}
|
||||
}
|
||||
utils.Log(utils.InfoLvl, c.OutputType, "attempt to POST again the payload without the wrong field")
|
||||
err = c.Post(payload, reqOpts...)
|
||||
if err != nil {
|
||||
c.setElasticSearchErrorMetrics(sz)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Setting the success status
|
||||
go c.CountMetric(Outputs, 1, []string{"output:elasticsearch", "status:ok"})
|
||||
c.Stats.Elasticsearch.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "elasticsearch", "status": OK}).Inc()
|
||||
go c.CountMetric(Outputs, sz, []string{"output:elasticsearch", "status:ok"})
|
||||
c.Stats.Elasticsearch.Add(OK, sz)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "elasticsearch", "status": OK}).Add(float64(sz))
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "elasticsearch"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
||||
func (c *Client) ElasticsearchCreateIndexTemplate(config types.ElasticsearchOutputConfig) error {
|
||||
d := c
|
||||
indexExists, err := c.isIndexTemplateExist(config)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : %v - %v\n", c.OutputType, err.Error())
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return err
|
||||
}
|
||||
if indexExists {
|
||||
log.Printf("[INFO] : %v - %v\n", c.OutputType, "Index template already exists")
|
||||
utils.Log(utils.InfoLvl, c.OutputType, "Index template already exists")
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -137,16 +248,16 @@ func (c *Client) ElasticsearchCreateIndexTemplate(config types.ElasticsearchOutp
|
|||
m = strings.ReplaceAll(m, "${REPLICAS}", fmt.Sprintf("%v", config.NumberOfReplicas))
|
||||
j := make(map[string]interface{})
|
||||
if err := json.Unmarshal([]byte(m), &j); err != nil {
|
||||
log.Printf("[ERROR] : %v - %v\n", c.OutputType, err.Error())
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return err
|
||||
}
|
||||
// create the index template by PUT
|
||||
if d.Put(j) != nil {
|
||||
log.Printf("[ERROR] : %v - %v\n", c.OutputType, err.Error())
|
||||
if err := d.Put(j); err != nil {
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
log.Printf("[INFO] : %v - %v\n", c.OutputType, "Index template created")
|
||||
utils.Log(utils.InfoLvl, c.OutputType, "Index template created")
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -167,8 +278,63 @@ func (c *Client) isIndexTemplateExist(config types.ElasticsearchOutputConfig) (b
|
|||
}
|
||||
|
||||
// setElasticSearchErrorMetrics set the error stats
|
||||
func (c *Client) setElasticSearchErrorMetrics() {
|
||||
go c.CountMetric(Outputs, 1, []string{"output:elasticsearch", "status:error"})
|
||||
c.Stats.Elasticsearch.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "elasticsearch", "status": Error}).Inc()
|
||||
func (c *Client) setElasticSearchErrorMetrics(n int64) {
|
||||
go c.CountMetric(Outputs, n, []string{"output:elasticsearch", "status:error"})
|
||||
c.Stats.Elasticsearch.Add(Error, n)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "elasticsearch", "status": Error}).Add(float64(n))
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "elasticsearch"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
}
|
||||
|
||||
func (c *Client) buildESPayload(falcopayload types.FalcoPayload) eSPayload {
|
||||
payload := eSPayload{FalcoPayload: falcopayload, Timestamp: falcopayload.Time}
|
||||
|
||||
if c.Config.Elasticsearch.FlattenFields || c.Config.Elasticsearch.CreateIndexTemplate {
|
||||
for i, j := range payload.OutputFields {
|
||||
if strings.Contains(i, ".") {
|
||||
payload.OutputFields[strings.ReplaceAll(i, ".", "_")] = j
|
||||
delete(payload.OutputFields, i)
|
||||
}
|
||||
}
|
||||
}
|
||||
return payload
|
||||
}
|
||||
|
||||
func (c *Client) marshalESPayload(falcopayload types.FalcoPayload) ([]byte, error) {
|
||||
return json.Marshal(c.buildESPayload(falcopayload))
|
||||
}
|
||||
|
||||
func (c *Client) marshalESBulkPayload(falcopayload types.FalcoPayload) ([]byte, error) {
|
||||
body, err := c.marshalESPayload(falcopayload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
_, _ = buf.WriteString(`{"create":{`)
|
||||
_, _ = buf.WriteString(`"_index":"`)
|
||||
_, _ = buf.WriteString(c.getIndex())
|
||||
_, _ = buf.WriteString("\"}}\n")
|
||||
|
||||
_, _ = buf.Write(body)
|
||||
_, _ = buf.WriteRune('\n')
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
func (c *Client) getIndex() string {
|
||||
var index string
|
||||
|
||||
current := time.Now()
|
||||
switch c.Config.Elasticsearch.Suffix {
|
||||
case None:
|
||||
index = c.Config.Elasticsearch.Index
|
||||
case "monthly":
|
||||
index = c.Config.Elasticsearch.Index + "-" + current.Format("2006.01")
|
||||
case "annually":
|
||||
index = c.Config.Elasticsearch.Index + "-" + current.Format("2006")
|
||||
default:
|
||||
index = c.Config.Elasticsearch.Index + "-" + current.Format("2006.01.02")
|
||||
}
|
||||
return index
|
||||
}
|
||||
|
|
|
@ -6,14 +6,17 @@ import (
|
|||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"github.com/DataDog/datadog-go/statsd"
|
||||
"github.com/google/uuid"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
otlpmetrics "github.com/falcosecurity/falcosidekick/outputs/otlp_metrics"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
|
@ -26,7 +29,7 @@ const ServicesPath = "/services/"
|
|||
|
||||
// NewFissionClient returns a new output.Client for accessing Kubernetes.
|
||||
func NewFissionClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics,
|
||||
statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
|
||||
otlpMetrics *otlpmetrics.OTLPMetrics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
|
||||
if config.Fission.KubeConfig != "" {
|
||||
restConfig, err := clientcmd.BuildConfigFromFlags("", config.Fission.KubeConfig)
|
||||
if err != nil {
|
||||
|
@ -41,9 +44,11 @@ func NewFissionClient(config *types.Configuration, stats *types.Statistics, prom
|
|||
Config: config,
|
||||
Stats: stats,
|
||||
PromStats: promStats,
|
||||
OTLPMetrics: otlpMetrics,
|
||||
StatsdClient: statsdClient,
|
||||
DogstatsdClient: dogstatsdClient,
|
||||
KubernetesClient: clientset,
|
||||
cfg: config.Fission.CommonConfig,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@ -56,7 +61,7 @@ func NewFissionClient(config *types.Configuration, stats *types.Statistics, prom
|
|||
StatsdClient: statsdClient,
|
||||
}
|
||||
|
||||
return NewClient(Fission, endpointUrl, config.Fission.MutualTLS, config.Fission.CheckCert, *initClientArgs)
|
||||
return NewClient(Fission, endpointUrl, config.Fission.CommonConfig, *initClientArgs)
|
||||
}
|
||||
|
||||
// FissionCall .
|
||||
|
@ -79,27 +84,32 @@ func (c *Client) FissionCall(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:Fission", "status:error"})
|
||||
c.Stats.Fission.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "Fission", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : %s - %v\n", Fission, err.Error())
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "Fission"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
log.Printf("[INFO] : %s - Function Response : %v\n", Fission, string(rawbody))
|
||||
utils.Log(utils.InfoLvl, c.OutputType, fmt.Sprintf("Function Response : %v", string(rawbody)))
|
||||
} else {
|
||||
c.httpClientLock.Lock()
|
||||
defer c.httpClientLock.Unlock()
|
||||
c.AddHeader(FissionEventIDKey, uuid.New().String())
|
||||
c.ContentType = FissionContentType
|
||||
|
||||
err := c.Post(falcopayload)
|
||||
err := c.Post(falcopayload, func(req *http.Request) {
|
||||
req.Header.Set(FissionEventIDKey, uuid.New().String())
|
||||
})
|
||||
if err != nil {
|
||||
go c.CountMetric(Outputs, 1, []string{"output:Fission", "status:error"})
|
||||
c.Stats.Fission.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "Fission", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : %s - %v\n", Fission, err.Error())
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "Fission"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
}
|
||||
log.Printf("[INFO] : %s - Call Function \"%v\" OK\n", Fission, c.Config.Fission.Function)
|
||||
utils.Log(utils.InfoLvl, c.OutputType, fmt.Sprintf("Call Function \"%v\" OK", c.Config.Fission.Function))
|
||||
go c.CountMetric(Outputs, 1, []string{"output:Fission", "status:ok"})
|
||||
c.Stats.Fission.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "Fission", "status": OK}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "Fission"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
|
|
@ -8,27 +8,29 @@ import (
|
|||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
gcpfunctions "cloud.google.com/go/functions/apiv1"
|
||||
"cloud.google.com/go/storage"
|
||||
gcpfunctionspb "google.golang.org/genproto/googleapis/cloud/functions/v1"
|
||||
|
||||
"cloud.google.com/go/pubsub"
|
||||
"cloud.google.com/go/storage"
|
||||
"github.com/DataDog/datadog-go/statsd"
|
||||
"github.com/googleapis/gax-go/v2"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"golang.org/x/oauth2/google"
|
||||
"google.golang.org/api/option"
|
||||
gcpfunctionspb "google.golang.org/genproto/googleapis/cloud/functions/v1"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
otlpmetrics "github.com/falcosecurity/falcosidekick/outputs/otlp_metrics"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
// NewGCPClient returns a new output.Client for accessing the GCP API.
|
||||
func NewGCPClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
|
||||
func NewGCPClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics,
|
||||
otlpMetrics *otlpmetrics.OTLPMetrics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
|
||||
base64decodedCredentialsData, err := base64.StdEncoding.DecodeString(config.GCP.Credentials)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : GCP - %v\n", "Error while base64-decoding GCP Credentials")
|
||||
utils.Log(utils.ErrorLvl, "GCP", "Erroc.OutputTyper while base64-decoding GCP Credentials")
|
||||
return nil, errors.New("error while base64-decoding GCP Credentials")
|
||||
}
|
||||
|
||||
|
@ -41,19 +43,19 @@ func NewGCPClient(config *types.Configuration, stats *types.Statistics, promStat
|
|||
if googleCredentialsData != "" {
|
||||
credentials, err := google.CredentialsFromJSON(context.Background(), []byte(googleCredentialsData), pubsub.ScopePubSub)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : GCP PubSub - %v\n", "Error while loading GCP Credentials")
|
||||
utils.Log(utils.ErrorLvl, "GCP PubSub", "Error while loading GCP Credentials")
|
||||
return nil, errors.New("error while loading GCP Credentials")
|
||||
}
|
||||
pubSubClient, err := pubsub.NewClient(context.Background(), config.GCP.PubSub.ProjectID, option.WithCredentials(credentials))
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : GCP PubSub - %v\n", "Error while creating GCP PubSub Client")
|
||||
utils.Log(utils.ErrorLvl, "GCP PubSub", "Error while creating GCP PubSub Client")
|
||||
return nil, errors.New("error while creating GCP PubSub Client")
|
||||
}
|
||||
topicClient = pubSubClient.Topic(config.GCP.PubSub.Topic)
|
||||
} else {
|
||||
pubSubClient, err := pubsub.NewClient(context.Background(), config.GCP.PubSub.ProjectID)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : GCP PubSub - %v\n", "Error while creating GCP PubSub Client")
|
||||
utils.Log(utils.ErrorLvl, "GCP PubSub", "Error while creating GCP PubSub Client")
|
||||
return nil, errors.New("error while creating GCP PubSub Client")
|
||||
}
|
||||
topicClient = pubSubClient.Topic(config.GCP.PubSub.Topic)
|
||||
|
@ -61,14 +63,14 @@ func NewGCPClient(config *types.Configuration, stats *types.Statistics, promStat
|
|||
}
|
||||
|
||||
if config.GCP.Storage.Bucket != "" {
|
||||
credentials, err := google.CredentialsFromJSON(context.Background(), []byte(googleCredentialsData))
|
||||
credentials, err := google.CredentialsFromJSON(context.Background(), []byte(googleCredentialsData), storage.ScopeReadWrite)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : GCP Storage - %v\n", "Error while loading GCS Credentials")
|
||||
utils.Log(utils.ErrorLvl, "GCP PubSub", "Error while loading GCS Credentials")
|
||||
return nil, errors.New("error while loading GCP Credentials")
|
||||
}
|
||||
storageClient, err = storage.NewClient(context.Background(), option.WithCredentials(credentials))
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : GCP Storage - %v\n", "Error while creating GCP Storage Client")
|
||||
utils.Log(utils.ErrorLvl, "GCP PubSub", "Error while creating GCP Storage Client")
|
||||
return nil, errors.New("error while creating GCP Storage Client")
|
||||
}
|
||||
}
|
||||
|
@ -77,18 +79,18 @@ func NewGCPClient(config *types.Configuration, stats *types.Statistics, promStat
|
|||
if googleCredentialsData != "" {
|
||||
credentials, err := google.CredentialsFromJSON(context.Background(), []byte(googleCredentialsData), gcpfunctions.DefaultAuthScopes()...)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : GCP CloudFunctions - %v\n", "Error while loading GCS Credentials")
|
||||
utils.Log(utils.ErrorLvl, "GCP CloudFunctions", "Error while loading GCS Credentials")
|
||||
return nil, errors.New("error while loading GCP Credentials")
|
||||
}
|
||||
cloudFunctionsClient, err = gcpfunctions.NewCloudFunctionsClient(context.Background(), option.WithCredentials(credentials))
|
||||
if err != nil {
|
||||
log.Printf("[ERROR]: GCP CloudFunctions - %v\n", "Error while creating GCP CloudFunctions Client")
|
||||
utils.Log(utils.ErrorLvl, "GCP CloudFunctions", "Error while creating GCP CloudFunctions Client")
|
||||
return nil, errors.New("error while creating GCP CloudFunctions Client")
|
||||
}
|
||||
} else {
|
||||
cloudFunctionsClient, err = gcpfunctions.NewCloudFunctionsClient(context.Background())
|
||||
if err != nil {
|
||||
log.Printf("[ERROR]: GCP CloudFunctions - %v\n", "Error while creating GCP CloudFunctions Client")
|
||||
utils.Log(utils.ErrorLvl, "GCP CloudFunctions", "Error while creating GCP CloudFunctions Client")
|
||||
return nil, errors.New("error while creating GCP CloudFunctions Client")
|
||||
}
|
||||
}
|
||||
|
@ -102,6 +104,7 @@ func NewGCPClient(config *types.Configuration, stats *types.Statistics, promStat
|
|||
GCPCloudFunctionsClient: cloudFunctionsClient,
|
||||
Stats: stats,
|
||||
PromStats: promStats,
|
||||
OTLPMetrics: otlpMetrics,
|
||||
StatsdClient: statsdClient,
|
||||
DogstatsdClient: dogstatsdClient,
|
||||
}, nil
|
||||
|
@ -120,15 +123,16 @@ func (c *Client) GCPCallCloudFunction(falcopayload types.FalcoPayload) {
|
|||
}, gax.WithGRPCOptions())
|
||||
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : GCPCloudFunctions - %v - %v\n", "Error while calling CloudFunction", err.Error())
|
||||
utils.Log(utils.ErrorLvl, c.OutputType+" CloudFunctions", fmt.Sprintf("Error while calling CloudFunction: %v", err))
|
||||
c.Stats.GCPPubSub.Add(Error, 1)
|
||||
go c.CountMetric("outputs", 1, []string{"output:gcpcloudfunctions", "status:error"})
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "gcpcloudfunctions", "status": Error}).Inc()
|
||||
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "gcpcloudfunctions"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
return
|
||||
}
|
||||
|
||||
log.Printf("[INFO] : GCPCloudFunctions - Call CloudFunction OK (%v)\n", result.ExecutionId)
|
||||
utils.Log(utils.ErrorLvl, c.OutputType+" CloudFunctions", fmt.Sprintf("Call CloudFunction OK (%v)", result.ExecutionId))
|
||||
c.Stats.GCPCloudFunctions.Add(OK, 1)
|
||||
go c.CountMetric("outputs", 1, []string{"output:gcpcloudfunctions", "status:ok"})
|
||||
|
||||
|
@ -147,18 +151,21 @@ func (c *Client) GCPPublishTopic(falcopayload types.FalcoPayload) {
|
|||
result := c.GCPTopicClient.Publish(context.Background(), message)
|
||||
id, err := result.Get(context.Background())
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : GCPPubSub - %v - %v\n", "Error while publishing message", err.Error())
|
||||
utils.Log(utils.ErrorLvl, c.OutputType+" PubSub", fmt.Sprintf("Error while publishing message: %v", err))
|
||||
c.Stats.GCPPubSub.Add(Error, 1)
|
||||
go c.CountMetric("outputs", 1, []string{"output:gcppubsub", "status:error"})
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "gcppubsub", "status": Error}).Inc()
|
||||
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "gcppubsub"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
return
|
||||
}
|
||||
|
||||
log.Printf("[INFO] : GCPPubSub - Send to topic OK (%v)\n", id)
|
||||
utils.Log(utils.InfoLvl, c.OutputType+" PubSub", fmt.Sprintf("Send to topic OK (%v)", id))
|
||||
c.Stats.GCPPubSub.Add(OK, 1)
|
||||
go c.CountMetric("outputs", 1, []string{"output:gcppubsub", "status:ok"})
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "gcppubsub", "status": OK}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "gcppubsub"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
||||
// UploadGCS upload payload to
|
||||
|
@ -177,29 +184,37 @@ func (c *Client) UploadGCS(falcopayload types.FalcoPayload) {
|
|||
bucketWriter := c.GCSStorageClient.Bucket(c.Config.GCP.Storage.Bucket).Object(key).NewWriter(context.Background())
|
||||
n, err := bucketWriter.Write(payload)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : GCPStorage - %v - %v\n", "Error while Uploading message", err.Error())
|
||||
utils.Log(utils.ErrorLvl, c.OutputType+"Storage", fmt.Sprintf("Error while Uploading message: %v", err))
|
||||
c.Stats.GCPStorage.Add(Error, 1)
|
||||
go c.CountMetric("outputs", 1, []string{"output:gcpstorage", "status:error"})
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "gcpstorage", "status": Error}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "gcpstorage"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
return
|
||||
}
|
||||
if n == 0 {
|
||||
log.Printf("[ERROR] : GCPStorage - %v\n", "Empty payload uploaded")
|
||||
utils.Log(utils.ErrorLvl, c.OutputType+"Storage", "Empty payload uploaded")
|
||||
c.Stats.GCPStorage.Add(Error, 1)
|
||||
go c.CountMetric("outputs", 1, []string{"output:gcpstorage", "status:error"})
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "gcpstorage", "status": Error}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "gcpstorage"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
return
|
||||
}
|
||||
if err := bucketWriter.Close(); err != nil {
|
||||
log.Printf("[ERROR] : GCPStorage - %v - %v\n", "Error while closing the writer", err.Error())
|
||||
utils.Log(utils.ErrorLvl, c.OutputType+"Storage", fmt.Sprintf("Error while closing the writer: %v", err))
|
||||
c.Stats.GCPStorage.Add(Error, 1)
|
||||
go c.CountMetric("outputs", 1, []string{"output:gcpstorage", "status:error"})
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "gcpstorage", "status": Error}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "gcpstorage"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
return
|
||||
}
|
||||
|
||||
log.Printf("[INFO] : GCPStorage - Upload to bucket OK \n")
|
||||
utils.Log(utils.InfoLvl, c.OutputType+"Storage", "Upload to bucket OK")
|
||||
c.Stats.GCPStorage.Add(OK, 1)
|
||||
go c.CountMetric("outputs", 1, []string{"output:gcpstorage", "status:ok"})
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "gcpstorage", "status": OK}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "gcpstorage"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
|
|
@ -3,8 +3,11 @@
|
|||
package outputs
|
||||
|
||||
import (
|
||||
"log"
|
||||
"net/http"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
|
@ -12,18 +15,18 @@ import (
|
|||
func (c *Client) CloudRunFunctionPost(falcopayload types.FalcoPayload) {
|
||||
c.Stats.GCPCloudRun.Add(Total, 1)
|
||||
|
||||
if c.Config.GCP.CloudRun.JWT != "" {
|
||||
c.httpClientLock.Lock()
|
||||
defer c.httpClientLock.Unlock()
|
||||
c.AddHeader(AuthorizationHeaderKey, Bearer+" "+c.Config.GCP.CloudRun.JWT)
|
||||
}
|
||||
|
||||
err := c.Post(falcopayload)
|
||||
err := c.Post(falcopayload, func(req *http.Request) {
|
||||
if c.Config.GCP.CloudRun.JWT != "" {
|
||||
req.Header.Set(AuthorizationHeaderKey, Bearer+" "+c.Config.GCP.CloudRun.JWT)
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
go c.CountMetric(Outputs, 1, []string{"output:gcpcloudrun", "status:error"})
|
||||
c.Stats.GCPCloudRun.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "gcpcloudrun", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : GCPCloudRun - %v\n", err.Error())
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "gcpcloudrun"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType+"CloudRun", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -31,4 +34,6 @@ func (c *Client) CloudRunFunctionPost(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:gcpcloudrun", "status:ok"})
|
||||
c.Stats.GCPCloudRun.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "gcpcloudrun", "status": OK}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "gcpcloudrun"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
|
|
@ -4,10 +4,13 @@ package outputs
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"log"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
|
@ -46,7 +49,7 @@ func newGooglechatPayload(falcopayload types.FalcoPayload, config *types.Configu
|
|||
if config.Googlechat.MessageFormatTemplate != nil {
|
||||
buf := &bytes.Buffer{}
|
||||
if err := config.Googlechat.MessageFormatTemplate.Execute(buf, falcopayload); err != nil {
|
||||
log.Printf("[ERROR] : GoogleChat - Error expanding Google Chat message %v", err)
|
||||
utils.Log(utils.ErrorLvl, "GoogleChat", fmt.Sprintf("Error expanding Google Chat message: %v", err))
|
||||
} else {
|
||||
messageText = buf.String()
|
||||
}
|
||||
|
@ -107,11 +110,15 @@ func (c *Client) GooglechatPost(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:googlechat", "status:error"})
|
||||
c.Stats.GoogleChat.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "googlechat", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : GoogleChat - %v\n", err)
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "googlechat"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
go c.CountMetric(Outputs, 1, []string{"output:googlechat", "status:ok"})
|
||||
c.Stats.GoogleChat.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "googlechat", "status": OK}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "googlechat"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
|
|
@ -5,10 +5,13 @@ package outputs
|
|||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"log"
|
||||
"net/http"
|
||||
"strings"
|
||||
textTemplate "text/template"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
|
@ -73,7 +76,7 @@ func newGotifyPayload(falcopayload types.FalcoPayload, config *types.Configurati
|
|||
err = ttmpl.Execute(&outtext, falcopayload)
|
||||
}
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : Gotify - %v\n", err)
|
||||
utils.Log(utils.ErrorLvl, "Gotify", err.Error())
|
||||
return g
|
||||
}
|
||||
|
||||
|
@ -93,16 +96,14 @@ func newGotifyPayload(falcopayload types.FalcoPayload, config *types.Configurati
|
|||
func (c *Client) GotifyPost(falcopayload types.FalcoPayload) {
|
||||
c.Stats.Gotify.Add(Total, 1)
|
||||
|
||||
if c.Config.Gotify.Token != "" {
|
||||
c.httpClientLock.Lock()
|
||||
defer c.httpClientLock.Unlock()
|
||||
c.AddHeader("X-Gotify-Key", c.Config.Gotify.Token)
|
||||
}
|
||||
|
||||
err := c.Post(newGotifyPayload(falcopayload, c.Config))
|
||||
err := c.Post(newGotifyPayload(falcopayload, c.Config), func(req *http.Request) {
|
||||
if c.Config.Gotify.Token != "" {
|
||||
req.Header.Set("X-Gotify-Key", c.Config.Gotify.Token)
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
c.setGotifyErrorMetrics()
|
||||
log.Printf("[ERROR] : Gotify - %v\n", err)
|
||||
utils.Log(utils.ErrorLvl, "Gotify", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -110,6 +111,7 @@ func (c *Client) GotifyPost(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:gotify", "status:ok"})
|
||||
c.Stats.Gotify.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "gotify", "status": OK}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "gotify"), attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
||||
// setGotifyErrorMetrics set the error stats
|
||||
|
@ -117,4 +119,6 @@ func (c *Client) setGotifyErrorMetrics() {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:gotify", "status:error"})
|
||||
c.Stats.Gotify.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "gotify", "status": Error}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "gotify"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
}
|
||||
|
|
|
@ -4,8 +4,11 @@ package outputs
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
|
@ -78,47 +81,54 @@ func newGrafanaOnCallPayload(falcopayload types.FalcoPayload) grafanaOnCallPaylo
|
|||
func (c *Client) GrafanaPost(falcopayload types.FalcoPayload) {
|
||||
c.Stats.Grafana.Add(Total, 1)
|
||||
c.ContentType = GrafanaContentType
|
||||
c.httpClientLock.Lock()
|
||||
defer c.httpClientLock.Unlock()
|
||||
c.AddHeader("Authorization", Bearer+" "+c.Config.Grafana.APIKey)
|
||||
for i, j := range c.Config.Grafana.CustomHeaders {
|
||||
c.AddHeader(i, j)
|
||||
}
|
||||
|
||||
err := c.Post(newGrafanaPayload(falcopayload, c.Config))
|
||||
err := c.Post(newGrafanaPayload(falcopayload, c.Config), func(req *http.Request) {
|
||||
req.Header.Set("Authorization", Bearer+" "+c.Config.Grafana.APIKey)
|
||||
for i, j := range c.Config.Grafana.CustomHeaders {
|
||||
req.Header.Set(i, j)
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
go c.CountMetric(Outputs, 1, []string{"output:grafana", "status:error"})
|
||||
c.Stats.Grafana.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "grafana", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : Grafana - %v\n", err)
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "grafana"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
go c.CountMetric(Outputs, 1, []string{"output:grafana", "status:ok"})
|
||||
c.Stats.Grafana.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "grafana", "status": OK}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "grafana"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
||||
// GrafanaOnCallPost posts event to grafana onCall
|
||||
func (c *Client) GrafanaOnCallPost(falcopayload types.FalcoPayload) {
|
||||
c.Stats.GrafanaOnCall.Add(Total, 1)
|
||||
c.ContentType = GrafanaContentType
|
||||
c.httpClientLock.Lock()
|
||||
defer c.httpClientLock.Unlock()
|
||||
for i, j := range c.Config.GrafanaOnCall.CustomHeaders {
|
||||
c.AddHeader(i, j)
|
||||
}
|
||||
|
||||
err := c.Post(newGrafanaOnCallPayload(falcopayload))
|
||||
err := c.Post(newGrafanaOnCallPayload(falcopayload), func(req *http.Request) {
|
||||
for i, j := range c.Config.GrafanaOnCall.CustomHeaders {
|
||||
req.Header.Set(i, j)
|
||||
}
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
go c.CountMetric(Outputs, 1, []string{"output:grafanaoncall", "status:error"})
|
||||
c.Stats.Grafana.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "grafanaoncall", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : Grafana OnCall - %v\n", err)
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "grafanaoncall"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
go c.CountMetric(Outputs, 1, []string{"output:grafanaoncall", "status:ok"})
|
||||
c.Stats.Grafana.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "grafanaoncall", "status": OK}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "grafanaoncall"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
|
|
@ -3,9 +3,12 @@
|
|||
package outputs
|
||||
|
||||
import (
|
||||
"log"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
|
@ -40,20 +43,20 @@ func newInfluxdbPayload(falcopayload types.FalcoPayload) influxdbPayload {
|
|||
func (c *Client) InfluxdbPost(falcopayload types.FalcoPayload) {
|
||||
c.Stats.Influxdb.Add(Total, 1)
|
||||
|
||||
c.httpClientLock.Lock()
|
||||
defer c.httpClientLock.Unlock()
|
||||
c.AddHeader("Accept", "application/json")
|
||||
err := c.Post(newInfluxdbPayload(falcopayload), func(req *http.Request) {
|
||||
req.Header.Set("Accept", "application/json")
|
||||
|
||||
if c.Config.Influxdb.Token != "" {
|
||||
c.AddHeader("Authorization", "Token "+c.Config.Influxdb.Token)
|
||||
}
|
||||
|
||||
err := c.Post(newInfluxdbPayload(falcopayload))
|
||||
if c.Config.Influxdb.Token != "" {
|
||||
req.Header.Set("Authorization", "Token "+c.Config.Influxdb.Token)
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
go c.CountMetric(Outputs, 1, []string{"output:influxdb", "status:error"})
|
||||
c.Stats.Influxdb.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "influxdb", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : InfluxDB - %v\n", err)
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "influxdb"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -61,4 +64,6 @@ func (c *Client) InfluxdbPost(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:influxdb", "status:ok"})
|
||||
c.Stats.Influxdb.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "influxdb", "status": OK}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "influxdb"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
|
|
@ -8,7 +8,6 @@ import (
|
|||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"strings"
|
||||
"time"
|
||||
|
@ -17,12 +16,16 @@ import (
|
|||
"github.com/segmentio/kafka-go"
|
||||
"github.com/segmentio/kafka-go/sasl/plain"
|
||||
"github.com/segmentio/kafka-go/sasl/scram"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
otlpmetrics "github.com/falcosecurity/falcosidekick/outputs/otlp_metrics"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
// NewKafkaClient returns a new output.Client for accessing the Apache Kafka.
|
||||
func NewKafkaClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
|
||||
func NewKafkaClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics,
|
||||
otlpMetrics *otlpmetrics.OTLPMetrics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
|
||||
|
||||
transport := &kafka.Transport{
|
||||
Dial: (&net.Dialer{
|
||||
|
@ -36,7 +39,7 @@ func NewKafkaClient(config *types.Configuration, stats *types.Statistics, promSt
|
|||
caCertPool, err := x509.SystemCertPool()
|
||||
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : Kafka - failed to initialize root CAs: %v", err)
|
||||
utils.Log(utils.ErrorLvl, "Kafka", fmt.Sprintf("failed to initialize root CAs: %v", err))
|
||||
}
|
||||
|
||||
transport.TLS = &tls.Config{
|
||||
|
@ -73,7 +76,7 @@ func NewKafkaClient(config *types.Configuration, stats *types.Statistics, promSt
|
|||
}
|
||||
}
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : Kafka - %v\n", err)
|
||||
utils.Log(utils.ErrorLvl, "Kafka", err.Error())
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
@ -99,7 +102,7 @@ func NewKafkaClient(config *types.Configuration, stats *types.Statistics, promSt
|
|||
case "round_robin":
|
||||
kafkaWriter.Balancer = &kafka.RoundRobin{}
|
||||
default:
|
||||
log.Printf("[ERROR] : Kafka - unsupported balancer %q\n", config.Kafka.Balancer)
|
||||
utils.Log(utils.ErrorLvl, "Kafka", fmt.Sprintf("unsupported balancer %q", config.Kafka.Balancer))
|
||||
return nil, fmt.Errorf("unsupported balancer %q", config.Kafka.Balancer)
|
||||
}
|
||||
|
||||
|
@ -115,7 +118,7 @@ func NewKafkaClient(config *types.Configuration, stats *types.Statistics, promSt
|
|||
case "NONE":
|
||||
// leave as default, none
|
||||
default:
|
||||
log.Printf("[ERROR] : Kafka - unsupported compression %q\n", config.Kafka.Compression)
|
||||
utils.Log(utils.ErrorLvl, "Kafka", fmt.Sprintf("unsupported compression %q", config.Kafka.Compression))
|
||||
return nil, fmt.Errorf("unsupported compression %q", config.Kafka.Compression)
|
||||
}
|
||||
|
||||
|
@ -127,7 +130,7 @@ func NewKafkaClient(config *types.Configuration, stats *types.Statistics, promSt
|
|||
case "NONE":
|
||||
kafkaWriter.RequiredAcks = kafka.RequireNone
|
||||
default:
|
||||
log.Printf("[ERROR] : Kafka - unsupported required ACKs %q\n", config.Kafka.RequiredACKs)
|
||||
utils.Log(utils.ErrorLvl, "Kafka", fmt.Sprintf("unsupported required ACKs %q", config.Kafka.RequiredACKs))
|
||||
return nil, fmt.Errorf("unsupported required ACKs %q", config.Kafka.RequiredACKs)
|
||||
}
|
||||
|
||||
|
@ -136,6 +139,7 @@ func NewKafkaClient(config *types.Configuration, stats *types.Statistics, promSt
|
|||
Config: config,
|
||||
Stats: stats,
|
||||
PromStats: promStats,
|
||||
OTLPMetrics: otlpMetrics,
|
||||
StatsdClient: statsdClient,
|
||||
DogstatsdClient: dogstatsdClient,
|
||||
KafkaProducer: kafkaWriter,
|
||||
|
@ -151,7 +155,7 @@ func (c *Client) KafkaProduce(falcopayload types.FalcoPayload) {
|
|||
falcoMsg, err := json.Marshal(falcopayload)
|
||||
if err != nil {
|
||||
c.incrKafkaErrorMetrics(1)
|
||||
log.Printf("[ERROR] : Kafka - %v - %v\n", "failed to marshalling message", err.Error())
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("failed to marshalling message: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -163,11 +167,11 @@ func (c *Client) KafkaProduce(falcopayload types.FalcoPayload) {
|
|||
err = c.KafkaProducer.WriteMessages(context.Background(), kafkaMsg)
|
||||
if err != nil {
|
||||
c.incrKafkaErrorMetrics(1)
|
||||
log.Printf("[ERROR] : Kafka - %v\n", err.Error())
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
} else {
|
||||
c.incrKafkaSuccessMetrics(1)
|
||||
log.Printf("[INFO] : Kafka - Publish OK\n")
|
||||
utils.Log(utils.InfoLvl, c.OutputType, "Publish OK")
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -175,10 +179,10 @@ func (c *Client) KafkaProduce(falcopayload types.FalcoPayload) {
|
|||
func (c *Client) handleKafkaCompletion(messages []kafka.Message, err error) {
|
||||
if err != nil {
|
||||
c.incrKafkaErrorMetrics(len(messages))
|
||||
log.Printf("[ERROR] : Kafka (%d) - %v\n", len(messages), err)
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("(%d) %v", len(messages), err))
|
||||
} else {
|
||||
c.incrKafkaSuccessMetrics(len(messages))
|
||||
log.Printf("[INFO] : Kafka (%d) - Publish OK\n", len(messages))
|
||||
utils.Log(utils.InfoLvl, c.OutputType, fmt.Sprintf("(%d) - Publish OK", len(messages)))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -187,6 +191,8 @@ func (c *Client) incrKafkaSuccessMetrics(add int) {
|
|||
go c.CountMetric("outputs", int64(add), []string{"output:kafka", "status:ok"})
|
||||
c.Stats.Kafka.Add(OK, int64(add))
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "kafka", "status": OK}).Add(float64(add))
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "kafka"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
||||
// incrKafkaErrorMetrics increments the error stats
|
||||
|
@ -194,4 +200,6 @@ func (c *Client) incrKafkaErrorMetrics(add int) {
|
|||
go c.CountMetric(Outputs, int64(add), []string{"output:kafka", "status:error"})
|
||||
c.Stats.Kafka.Add(Error, int64(add))
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "kafka", "status": Error}).Add(float64(add))
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "kafka"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
}
|
||||
|
|
|
@ -6,8 +6,10 @@ import (
|
|||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
|
@ -38,7 +40,9 @@ func (c *Client) KafkaRestPost(falcopayload types.FalcoPayload) {
|
|||
if err != nil {
|
||||
c.Stats.KafkaRest.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "kafkarest", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : Kafka Rest - %v - %v\n", "failed to marshalling message", err.Error())
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "kafkarest"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("failed to marshalling message: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -55,7 +59,9 @@ func (c *Client) KafkaRestPost(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:kafkarest", "status:error"})
|
||||
c.Stats.KafkaRest.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "kafkarest", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : Kafka Rest - %v\n", err.Error())
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "kafkarest"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -63,4 +69,6 @@ func (c *Client) KafkaRestPost(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:kafkarest", "status:ok"})
|
||||
c.Stats.KafkaRest.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "kafkarest", "status": OK}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "kafkarest"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
|
|
@ -6,14 +6,17 @@ import (
|
|||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"github.com/DataDog/datadog-go/statsd"
|
||||
"github.com/google/uuid"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
otlpmetrics "github.com/falcosecurity/falcosidekick/outputs/otlp_metrics"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
|
@ -26,7 +29,8 @@ const KubelessEventTypeValue = "falco"
|
|||
const KubelessContentType = "application/json"
|
||||
|
||||
// NewKubelessClient returns a new output.Client for accessing Kubernetes.
|
||||
func NewKubelessClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
|
||||
func NewKubelessClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics,
|
||||
otlpMetrics *otlpmetrics.OTLPMetrics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
|
||||
if config.Kubeless.Kubeconfig != "" {
|
||||
restConfig, err := clientcmd.BuildConfigFromFlags("", config.Kubeless.Kubeconfig)
|
||||
if err != nil {
|
||||
|
@ -41,9 +45,11 @@ func NewKubelessClient(config *types.Configuration, stats *types.Statistics, pro
|
|||
Config: config,
|
||||
Stats: stats,
|
||||
PromStats: promStats,
|
||||
OTLPMetrics: otlpMetrics,
|
||||
StatsdClient: statsdClient,
|
||||
DogstatsdClient: dogstatsdClient,
|
||||
KubernetesClient: clientset,
|
||||
cfg: config.Kubeless.CommonConfig,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@ -53,10 +59,11 @@ func NewKubelessClient(config *types.Configuration, stats *types.Statistics, pro
|
|||
Stats: stats,
|
||||
DogstatsdClient: dogstatsdClient,
|
||||
PromStats: promStats,
|
||||
OTLPMetrics: otlpMetrics,
|
||||
StatsdClient: statsdClient,
|
||||
}
|
||||
|
||||
return NewClient("Kubeless", endpointUrl, config.Kubeless.MutualTLS, config.Kubeless.CheckCert, *initClientArgs)
|
||||
return NewClient("Kubeless", endpointUrl, config.Kubeless.CommonConfig, *initClientArgs)
|
||||
}
|
||||
|
||||
// KubelessCall .
|
||||
|
@ -78,29 +85,34 @@ func (c *Client) KubelessCall(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:kubeless", "status:error"})
|
||||
c.Stats.Kubeless.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "kubeless", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : Kubeless - %v\n", err)
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "kubeless"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
log.Printf("[INFO] : Kubeless - Function Response : %v\n", string(rawbody))
|
||||
utils.Log(utils.InfoLvl, c.OutputType, fmt.Sprintf("Function Response : %v", string(rawbody)))
|
||||
} else {
|
||||
c.httpClientLock.Lock()
|
||||
defer c.httpClientLock.Unlock()
|
||||
c.AddHeader(KubelessEventIDKey, uuid.New().String())
|
||||
c.AddHeader(KubelessEventTypeKey, KubelessEventTypeValue)
|
||||
c.AddHeader(KubelessEventNamespaceKey, c.Config.Kubeless.Namespace)
|
||||
c.ContentType = KubelessContentType
|
||||
|
||||
err := c.Post(falcopayload)
|
||||
err := c.Post(falcopayload, func(req *http.Request) {
|
||||
req.Header.Set(KubelessEventIDKey, uuid.New().String())
|
||||
req.Header.Set(KubelessEventTypeKey, KubelessEventTypeValue)
|
||||
req.Header.Set(KubelessEventNamespaceKey, c.Config.Kubeless.Namespace)
|
||||
})
|
||||
if err != nil {
|
||||
go c.CountMetric(Outputs, 1, []string{"output:kubeless", "status:error"})
|
||||
c.Stats.Kubeless.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "kubeless", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : Kubeless - %v\n", err)
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "kubeless"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
}
|
||||
log.Printf("[INFO] : Kubeless - Call Function \"%v\" OK\n", c.Config.Kubeless.Function)
|
||||
utils.Log(utils.InfoLvl, c.OutputType, fmt.Sprintf("Call Function \"%v\" OK", c.Config.Kubeless.Function))
|
||||
go c.CountMetric(Outputs, 1, []string{"output:kubeless", "status:ok"})
|
||||
c.Stats.Kubeless.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "kubeless", "status": OK}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "kubeless"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
|
|
@ -0,0 +1,149 @@
|
|||
// SPDX-License-Identifier: MIT OR Apache-2.0
|
||||
|
||||
package outputs
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
|
||||
"github.com/DataDog/datadog-go/statsd"
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
"github.com/telkomdev/go-stash"
|
||||
)
|
||||
|
||||
/*
|
||||
Logstash throws a jsonparse error if keys contain an index, e.g., "key[0]".
|
||||
This function is meant to get rid of the index brackets format in favor of dots.
|
||||
For the previous example, the "key[0]" value will be replaced by "key.0".
|
||||
*/
|
||||
func replaceKeysWithIndexes(data map[string]interface{}) map[string]interface{} {
|
||||
newData := make(map[string]interface{})
|
||||
re := regexp.MustCompile(`\[(\d+)\]`)
|
||||
|
||||
for key, value := range data {
|
||||
newKey := re.ReplaceAllStringFunc(key, func(match string) string {
|
||||
return "." + re.FindStringSubmatch(match)[1]
|
||||
})
|
||||
|
||||
// Recursively process nested maps
|
||||
if nestedMap, ok := value.(map[string]interface{}); ok {
|
||||
newData[newKey] = replaceKeysWithIndexes(nestedMap)
|
||||
} else {
|
||||
newData[newKey] = value
|
||||
}
|
||||
}
|
||||
return newData
|
||||
}
|
||||
|
||||
func firstValid(paths []string) string {
|
||||
for _, path := range paths {
|
||||
if _, err := os.Stat(path); err == nil {
|
||||
return path
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func NewLogstashClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics,
|
||||
statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
|
||||
var tlsCfg *tls.Config
|
||||
|
||||
if mTLS := config.Logstash.MutualTLS; mTLS {
|
||||
// Get certificates
|
||||
var MutualTLSClientCertPath, MutualTLSClientKeyPath, MutualTLSClientCaCertPath string
|
||||
|
||||
MutualTLSClientCertPath = firstValid([]string{config.Logstash.CertFile, config.MutualTLSClient.CertFile, config.MutualTLSFilesPath + "/client.crt"})
|
||||
MutualTLSClientKeyPath = firstValid([]string{config.Logstash.KeyFile, config.MutualTLSClient.KeyFile, config.MutualTLSFilesPath + "/client.key"})
|
||||
MutualTLSClientCaCertPath = firstValid([]string{config.Logstash.CaCertFile, config.MutualTLSClient.CaCertFile, config.MutualTLSFilesPath + "/ca.crt"})
|
||||
|
||||
cert, err := tls.LoadX509KeyPair(MutualTLSClientCertPath, MutualTLSClientKeyPath)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to load logstash SSL certificate: %w", err)
|
||||
utils.Log(utils.ErrorLvl, "Logstash", err.Error())
|
||||
return nil, err
|
||||
}
|
||||
|
||||
caCert, err := os.ReadFile(MutualTLSClientCaCertPath)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to load logstash SSL CA certificate: %w", err)
|
||||
utils.Log(utils.ErrorLvl, "Logstash", err.Error())
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Configure TLS
|
||||
|
||||
pool, err := x509.SystemCertPool()
|
||||
if err != nil {
|
||||
pool = x509.NewCertPool()
|
||||
}
|
||||
|
||||
tlsCfg = &tls.Config{
|
||||
MinVersion: tls.VersionTLS12,
|
||||
Certificates: []tls.Certificate{cert},
|
||||
RootCAs: pool,
|
||||
}
|
||||
tlsCfg.RootCAs.AppendCertsFromPEM(caCert)
|
||||
|
||||
} else {
|
||||
// The check cert flag and mutual tls are mutually exclusive
|
||||
if !config.Logstash.CheckCert {
|
||||
tlsCfg = &tls.Config{
|
||||
InsecureSkipVerify: true, // #nosec G402 This is only set as a result of explicit configuration
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
lClient, err := stash.Connect(config.Logstash.Address, config.Logstash.Port, stash.SetTLSConfig(tlsCfg), stash.SetTLS(config.Logstash.TLS || config.Logstash.MutualTLS))
|
||||
|
||||
if err != nil {
|
||||
err = fmt.Errorf("misconfiguration, cannot connect to the logstash server: %w", err)
|
||||
utils.Log(utils.ErrorLvl, "Logstash", err.Error())
|
||||
return nil, err
|
||||
}
|
||||
utils.Log(utils.InfoLvl, "Logstash", "connected to logstash server")
|
||||
|
||||
return &Client{
|
||||
OutputType: "Logstash",
|
||||
Config: config,
|
||||
LogstashClient: lClient,
|
||||
Stats: stats,
|
||||
PromStats: promStats,
|
||||
StatsdClient: statsdClient,
|
||||
DogstatsdClient: dogstatsdClient,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *Client) LogstashPost(falcopayload types.FalcoPayload) {
|
||||
status := OK
|
||||
loglevel := utils.InfoLvl
|
||||
c.Stats.Logstash.Add(Total, 1)
|
||||
|
||||
falcopayload.OutputFields = replaceKeysWithIndexes(falcopayload.OutputFields)
|
||||
|
||||
falcopayload.Tags = append(falcopayload.Tags, c.Config.Logstash.Tags...)
|
||||
logstashPayload, err := json.Marshal(falcopayload)
|
||||
if err != nil {
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("failed to marshal falcopayload: %v", err))
|
||||
|
||||
c.Stats.Logstash.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "logstash", "status": Error}).Inc()
|
||||
return
|
||||
}
|
||||
|
||||
n, err := c.LogstashClient.Write(logstashPayload)
|
||||
if err != nil {
|
||||
status = Error
|
||||
loglevel = utils.ErrorLvl
|
||||
}
|
||||
|
||||
c.Stats.Logstash.Add(status, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "logstash", "status": status}).Inc()
|
||||
go c.CountMetric(Outputs, 1, []string{"output:logstash", fmt.Sprintf("status:%v", status)})
|
||||
|
||||
utils.Log(loglevel, c.OutputType, fmt.Sprintf("output.logstash status=%v (%v)", status, n))
|
||||
}
|
|
@ -4,13 +4,18 @@ package outputs
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
const LokiOut string = "Loki"
|
||||
|
||||
type lokiPayload struct {
|
||||
Streams []lokiStream `json:"streams"`
|
||||
}
|
||||
|
@ -26,22 +31,34 @@ type lokiValue = []string
|
|||
const LokiContentType = "application/json"
|
||||
|
||||
func newLokiPayload(falcopayload types.FalcoPayload, config *types.Configuration) lokiPayload {
|
||||
s := make(map[string]string, 3+len(falcopayload.OutputFields)+len(config.Loki.ExtraLabelsList)+len(falcopayload.Tags))
|
||||
s := make(map[string]string)
|
||||
s["rule"] = falcopayload.Rule
|
||||
s["source"] = falcopayload.Source
|
||||
s["priority"] = falcopayload.Priority.String()
|
||||
|
||||
if k8sNs, ok := falcopayload.OutputFields["k8s.ns.name"].(string); ok {
|
||||
s["k8s_ns_name"] = k8sNs
|
||||
}
|
||||
if k8sPod, ok := falcopayload.OutputFields["k8s.pod.name"].(string); ok {
|
||||
s["k8s_pod_name"] = k8sPod
|
||||
}
|
||||
|
||||
for i, j := range falcopayload.OutputFields {
|
||||
switch v := j.(type) {
|
||||
case string:
|
||||
for k := range config.Customfields {
|
||||
if i == k {
|
||||
s[strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(i, ".", ""), "]", ""), "[", "")] = strings.ReplaceAll(v, "\"", "")
|
||||
s[strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(i, ".", "_"), "]", ""), "[", "")] = strings.ReplaceAll(v, "\"", "")
|
||||
}
|
||||
}
|
||||
for k := range config.Templatedfields {
|
||||
if i == k {
|
||||
s[strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(i, ".", "_"), "]", ""), "[", "")] = strings.ReplaceAll(v, "\"", "")
|
||||
}
|
||||
}
|
||||
for _, k := range config.Loki.ExtraLabelsList {
|
||||
if i == k {
|
||||
s[strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(i, ".", ""), "]", ""), "[", "")] = strings.ReplaceAll(v, "\"", "")
|
||||
s[strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(i, ".", "_"), "]", ""), "[", "")] = strings.ReplaceAll(v, "\"", "")
|
||||
}
|
||||
}
|
||||
default:
|
||||
|
@ -58,35 +75,36 @@ func newLokiPayload(falcopayload types.FalcoPayload, config *types.Configuration
|
|||
s["tags"] = strings.Join(falcopayload.Tags, ",")
|
||||
}
|
||||
|
||||
var v string
|
||||
if config.Loki.Format == "json" {
|
||||
v = falcopayload.String()
|
||||
} else {
|
||||
v = falcopayload.Output
|
||||
}
|
||||
|
||||
return lokiPayload{Streams: []lokiStream{
|
||||
{
|
||||
Stream: s,
|
||||
Values: []lokiValue{[]string{fmt.Sprintf("%v", falcopayload.Time.UnixNano()), falcopayload.Output}},
|
||||
Values: []lokiValue{[]string{fmt.Sprintf("%v", falcopayload.Time.UnixNano()), v}},
|
||||
},
|
||||
}}
|
||||
}
|
||||
|
||||
func (c *Client) configureTenant() {
|
||||
if c.Config.Loki.Tenant != "" {
|
||||
c.httpClientLock.Lock()
|
||||
defer c.httpClientLock.Unlock()
|
||||
c.AddHeader("X-Scope-OrgID", c.Config.Loki.Tenant)
|
||||
func lokiConfigureTenant(cfg *types.Configuration, req *http.Request) {
|
||||
if cfg.Loki.Tenant != "" {
|
||||
req.Header.Set("X-Scope-OrgID", cfg.Loki.Tenant)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Client) configureAuth() {
|
||||
if c.Config.Loki.User != "" && c.Config.Loki.APIKey != "" {
|
||||
c.httpClientLock.Lock()
|
||||
defer c.httpClientLock.Unlock()
|
||||
c.BasicAuth(c.Config.Loki.User, c.Config.Loki.APIKey)
|
||||
func lokiConfigureAuth(cfg *types.Configuration, req *http.Request) {
|
||||
if cfg.Loki.User != "" && cfg.Loki.APIKey != "" {
|
||||
req.SetBasicAuth(cfg.Loki.User, cfg.Loki.APIKey)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Client) configureCustomHeaders() {
|
||||
c.httpClientLock.Lock()
|
||||
defer c.httpClientLock.Unlock()
|
||||
for i, j := range c.Config.Loki.CustomHeaders {
|
||||
c.AddHeader(i, j)
|
||||
func lokiConfigureCustomHeaders(cfg *types.Configuration, req *http.Request) {
|
||||
for i, j := range cfg.Loki.CustomHeaders {
|
||||
req.Header.Set(i, j)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -95,20 +113,25 @@ func (c *Client) LokiPost(falcopayload types.FalcoPayload) {
|
|||
c.Stats.Loki.Add(Total, 1)
|
||||
c.ContentType = LokiContentType
|
||||
|
||||
c.configureTenant()
|
||||
c.configureAuth()
|
||||
c.configureCustomHeaders()
|
||||
err := c.Post(newLokiPayload(falcopayload, c.Config), func(req *http.Request) {
|
||||
lokiConfigureTenant(c.Config, req)
|
||||
lokiConfigureAuth(c.Config, req)
|
||||
lokiConfigureCustomHeaders(c.Config, req)
|
||||
})
|
||||
|
||||
err := c.Post(newLokiPayload(falcopayload, c.Config))
|
||||
if err != nil {
|
||||
go c.CountMetric(Outputs, 1, []string{"output:loki", "status:error"})
|
||||
c.Stats.Loki.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "loki", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : Loki - %v\n", err)
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "loki"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, LokiOut, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
go c.CountMetric(Outputs, 1, []string{"output:loki", "status:ok"})
|
||||
c.Stats.Loki.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "loki", "status": OK}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "loki"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
|
|
@ -4,10 +4,13 @@ package outputs
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"log"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
|
@ -78,7 +81,7 @@ func newMattermostPayload(falcopayload types.FalcoPayload, config *types.Configu
|
|||
if config.Mattermost.MessageFormatTemplate != nil {
|
||||
buf := &bytes.Buffer{}
|
||||
if err := config.Mattermost.MessageFormatTemplate.Execute(buf, falcopayload); err != nil {
|
||||
log.Printf("[ERROR] : Mattermost - Error expanding Mattermost message %v", err)
|
||||
utils.Log(utils.ErrorLvl, "Mattermost", fmt.Sprintf("Error expanding Mattermost message %v", err))
|
||||
} else {
|
||||
messageText = buf.String()
|
||||
}
|
||||
|
@ -131,7 +134,9 @@ func (c *Client) MattermostPost(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:mattermost", "status:error"})
|
||||
c.Stats.Mattermost.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "mattermost", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : Mattermost - %v\n", err)
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "mattermost"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -139,4 +144,6 @@ func (c *Client) MattermostPost(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:mattermost", "status:ok"})
|
||||
c.Stats.Mattermost.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "mattermost", "status": OK}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "mattermost"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
|
|
@ -4,18 +4,21 @@ package outputs
|
|||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"log"
|
||||
"fmt"
|
||||
|
||||
"github.com/DataDog/datadog-go/statsd"
|
||||
mqtt "github.com/eclipse/paho.mqtt.golang"
|
||||
"github.com/google/uuid"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
otlpmetrics "github.com/falcosecurity/falcosidekick/outputs/otlp_metrics"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
// NewMQTTClient returns a new output.Client for accessing Kubernetes.
|
||||
func NewMQTTClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics,
|
||||
statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
|
||||
otlpMetrics *otlpmetrics.OTLPMetrics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
|
||||
|
||||
options := mqtt.NewClientOptions()
|
||||
options.AddBroker(config.MQTT.Broker)
|
||||
|
@ -30,7 +33,7 @@ func NewMQTTClient(config *types.Configuration, stats *types.Statistics, promSta
|
|||
}
|
||||
}
|
||||
options.OnConnectionLost = func(client mqtt.Client, err error) {
|
||||
log.Printf("[ERROR] : MQTT - Connection lost: %v\n", err.Error())
|
||||
utils.Log(utils.ErrorLvl, "MQTT", fmt.Sprintf("Connection lost: %v", err))
|
||||
}
|
||||
|
||||
client := mqtt.NewClient(options)
|
||||
|
@ -41,6 +44,7 @@ func NewMQTTClient(config *types.Configuration, stats *types.Statistics, promSta
|
|||
MQTTClient: client,
|
||||
Stats: stats,
|
||||
PromStats: promStats,
|
||||
OTLPMetrics: otlpMetrics,
|
||||
StatsdClient: statsdClient,
|
||||
DogstatsdClient: dogstatsdClient,
|
||||
}, nil
|
||||
|
@ -56,7 +60,9 @@ func (c *Client) MQTTPublish(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:mqtt", "status:error"})
|
||||
c.Stats.MQTT.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "mqtt", "status": err.Error()}).Inc()
|
||||
log.Printf("[ERROR] : %s - %v\n", MQTT, err.Error())
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "mqtt"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
defer c.MQTTClient.Disconnect(100)
|
||||
|
@ -64,12 +70,15 @@ func (c *Client) MQTTPublish(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:mqtt", "status:error"})
|
||||
c.Stats.MQTT.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "mqtt", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : %s - %v\n", MQTT, err.Error())
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "mqtt"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
log.Printf("[INFO] : %s - Message published\n", MQTT)
|
||||
utils.Log(utils.InfoLvl, c.OutputType, "Message published")
|
||||
go c.CountMetric(Outputs, 1, []string{"output:mqtt", "status:ok"})
|
||||
c.Stats.MQTT.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "mqtt", "status": OK}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "mqtt"), attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
|
|
@ -3,8 +3,11 @@
|
|||
package outputs
|
||||
|
||||
import (
|
||||
"log"
|
||||
"net/http"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
|
@ -12,24 +15,22 @@ import (
|
|||
func (c *Client) N8NPost(falcopayload types.FalcoPayload) {
|
||||
c.Stats.N8N.Add(Total, 1)
|
||||
|
||||
if c.Config.N8N.User != "" && c.Config.N8N.Password != "" {
|
||||
c.httpClientLock.Lock()
|
||||
defer c.httpClientLock.Unlock()
|
||||
c.BasicAuth(c.Config.N8N.User, c.Config.N8N.Password)
|
||||
}
|
||||
err := c.Post(falcopayload, func(req *http.Request) {
|
||||
if c.Config.N8N.User != "" && c.Config.N8N.Password != "" {
|
||||
req.SetBasicAuth(c.Config.N8N.User, c.Config.N8N.Password)
|
||||
}
|
||||
|
||||
if c.Config.N8N.HeaderAuthName != "" && c.Config.N8N.HeaderAuthValue != "" {
|
||||
c.httpClientLock.Lock()
|
||||
defer c.httpClientLock.Unlock()
|
||||
c.AddHeader(c.Config.N8N.HeaderAuthName, c.Config.N8N.HeaderAuthValue)
|
||||
}
|
||||
|
||||
err := c.Post(falcopayload)
|
||||
if c.Config.N8N.HeaderAuthName != "" && c.Config.N8N.HeaderAuthValue != "" {
|
||||
req.Header.Set(c.Config.N8N.HeaderAuthName, c.Config.N8N.HeaderAuthValue)
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
go c.CountMetric(Outputs, 1, []string{"output:n8n", "status:error"})
|
||||
c.Stats.N8N.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "n8n", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : N8N - %v\n", err.Error())
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "n8n"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -37,4 +38,5 @@ func (c *Client) N8NPost(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:n8n", "status:ok"})
|
||||
c.Stats.N8N.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "n8n", "status": OK}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "n8n"), attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
|
|
@ -4,49 +4,60 @@ package outputs
|
|||
|
||||
import (
|
||||
"encoding/json"
|
||||
"log"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
nats "github.com/nats-io/nats.go"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
var slugRegularExpression = regexp.MustCompile("[^a-z0-9]+")
|
||||
var slugRegExp = regexp.MustCompile("[^a-z0-9]+")
|
||||
|
||||
const defaultNatsSubjects = "falco.<priority>.<rule>"
|
||||
|
||||
// NatsPublish publishes event to NATS
|
||||
func (c *Client) NatsPublish(falcopayload types.FalcoPayload) {
|
||||
c.Stats.Nats.Add(Total, 1)
|
||||
|
||||
subject := c.Config.Nats.SubjectTemplate
|
||||
if len(subject) == 0 {
|
||||
subject = defaultNatsSubjects
|
||||
}
|
||||
|
||||
subject = strings.ReplaceAll(subject, "<priority>", strings.ToLower(falcopayload.Priority.String()))
|
||||
subject = strings.ReplaceAll(subject, "<rule>", strings.Trim(slugRegExp.ReplaceAllString(strings.ToLower(falcopayload.Rule), "_"), "_"))
|
||||
|
||||
nc, err := nats.Connect(c.EndpointURL.String())
|
||||
if err != nil {
|
||||
c.setNatsErrorMetrics()
|
||||
log.Printf("[ERROR] : NATS - %v\n", err)
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
defer nc.Flush()
|
||||
defer nc.Close()
|
||||
|
||||
r := strings.Trim(slugRegularExpression.ReplaceAllString(strings.ToLower(falcopayload.Rule), "_"), "_")
|
||||
j, err := json.Marshal(falcopayload)
|
||||
if err != nil {
|
||||
c.setStanErrorMetrics()
|
||||
log.Printf("[ERROR] : STAN - %v\n", err.Error())
|
||||
c.setNatsErrorMetrics()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
err = nc.Publish("falco."+strings.ToLower(falcopayload.Priority.String())+"."+r, j)
|
||||
err = nc.Publish(subject, j)
|
||||
if err != nil {
|
||||
c.setNatsErrorMetrics()
|
||||
log.Printf("[ERROR] : NATS - %v\n", err)
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
go c.CountMetric("outputs", 1, []string{"output:nats", "status:ok"})
|
||||
c.Stats.Nats.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "nats", "status": OK}).Inc()
|
||||
log.Printf("[INFO] : NATS - Publish OK\n")
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "nats"), attribute.String("status", OK)).Inc()
|
||||
utils.Log(utils.InfoLvl, c.OutputType, "Publish OK")
|
||||
}
|
||||
|
||||
// setNatsErrorMetrics set the error stats
|
||||
|
@ -54,4 +65,7 @@ func (c *Client) setNatsErrorMetrics() {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:nats", "status:error"})
|
||||
c.Stats.Nats.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "nats", "status": Error}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "nats"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
|
||||
}
|
||||
|
|
|
@ -3,9 +3,11 @@
|
|||
package outputs
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"log"
|
||||
"net/http"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
|
@ -13,24 +15,22 @@ import (
|
|||
func (c *Client) NodeRedPost(falcopayload types.FalcoPayload) {
|
||||
c.Stats.NodeRed.Add(Total, 1)
|
||||
|
||||
c.httpClientLock.Lock()
|
||||
defer c.httpClientLock.Unlock()
|
||||
if c.Config.NodeRed.User != "" && c.Config.NodeRed.Password != "" {
|
||||
c.AddHeader("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(c.Config.NodeRed.User+":"+c.Config.NodeRed.Password)))
|
||||
}
|
||||
|
||||
if len(c.Config.NodeRed.CustomHeaders) != 0 {
|
||||
for i, j := range c.Config.NodeRed.CustomHeaders {
|
||||
c.AddHeader(i, j)
|
||||
err := c.Post(falcopayload, func(req *http.Request) {
|
||||
if c.Config.NodeRed.User != "" && c.Config.NodeRed.Password != "" {
|
||||
req.SetBasicAuth(c.Config.NodeRed.User, c.Config.NodeRed.Password)
|
||||
}
|
||||
}
|
||||
|
||||
err := c.Post(falcopayload)
|
||||
for i, j := range c.Config.NodeRed.CustomHeaders {
|
||||
req.Header.Set(i, j)
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
go c.CountMetric(Outputs, 1, []string{"output:nodered", "status:error"})
|
||||
c.Stats.NodeRed.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "nodered", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : NodeRed - %v\n", err.Error())
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "nodered"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -38,4 +38,6 @@ func (c *Client) NodeRedPost(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:nodered", "status:ok"})
|
||||
c.Stats.NodeRed.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "nodered", "status": OK}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "nodered"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
|
|
@ -6,19 +6,22 @@ import (
|
|||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"strconv"
|
||||
|
||||
"github.com/DataDog/datadog-go/statsd"
|
||||
"github.com/google/uuid"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
otlpmetrics "github.com/falcosecurity/falcosidekick/outputs/otlp_metrics"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
// NewOpenfaasClient returns a new output.Client for accessing Kubernetes.
|
||||
func NewOpenfaasClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
|
||||
func NewOpenfaasClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics,
|
||||
otlpMetrics *otlpmetrics.OTLPMetrics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
|
||||
if config.Openfaas.Kubeconfig != "" {
|
||||
restConfig, err := clientcmd.BuildConfigFromFlags("", config.Openfaas.Kubeconfig)
|
||||
if err != nil {
|
||||
|
@ -33,6 +36,7 @@ func NewOpenfaasClient(config *types.Configuration, stats *types.Statistics, pro
|
|||
Config: config,
|
||||
Stats: stats,
|
||||
PromStats: promStats,
|
||||
OTLPMetrics: otlpMetrics,
|
||||
StatsdClient: statsdClient,
|
||||
DogstatsdClient: dogstatsdClient,
|
||||
KubernetesClient: clientset,
|
||||
|
@ -45,10 +49,11 @@ func NewOpenfaasClient(config *types.Configuration, stats *types.Statistics, pro
|
|||
Stats: stats,
|
||||
DogstatsdClient: dogstatsdClient,
|
||||
PromStats: promStats,
|
||||
OTLPMetrics: otlpMetrics,
|
||||
StatsdClient: statsdClient,
|
||||
}
|
||||
|
||||
return NewClient(Openfaas, endpointUrl, config.Openfaas.MutualTLS, config.Openfaas.CheckCert, *initClientArgs)
|
||||
return NewClient(Openfaas, endpointUrl, config.Openfaas.CommonConfig, *initClientArgs)
|
||||
}
|
||||
|
||||
// OpenfaasCall .
|
||||
|
@ -68,22 +73,28 @@ func (c *Client) OpenfaasCall(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:openfaas", "status:error"})
|
||||
c.Stats.Openfaas.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "openfaas", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : %v - %v\n", Openfaas, err)
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "openfaas"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
log.Printf("[INFO] : %v - Function Response : %v\n", Openfaas, string(rawbody))
|
||||
utils.Log(utils.InfoLvl, c.OutputType, fmt.Sprintf("Function Response : %v", string(rawbody)))
|
||||
} else {
|
||||
err := c.Post(falcopayload)
|
||||
if err != nil {
|
||||
go c.CountMetric(Outputs, 1, []string{"output:openfaas", "status:error"})
|
||||
c.Stats.Openfaas.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "openfaas", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : %v - %v\n", Openfaas, err)
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "openfaas"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
}
|
||||
log.Printf("[INFO] : %v - Call Function \"%v\" OK\n", Openfaas, c.Config.Openfaas.FunctionName+"."+c.Config.Openfaas.FunctionNamespace)
|
||||
utils.Log(utils.InfoLvl, c.OutputType, fmt.Sprintf("Call Function \"%v\" OK", c.Config.Openfaas.FunctionName+"."+c.Config.Openfaas.FunctionNamespace))
|
||||
go c.CountMetric(Outputs, 1, []string{"output:openfaas", "status:ok"})
|
||||
c.Stats.Openfaas.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "openfaas", "status": OK}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "openfaas"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
|
|
@ -3,8 +3,11 @@
|
|||
package outputs
|
||||
|
||||
import (
|
||||
"log"
|
||||
"net/http"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
|
@ -12,19 +15,18 @@ import (
|
|||
func (c *Client) OpenObservePost(falcopayload types.FalcoPayload) {
|
||||
c.Stats.OpenObserve.Add(Total, 1)
|
||||
|
||||
if c.Config.OpenObserve.Username != "" && c.Config.OpenObserve.Password != "" {
|
||||
c.httpClientLock.Lock()
|
||||
defer c.httpClientLock.Unlock()
|
||||
c.BasicAuth(c.Config.OpenObserve.Username, c.Config.OpenObserve.Password)
|
||||
}
|
||||
err := c.Post(falcopayload, func(req *http.Request) {
|
||||
if c.Config.OpenObserve.Username != "" && c.Config.OpenObserve.Password != "" {
|
||||
req.SetBasicAuth(c.Config.OpenObserve.Username, c.Config.OpenObserve.Password)
|
||||
}
|
||||
|
||||
for i, j := range c.Config.OpenObserve.CustomHeaders {
|
||||
c.AddHeader(i, j)
|
||||
}
|
||||
|
||||
if err := c.Post(falcopayload); err != nil {
|
||||
for i, j := range c.Config.OpenObserve.CustomHeaders {
|
||||
req.Header.Set(i, j)
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
c.setOpenObserveErrorMetrics()
|
||||
log.Printf("[ERROR] : OpenObserve - %v\n", err)
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -32,6 +34,8 @@ func (c *Client) OpenObservePost(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:openobserve", "status:ok"})
|
||||
c.Stats.OpenObserve.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "openobserve", "status": OK}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "openobserve"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
||||
// setOpenObserveErrorMetrics set the error stats
|
||||
|
@ -39,4 +43,6 @@ func (c *Client) setOpenObserveErrorMetrics() {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:openobserve", "status:error"})
|
||||
c.Stats.OpenObserve.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "openobserve", "status": Error}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "openobserve"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
}
|
||||
|
|
|
@ -3,9 +3,12 @@
|
|||
package outputs
|
||||
|
||||
import (
|
||||
"log"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
|
@ -64,16 +67,17 @@ func newOpsgeniePayload(falcopayload types.FalcoPayload) opsgeniePayload {
|
|||
// OpsgeniePost posts event to OpsGenie
|
||||
func (c *Client) OpsgeniePost(falcopayload types.FalcoPayload) {
|
||||
c.Stats.Opsgenie.Add(Total, 1)
|
||||
c.httpClientLock.Lock()
|
||||
defer c.httpClientLock.Unlock()
|
||||
c.AddHeader(AuthorizationHeaderKey, "GenieKey "+c.Config.Opsgenie.APIKey)
|
||||
|
||||
err := c.Post(newOpsgeniePayload(falcopayload))
|
||||
err := c.Post(newOpsgeniePayload(falcopayload), func(req *http.Request) {
|
||||
req.Header.Set(AuthorizationHeaderKey, "GenieKey "+c.Config.Opsgenie.APIKey)
|
||||
})
|
||||
if err != nil {
|
||||
go c.CountMetric(Outputs, 1, []string{"output:opsgenie", "status:error"})
|
||||
c.Stats.Opsgenie.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "opsgenie", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : OpsGenie - %v\n", err)
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "opsgenie"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -81,4 +85,6 @@ func (c *Client) OpsgeniePost(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:opsgenie", "status:ok"})
|
||||
c.Stats.Opsgenie.Add("ok", 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "opsgenie", "status": OK}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "opsgenie"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
|
|
@ -0,0 +1,121 @@
|
|||
package outputs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/DataDog/datadog-go/statsd"
|
||||
"go.opentelemetry.io/contrib/bridges/otelslog"
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp"
|
||||
sdklog "go.opentelemetry.io/otel/sdk/log"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
otlpmetrics "github.com/falcosecurity/falcosidekick/outputs/otlp_metrics"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
func NewOtlpLogsClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics,
|
||||
otlpMetrics *otlpmetrics.OTLPMetrics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
|
||||
initClientArgs := &types.InitClientArgs{
|
||||
Config: config,
|
||||
Stats: stats,
|
||||
DogstatsdClient: dogstatsdClient,
|
||||
PromStats: promStats,
|
||||
OTLPMetrics: otlpMetrics,
|
||||
StatsdClient: statsdClient,
|
||||
}
|
||||
otlpClient, err := NewClient("OTLP Logs", config.OTLP.Logs.Endpoint, types.CommonConfig{}, *initClientArgs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
loggerProvider, err := OTLPLogsInit(ctx, config)
|
||||
if err != nil {
|
||||
utils.Log(utils.ErrorLvl, "OTLP Logs", fmt.Sprintf("Error Logger creation: %v", err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
|
||||
utils.Log(utils.ErrorLvl, "OTLP", err.Error())
|
||||
}))
|
||||
|
||||
utils.Log(utils.InfoLvl, "OTLP Logs", "Client created")
|
||||
otlpClient.ShutDownFunc = func() {
|
||||
if err := loggerProvider.Shutdown(ctx); err != nil {
|
||||
utils.Log(utils.ErrorLvl, "OTLP Logs", err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
otlpClient.OTLPLogsLogger = otelslog.NewLogger("falco", otelslog.WithLoggerProvider(loggerProvider))
|
||||
|
||||
return otlpClient, nil
|
||||
}
|
||||
|
||||
func OTLPLogsInit(ctx context.Context, config *types.Configuration) (*sdklog.LoggerProvider, error) {
|
||||
// As config.OTLP.Logs fields may have been set by our own config (e.g. YAML),
|
||||
// we need to set SDK environment variables accordingly.
|
||||
os.Setenv("OTEL_EXPORTER_OTLP_LOGS_ENDPOINT", strings.TrimSpace(config.OTLP.Logs.Endpoint))
|
||||
if config.OTLP.Logs.Protocol != "" {
|
||||
os.Setenv("OTEL_EXPORTER_OTLP_LOGS_PROTOCOL", strings.TrimSpace(config.OTLP.Logs.Protocol))
|
||||
}
|
||||
if config.OTLP.Logs.Headers != "" {
|
||||
os.Setenv("OTEL_EXPORTER_OTLP_LOGS_HEADERS", strings.TrimSpace(config.OTLP.Logs.Headers))
|
||||
}
|
||||
if config.OTLP.Logs.Timeout != 0 {
|
||||
os.Setenv("OTEL_EXPORTER_OTLP_LOGS_TIMEOUT", fmt.Sprintf("%d", config.OTLP.Logs.Timeout))
|
||||
}
|
||||
if len(config.OTLP.Logs.ExtraEnvVars) != 0 {
|
||||
for i, j := range config.OTLP.Logs.ExtraEnvVars {
|
||||
os.Setenv(i, j)
|
||||
}
|
||||
}
|
||||
|
||||
var exporter sdklog.Exporter
|
||||
var err error
|
||||
switch config.OTLP.Logs.Protocol {
|
||||
case GRPC:
|
||||
opts := []otlploggrpc.Option{}
|
||||
if !config.OTLP.Traces.CheckCert {
|
||||
opts = append(opts, otlploggrpc.WithInsecure())
|
||||
}
|
||||
exporter, err = otlploggrpc.New(ctx, opts...)
|
||||
default:
|
||||
opts := []otlploghttp.Option{}
|
||||
if !config.OTLP.Traces.CheckCert {
|
||||
opts = append(opts, otlploghttp.WithInsecure())
|
||||
}
|
||||
exporter, err = otlploghttp.New(ctx, opts...)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create Logs exporter: %v", err)
|
||||
}
|
||||
|
||||
loggerProvider := sdklog.NewLoggerProvider(
|
||||
sdklog.WithProcessor(
|
||||
sdklog.NewBatchProcessor(exporter),
|
||||
),
|
||||
sdklog.WithResource(newResource()),
|
||||
)
|
||||
|
||||
return loggerProvider, nil
|
||||
}
|
||||
|
||||
func (c *Client) OTLPLogsPost(falcopayload types.FalcoPayload) {
|
||||
c.OTLPLogsLogger.Info(
|
||||
falcopayload.Output,
|
||||
"priority", falcopayload.Priority.String(),
|
||||
"source", falcopayload.Source,
|
||||
"rule", falcopayload.Rule,
|
||||
"hostname", falcopayload.Hostname,
|
||||
"tags", strings.Join(falcopayload.Tags, ","),
|
||||
slog.String("timestamp", falcopayload.Time.String()),
|
||||
)
|
||||
|
||||
utils.Log(utils.InfoLvl, c.OutputType, "Sending log")
|
||||
}
|
|
@ -0,0 +1,244 @@
|
|||
package otlp_metrics
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
sdkmetric "go.opentelemetry.io/otel/sdk/metric"
|
||||
sdkresource "go.opentelemetry.io/otel/sdk/resource"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.23.1"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
meterName = "falcosecurity.falco.otlpmetrics.meter"
|
||||
serviceName = "falco"
|
||||
serviceVersion = "0.1.0"
|
||||
)
|
||||
|
||||
// TODO: move logging logic out of this context
|
||||
|
||||
// Config represents config parameters for OTLP Metrics
|
||||
type Config struct {
|
||||
Endpoint string
|
||||
Protocol string
|
||||
Timeout int64
|
||||
Headers string
|
||||
ExtraEnvVars map[string]string
|
||||
CheckCert bool
|
||||
MinimumPriority string
|
||||
ExtraAttributes string
|
||||
ExtraAttributesList []string
|
||||
}
|
||||
|
||||
// InitProvider initializes a new OTLP Metrics Provider. It returns a function to shut down it.
|
||||
func InitProvider(ctx context.Context, config *Config) (func(ctx context.Context) error, error) {
|
||||
restoreEnvironment, err := initEnvironment(config)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to init environemt: %v", err)
|
||||
}
|
||||
defer restoreEnvironment()
|
||||
|
||||
shutdownFunc, err := initMeterProvider(ctx, config)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create meter provider: %v", err)
|
||||
}
|
||||
|
||||
if config.Endpoint != "" {
|
||||
utils.Log(utils.InfoLvl, "OTLP Metrics", "Client created")
|
||||
otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
|
||||
utils.Log(utils.ErrorLvl, "OTLP", err.Error())
|
||||
}))
|
||||
}
|
||||
|
||||
return shutdownFunc, nil
|
||||
}
|
||||
|
||||
// initEnvironment initializes the proper environment variables to the corresponding config values. If an environment
|
||||
// variable is already set, it's value is left uncharged. It returns a function to restore the previous environment
|
||||
// context.
|
||||
func initEnvironment(config *Config) (cleanupFunc func(), err error) {
|
||||
cleanupFuncs := make([]func(), 0, 5)
|
||||
defer func() {
|
||||
if err != nil {
|
||||
for _, fn := range cleanupFuncs {
|
||||
fn()
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
var unsetEnv func()
|
||||
// As OTLPMetrics fields may have been set by our own config (e.g. YAML), We need to set SDK environment variables
|
||||
// accordingly.
|
||||
if endpoint := os.Getenv("OTEL_EXPORTER_OTLP_METRICS_ENDPOINT"); endpoint == "" {
|
||||
unsetEnv, err = setEnv("OTEL_EXPORTER_OTLP_METRICS_ENDPOINT", strings.TrimSpace(config.Endpoint))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cleanupFuncs = append(cleanupFuncs, unsetEnv)
|
||||
}
|
||||
|
||||
if protocol := os.Getenv("OTEL_EXPORTER_OTLP_METRICS_PROTOCOL"); protocol == "" {
|
||||
unsetEnv, err = setEnv("OTEL_EXPORTER_OTLP_METRICS_PROTOCOL", strings.TrimSpace(config.Protocol))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cleanupFuncs = append(cleanupFuncs, unsetEnv)
|
||||
}
|
||||
|
||||
if headers := os.Getenv("OTEL_EXPORTER_OTLP_METRICS_HEADERS"); headers == "" {
|
||||
unsetEnv, err = setEnv("OTEL_EXPORTER_OTLP_METRICS_HEADERS", strings.TrimSpace(config.Headers))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cleanupFuncs = append(cleanupFuncs, unsetEnv)
|
||||
}
|
||||
|
||||
if timeout := os.Getenv("OTEL_EXPORTER_OTLP_METRICS_TIMEOUT"); timeout == "" {
|
||||
unsetEnv, err = setEnv("OTEL_EXPORTER_OTLP_METRICS_TIMEOUT", fmt.Sprintf("%d", config.Timeout))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cleanupFuncs = append(cleanupFuncs, unsetEnv)
|
||||
}
|
||||
|
||||
for envVar, value := range config.ExtraEnvVars {
|
||||
if configValue := os.Getenv(envVar); configValue != "" {
|
||||
continue
|
||||
}
|
||||
unsetEnv, err = setEnv(envVar, value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cleanupFuncs = append(cleanupFuncs, unsetEnv)
|
||||
}
|
||||
|
||||
return func() {
|
||||
for _, fn := range cleanupFuncs {
|
||||
fn()
|
||||
}
|
||||
}, nil
|
||||
}
|
||||
|
||||
func setEnv(envVar, value string) (func(), error) {
|
||||
if err := os.Setenv(envVar, value); err != nil {
|
||||
return nil, fmt.Errorf("failed to set %v to %v: %v", envVar, value, err)
|
||||
}
|
||||
return func() {
|
||||
if err := os.Setenv(envVar, ""); err != nil {
|
||||
utils.Log(utils.ErrorLvl, "OTLP Metrics", fmt.Sprintf("Error unsetting env variable %q: %v", envVar, err))
|
||||
}
|
||||
}, nil
|
||||
}
|
||||
|
||||
// initMeterProvider initializes an OTEL meter provider (and the corresponding exporter). It returns a function to shut
|
||||
// down the meter provider.
|
||||
func initMeterProvider(ctx context.Context, config *Config) (func(context.Context) error, error) {
|
||||
var exporter sdkmetric.Exporter
|
||||
var err error
|
||||
switch config.Protocol {
|
||||
case "grpc":
|
||||
opts := []otlpmetricgrpc.Option{}
|
||||
if !config.CheckCert {
|
||||
opts = append(opts, otlpmetricgrpc.WithInsecure())
|
||||
}
|
||||
exporter, err = otlpmetricgrpc.New(ctx, opts...)
|
||||
default:
|
||||
opts := []otlpmetrichttp.Option{}
|
||||
if !config.CheckCert {
|
||||
opts = append(opts, otlpmetrichttp.WithInsecure())
|
||||
}
|
||||
exporter, err = otlpmetrichttp.New(ctx, opts...)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create Logs exporter: %v", err)
|
||||
}
|
||||
|
||||
res, err := sdkresource.New(ctx,
|
||||
sdkresource.WithSchemaURL(semconv.SchemaURL),
|
||||
sdkresource.WithAttributes(
|
||||
semconv.ServiceName(serviceName),
|
||||
semconv.ServiceVersion(serviceVersion),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create resource: %v", err)
|
||||
}
|
||||
|
||||
meterProvider := sdkmetric.NewMeterProvider(
|
||||
sdkmetric.WithReader(sdkmetric.NewPeriodicReader(exporter)),
|
||||
sdkmetric.WithResource(res),
|
||||
)
|
||||
|
||||
otel.SetMeterProvider(meterProvider)
|
||||
return meterProvider.Shutdown, nil
|
||||
}
|
||||
|
||||
type Counter interface {
|
||||
With(attributes ...attribute.KeyValue) Counter
|
||||
Inc()
|
||||
}
|
||||
|
||||
type OTLPMetrics struct {
|
||||
Falco Counter
|
||||
Inputs Counter
|
||||
Outputs Counter
|
||||
}
|
||||
|
||||
type counterInstrument struct {
|
||||
name string
|
||||
description string
|
||||
supportedAttributes map[string]struct{}
|
||||
attributes []attribute.KeyValue
|
||||
}
|
||||
|
||||
func NewCounter(name string, description string, supportedAttributes []string) Counter {
|
||||
counter := &counterInstrument{
|
||||
name: name,
|
||||
description: description,
|
||||
supportedAttributes: make(map[string]struct{}),
|
||||
}
|
||||
for _, attr := range supportedAttributes {
|
||||
counter.supportedAttributes[attr] = struct{}{}
|
||||
}
|
||||
return counter
|
||||
}
|
||||
|
||||
func (c *counterInstrument) With(attributes ...attribute.KeyValue) Counter {
|
||||
filteredAttributes := c.filterAttributes(attributes)
|
||||
counter := &counterInstrument{
|
||||
name: c.name,
|
||||
supportedAttributes: c.supportedAttributes,
|
||||
attributes: append(c.attributes, filteredAttributes...),
|
||||
}
|
||||
return counter
|
||||
}
|
||||
|
||||
func (c *counterInstrument) filterAttributes(attributes []attribute.KeyValue) []attribute.KeyValue {
|
||||
filteredAttributes := make([]attribute.KeyValue, 0, len(c.attributes))
|
||||
for _, attr := range attributes {
|
||||
if _, ok := c.supportedAttributes[string(attr.Key)]; ok {
|
||||
filteredAttributes = append(filteredAttributes, attr)
|
||||
}
|
||||
}
|
||||
return filteredAttributes
|
||||
}
|
||||
|
||||
func (c *counterInstrument) Inc() {
|
||||
meter := otel.Meter(meterName)
|
||||
ruleCounter, err := meter.Int64Counter(c.name, metric.WithDescription(c.description))
|
||||
if err != nil {
|
||||
utils.Log(utils.ErrorLvl, "OTLP Metrics", fmt.Sprintf("Error generating metric: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
ruleCounter.Add(context.Background(), 1, metric.WithAttributes(c.attributes...))
|
||||
}
|
|
@ -8,37 +8,48 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/DataDog/datadog-go/statsd"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
otlpmetrics "github.com/falcosecurity/falcosidekick/outputs/otlp_metrics"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
// Unit-testing helper
|
||||
var getTracerProvider = otel.GetTracerProvider
|
||||
|
||||
func NewOtlpTracesClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
|
||||
func NewOtlpTracesClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics,
|
||||
otlpMetrics *otlpmetrics.OTLPMetrics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
|
||||
initClientArgs := &types.InitClientArgs{
|
||||
Config: config,
|
||||
Stats: stats,
|
||||
DogstatsdClient: dogstatsdClient,
|
||||
PromStats: promStats,
|
||||
OTLPMetrics: otlpMetrics,
|
||||
StatsdClient: statsdClient,
|
||||
}
|
||||
otlpClient, err := NewClient("OTLPTraces", config.OTLP.Traces.Endpoint, false, false, *initClientArgs)
|
||||
otlpClient, err := NewClient("OTLP Traces", config.OTLP.Traces.Endpoint, types.CommonConfig{}, *initClientArgs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
shutDownFunc, err := otlpInit(config)
|
||||
|
||||
ctx := context.Background()
|
||||
shutDownFunc, err := OTLPTracesInit(otlpClient, config, ctx)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : OLTP Traces - Error client creation: %v\n", err)
|
||||
utils.Log(utils.ErrorLvl, "OTLP Traces", fmt.Sprintf("Error client creation: %v", err))
|
||||
return nil, err
|
||||
}
|
||||
log.Printf("[INFO] : OTLP Traces - %+v\n", config.OTLP.Traces)
|
||||
|
||||
otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
|
||||
utils.Log(utils.ErrorLvl, "OTLP", err.Error())
|
||||
}))
|
||||
|
||||
utils.Log(utils.InfoLvl, "OTLP Traces", "Client created")
|
||||
otlpClient.ShutDownFunc = shutDownFunc
|
||||
return otlpClient, nil
|
||||
}
|
||||
|
@ -67,16 +78,16 @@ func (c *Client) newTrace(falcopayload types.FalcoPayload) (*trace.Span, error)
|
|||
span.SetAttributes(attribute.String("source", falcopayload.Source))
|
||||
span.SetAttributes(attribute.String("priority", falcopayload.Priority.String()))
|
||||
span.SetAttributes(attribute.String("rule", falcopayload.Rule))
|
||||
span.SetAttributes(attribute.String("output", falcopayload.Output))
|
||||
span.SetAttributes(attribute.String("hostname", falcopayload.Hostname))
|
||||
span.SetAttributes(attribute.StringSlice("tags", falcopayload.Tags))
|
||||
for k, v := range falcopayload.OutputFields {
|
||||
span.SetAttributes(attribute.String(k, fmt.Sprintf("%v", v)))
|
||||
}
|
||||
span.AddEvent(falcopayload.Output, trace.EventOption(trace.WithTimestamp(falcopayload.Time)))
|
||||
span.End(trace.WithTimestamp(endTime))
|
||||
|
||||
if c.Config.Debug {
|
||||
log.Printf("[DEBUG] : OTLP Traces - payload generated successfully for traceid=%s", span.SpanContext().TraceID())
|
||||
utils.Log(utils.DebugLvl, c.OutputType, fmt.Sprintf("Payload generated successfully for traceid=%s", span.SpanContext().TraceID()))
|
||||
}
|
||||
|
||||
return &span, nil
|
||||
|
@ -90,17 +101,10 @@ func (c *Client) OTLPTracesPost(falcopayload types.FalcoPayload) {
|
|||
|
||||
_, err := c.newTrace(falcopayload)
|
||||
if err != nil {
|
||||
go c.CountMetric(Outputs, 1, []string{"output:otlptraces", "status:error"})
|
||||
c.Stats.OTLPTraces.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "otlptraces", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : OLTP Traces - Error generating trace: %v\n", err)
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("Error generating trace: %v", err))
|
||||
return
|
||||
}
|
||||
// Setting the success status
|
||||
go c.CountMetric(Outputs, 1, []string{"output:otlptraces", "status:ok"})
|
||||
c.Stats.OTLPTraces.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "otlptraces", "status": OK}).Inc()
|
||||
log.Println("[INFO] : OLTP Traces - OK")
|
||||
utils.Log(utils.InfoLvl, c.OutputType, "Sending trace")
|
||||
}
|
||||
|
||||
func generateTraceID(falcopayload types.FalcoPayload) (trace.TraceID, error) {
|
|
@ -5,17 +5,18 @@ package outputs
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
|
||||
otelresource "go.opentelemetry.io/otel/sdk/resource"
|
||||
sdktrace "go.opentelemetry.io/otel/sdk/trace"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.23.1"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -31,18 +32,25 @@ func newResource() *otelresource.Resource {
|
|||
)
|
||||
}
|
||||
|
||||
func installExportPipeline(config *types.Configuration, ctx context.Context) (func(context.Context) error, error) {
|
||||
var client otlptrace.Client
|
||||
switch config.OTLP.Traces.CheckCert {
|
||||
case true:
|
||||
client = otlptracehttp.NewClient()
|
||||
case false:
|
||||
client = otlptracehttp.NewClient(otlptracehttp.WithInsecure())
|
||||
func installTracesExportPipeline(config *types.Configuration, ctx context.Context) (func(context.Context) error, error) {
|
||||
var exporter sdktrace.SpanExporter
|
||||
var err error
|
||||
switch config.OTLP.Traces.Protocol {
|
||||
case GRPC:
|
||||
opts := []otlptracegrpc.Option{}
|
||||
if !config.OTLP.Traces.CheckCert {
|
||||
opts = append(opts, otlptracegrpc.WithInsecure())
|
||||
}
|
||||
exporter, err = otlptracegrpc.New(ctx, opts...)
|
||||
default:
|
||||
opts := []otlptracehttp.Option{}
|
||||
if !config.OTLP.Traces.CheckCert {
|
||||
opts = append(opts, otlptracehttp.WithInsecure())
|
||||
}
|
||||
exporter, err = otlptracehttp.New(ctx, opts...)
|
||||
}
|
||||
|
||||
exporter, err := otlptrace.New(ctx, client)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("creating OTLP trace exporter: %w", err)
|
||||
return nil, fmt.Errorf("failed to create Traces exporter: %v", err)
|
||||
}
|
||||
|
||||
withBatcher := sdktrace.WithBatcher(exporter)
|
||||
|
@ -58,7 +66,7 @@ func installExportPipeline(config *types.Configuration, ctx context.Context) (fu
|
|||
return tracerProvider.Shutdown, nil
|
||||
}
|
||||
|
||||
func otlpInit(config *types.Configuration) (func(), error) {
|
||||
func OTLPTracesInit(client *Client, config *types.Configuration, ctx context.Context) (func(), error) {
|
||||
// As config.OTLP.Traces fields may have been set by our own config (e.g. YAML),
|
||||
// we need to set SDK environment variables accordingly.
|
||||
os.Setenv("OTEL_EXPORTER_OTLP_TRACES_ENDPOINT", strings.TrimSpace(config.OTLP.Traces.Endpoint))
|
||||
|
@ -76,15 +84,14 @@ func otlpInit(config *types.Configuration) (func(), error) {
|
|||
os.Setenv(i, j)
|
||||
}
|
||||
}
|
||||
ctx := context.Background()
|
||||
// Registers a tracer Provider globally.
|
||||
shutdown, err := installExportPipeline(config, ctx)
|
||||
shutdown, err := installTracesExportPipeline(config, ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
shutDownCallback := func() {
|
||||
if err := shutdown(ctx); err != nil {
|
||||
log.Printf("[ERROR] : OLTP Traces - Error: %v\n", err)
|
||||
utils.Log(utils.ErrorLvl, "OTLP Traces", err.Error())
|
||||
|
||||
}
|
||||
}
|
|
@ -6,10 +6,9 @@ import (
|
|||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
)
|
||||
|
||||
func TestOtlpNewTrace(t *testing.T) {
|
||||
|
@ -91,7 +90,7 @@ func TestOtlpNewTrace(t *testing.T) {
|
|||
PromStats: promStats,
|
||||
}
|
||||
|
||||
client, _ := NewClient("OTLP", "http://localhost:4317", false, false, *initClientArgs)
|
||||
client, _ := NewClient("OTLP", "http://localhost:4317", types.CommonConfig{}, *initClientArgs)
|
||||
// Test newTrace()
|
||||
span, err := client.newTrace(c.fp)
|
||||
require.Nil(t, err)
|
|
@ -4,13 +4,14 @@ package outputs
|
|||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/PagerDuty/go-pagerduty"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
|
@ -35,14 +36,18 @@ func (c *Client) PagerdutyPost(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:pagerduty", "status:error"})
|
||||
c.Stats.Pagerduty.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "pagerduty", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : PagerDuty - %v\n", err)
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "pagerduty"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
go c.CountMetric(Outputs, 1, []string{"output:pagerduty", "status:ok"})
|
||||
c.Stats.Pagerduty.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "pagerduty", "status": OK}).Inc()
|
||||
log.Printf("[INFO] : Pagerduty - Create Incident OK\n")
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "pagerduty"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
utils.Log(utils.InfoLvl, c.OutputType, "Create Incident OK")
|
||||
}
|
||||
|
||||
func createPagerdutyEvent(falcopayload types.FalcoPayload, config types.PagerdutyConfig) pagerduty.V2Event {
|
||||
|
|
|
@ -4,12 +4,11 @@ package outputs
|
|||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/DataDog/datadog-go/statsd"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
errorsv1 "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
@ -18,6 +17,10 @@ import (
|
|||
"k8s.io/client-go/tools/clientcmd"
|
||||
wgpolicy "sigs.k8s.io/wg-policy-prototypes/policy-report/pkg/api/wgpolicyk8s.io/v1alpha2"
|
||||
crd "sigs.k8s.io/wg-policy-prototypes/policy-report/pkg/generated/v1alpha2/clientset/versioned"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
otlpmetrics "github.com/falcosecurity/falcosidekick/outputs/otlp_metrics"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
type resource struct {
|
||||
|
@ -30,18 +33,18 @@ const (
|
|||
policyReportName string = "falco-policy-report"
|
||||
policyReportSource string = "Falco"
|
||||
|
||||
update string = "Update"
|
||||
create string = "Create"
|
||||
updateStr string = "Update"
|
||||
createStr string = "Create"
|
||||
|
||||
high wgpolicy.PolicyResultSeverity = "high"
|
||||
low wgpolicy.PolicyResultSeverity = "low"
|
||||
medium wgpolicy.PolicyResultSeverity = "medium"
|
||||
info wgpolicy.PolicyResultSeverity = "info"
|
||||
critical wgpolicy.PolicyResultSeverity = "critical"
|
||||
highStr wgpolicy.PolicyResultSeverity = "high"
|
||||
lowStr wgpolicy.PolicyResultSeverity = "low"
|
||||
mediumStr wgpolicy.PolicyResultSeverity = "medium"
|
||||
infoStr wgpolicy.PolicyResultSeverity = "info"
|
||||
criticalStr wgpolicy.PolicyResultSeverity = "critical"
|
||||
|
||||
fail wgpolicy.PolicyResult = "fail"
|
||||
warn wgpolicy.PolicyResult = "warn"
|
||||
skip wgpolicy.PolicyResult = "skip"
|
||||
failStr wgpolicy.PolicyResult = "fail"
|
||||
warnStr wgpolicy.PolicyResult = "warn"
|
||||
skipStr wgpolicy.PolicyResult = "skip"
|
||||
|
||||
k8sPodName string = "k8s.pod.name"
|
||||
k8sNsName string = "k8s.ns.name"
|
||||
|
@ -54,28 +57,6 @@ const (
|
|||
var (
|
||||
defaultNamespace string = "default"
|
||||
|
||||
// default policy report
|
||||
defaultPolicyReport *wgpolicy.PolicyReport = &wgpolicy.PolicyReport{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: policyReportName,
|
||||
Labels: map[string]string{
|
||||
"app.kubernetes.io/managed-by": "falcosidekick",
|
||||
},
|
||||
},
|
||||
Summary: wgpolicy.PolicyReportSummary{},
|
||||
}
|
||||
|
||||
// default cluster policy report
|
||||
defaultClusterPolicyReport *wgpolicy.ClusterPolicyReport = &wgpolicy.ClusterPolicyReport{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: clusterPolicyReportName,
|
||||
Labels: map[string]string{
|
||||
"app.kubernetes.io/managed-by": "falcosidekick",
|
||||
},
|
||||
},
|
||||
Summary: wgpolicy.PolicyReportSummary{},
|
||||
}
|
||||
|
||||
// used resources in the k8saudit ruleset
|
||||
resourceMapping = map[string]resource{
|
||||
"pods": {"v1", "Pod"},
|
||||
|
@ -96,12 +77,37 @@ var (
|
|||
}
|
||||
)
|
||||
|
||||
func NewPolicyReportClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
|
||||
func newPolicyReport() *wgpolicy.PolicyReport {
|
||||
return &wgpolicy.PolicyReport{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: policyReportName,
|
||||
Labels: map[string]string{
|
||||
"app.kubernetes.io/managed-by": "falcosidekick",
|
||||
},
|
||||
},
|
||||
Summary: wgpolicy.PolicyReportSummary{},
|
||||
}
|
||||
}
|
||||
|
||||
func newClusterPolicyReport() *wgpolicy.ClusterPolicyReport {
|
||||
return &wgpolicy.ClusterPolicyReport{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: clusterPolicyReportName,
|
||||
Labels: map[string]string{
|
||||
"app.kubernetes.io/managed-by": "falcosidekick",
|
||||
},
|
||||
},
|
||||
Summary: wgpolicy.PolicyReportSummary{},
|
||||
}
|
||||
}
|
||||
|
||||
func NewPolicyReportClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics,
|
||||
otlpMetrics *otlpmetrics.OTLPMetrics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
|
||||
clientConfig, err := rest.InClusterConfig()
|
||||
if err != nil {
|
||||
clientConfig, err = clientcmd.BuildConfigFromFlags("", config.PolicyReport.Kubeconfig)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : PolicyReport - Unable to load kube config file: %v\n", err)
|
||||
utils.Log(utils.ErrorLvl, "PolicyReport", fmt.Sprintf("Unable to load kube config file: %v", err))
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
@ -117,7 +123,7 @@ func NewPolicyReportClient(config *types.Configuration, stats *types.Statistics,
|
|||
if config.PolicyReport.FalcoNamespace == "" {
|
||||
dat, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : PolicyReport - Unable to get the Falcosidekick's namespace, '%v' used instead\n", defaultNamespace)
|
||||
utils.Log(utils.ErrorLvl, "PolicyReport", fmt.Sprintf("Unable to get the Falcosidekick's namespace, '%v' used instead", defaultNamespace))
|
||||
} else {
|
||||
defaultNamespace = string(dat)
|
||||
}
|
||||
|
@ -130,6 +136,7 @@ func NewPolicyReportClient(config *types.Configuration, stats *types.Statistics,
|
|||
Config: config,
|
||||
Stats: stats,
|
||||
PromStats: promStats,
|
||||
OTLPMetrics: otlpMetrics,
|
||||
StatsdClient: statsdClient,
|
||||
DogstatsdClient: dogstatsdClient,
|
||||
KubernetesClient: clientset,
|
||||
|
@ -154,10 +161,14 @@ func (c *Client) UpdateOrCreatePolicyReport(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:policyreport", "status:" + OK})
|
||||
c.Stats.PolicyReport.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "policyreport", "status": OK}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "policyreport"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
} else {
|
||||
go c.CountMetric(Outputs, 1, []string{"output:policyreport", "status:" + Error})
|
||||
c.Stats.PolicyReport.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "policyreport", "status": Error}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "policyreport"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -173,7 +184,7 @@ func newResult(falcopayload types.FalcoPayload) *wgpolicy.PolicyReportResult {
|
|||
Rule: falcopayload.Rule,
|
||||
Category: "SI - System and Information Integrity",
|
||||
Source: policyReportSource,
|
||||
Timestamp: metav1.Timestamp{Seconds: int64(falcopayload.Time.Second()), Nanos: int32(falcopayload.Time.Nanosecond())},
|
||||
Timestamp: metav1.Timestamp{Seconds: int64(falcopayload.Time.Second()), Nanos: int32(falcopayload.Time.Nanosecond())}, //nolint:gosec // disable G115
|
||||
Severity: mapSeverity(falcopayload),
|
||||
Result: mapResult(falcopayload),
|
||||
Description: falcopayload.Output,
|
||||
|
@ -183,12 +194,12 @@ func newResult(falcopayload types.FalcoPayload) *wgpolicy.PolicyReportResult {
|
|||
}
|
||||
|
||||
func (c *Client) createOrUpdatePolicyReport(result *wgpolicy.PolicyReportResult, namespace string) error {
|
||||
action := update
|
||||
action := updateStr
|
||||
|
||||
_, err := c.KubernetesClient.CoreV1().Namespaces().Get(context.Background(), namespace, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
if errorsv1.IsNotFound(err) {
|
||||
log.Printf("[INFO] : PolicyReport - Can't find the namespace '%v', fallback to '%v'\n", namespace, defaultNamespace)
|
||||
utils.Log(utils.InfoLvl, c.OutputType, fmt.Sprintf("Can't find the namespace '%v', fallback to '%v'", namespace, defaultNamespace))
|
||||
namespace = defaultNamespace
|
||||
result.Subjects[0].Namespace = defaultNamespace
|
||||
}
|
||||
|
@ -201,8 +212,8 @@ func (c *Client) createOrUpdatePolicyReport(result *wgpolicy.PolicyReportResult,
|
|||
}
|
||||
}
|
||||
if policyr.Name == "" {
|
||||
policyr = defaultPolicyReport
|
||||
action = create
|
||||
policyr = newPolicyReport()
|
||||
action = createStr
|
||||
}
|
||||
|
||||
policyr.Results = append(policyr.Results, *result)
|
||||
|
@ -213,41 +224,41 @@ func (c *Client) createOrUpdatePolicyReport(result *wgpolicy.PolicyReportResult,
|
|||
|
||||
policyr.Summary = getSummary(policyr.Results)
|
||||
|
||||
if action == create {
|
||||
if action == createStr {
|
||||
_, err := c.Crdclient.Wgpolicyk8sV1alpha2().PolicyReports(namespace).Create(context.Background(), policyr, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
if errorsv1.IsAlreadyExists(err) {
|
||||
action = update
|
||||
action = updateStr
|
||||
policyr, err = c.Crdclient.Wgpolicyk8sV1alpha2().PolicyReports(namespace).Get(context.Background(), policyReportName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : PolicyReport - Error with with the Policy Report %v in namespace %v: %v\n", policyReportName, namespace, err)
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("Error with with the Policy Report %v in namespace %v: %v", policyReportName, namespace, err))
|
||||
return err
|
||||
}
|
||||
_, err := c.Crdclient.Wgpolicyk8sV1alpha2().PolicyReports(namespace).Update(context.Background(), policyr, metav1.UpdateOptions{})
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : PolicyReport - Can't %v the Policy Report %v in namespace %v: %v\n", action, policyReportName, namespace, err)
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("Can't %v the Policy Report %v in namespace %v: %v", action, policyReportName, namespace, err))
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
log.Printf("[ERROR] : PolicyReport - Can't %v the Policy Report %v in namespace %v: %v\n", action, policyReportName, namespace, err)
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("Can't %v the Policy Report %v in namespace %v: %v", action, policyReportName, namespace, err))
|
||||
return err
|
||||
}
|
||||
}
|
||||
log.Printf("[INFO] : PolicyReport - %v the Policy Report %v in namespace %v\n", action, policyReportName, namespace)
|
||||
utils.Log(utils.InfoLvl, c.OutputType, fmt.Sprintf("%v the Policy Report %v in namespace %v", action, policyReportName, namespace))
|
||||
return nil
|
||||
} else {
|
||||
_, err := c.Crdclient.Wgpolicyk8sV1alpha2().PolicyReports(namespace).Update(context.Background(), policyr, metav1.UpdateOptions{})
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : PolicyReport - Can't %v the Policy Report %v in namespace %v: %v\n", action, policyReportName, namespace, err)
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("Can't %v the Policy Report %v in namespace %v: %v", action, policyReportName, namespace, err))
|
||||
return err
|
||||
}
|
||||
log.Printf("[INFO] : PolicyReport - %v the Policy Report %v in namespace %v\n", action, policyReportName, namespace)
|
||||
utils.Log(utils.InfoLvl, c.OutputType, fmt.Sprintf("%v the Policy Report %v in namespace %v", action, policyReportName, namespace))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Client) createOrUpdateClusterPolicyReport(result *wgpolicy.PolicyReportResult) error {
|
||||
action := update
|
||||
action := updateStr
|
||||
|
||||
cpolicyr, err := c.Crdclient.Wgpolicyk8sV1alpha2().ClusterPolicyReports().Get(context.Background(), clusterPolicyReportName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
|
@ -255,9 +266,9 @@ func (c *Client) createOrUpdateClusterPolicyReport(result *wgpolicy.PolicyReport
|
|||
return err
|
||||
}
|
||||
}
|
||||
if cpolicyr == nil {
|
||||
cpolicyr = defaultClusterPolicyReport
|
||||
action = create
|
||||
if cpolicyr.Name == "" {
|
||||
cpolicyr = newClusterPolicyReport()
|
||||
action = createStr
|
||||
}
|
||||
|
||||
cpolicyr.Results = append(cpolicyr.Results, *result)
|
||||
|
@ -268,35 +279,35 @@ func (c *Client) createOrUpdateClusterPolicyReport(result *wgpolicy.PolicyReport
|
|||
|
||||
cpolicyr.Summary = getSummary(cpolicyr.Results)
|
||||
|
||||
if action == create {
|
||||
if action == createStr {
|
||||
_, err := c.Crdclient.Wgpolicyk8sV1alpha2().ClusterPolicyReports().Create(context.Background(), cpolicyr, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
if errorsv1.IsAlreadyExists(err) {
|
||||
action = update
|
||||
action = updateStr
|
||||
cpolicyr, err = c.Crdclient.Wgpolicyk8sV1alpha2().ClusterPolicyReports().Get(context.Background(), policyReportName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : PolicyReport - Error with with the Cluster Policy Report %v: %v\n", policyReportName, err)
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("Error with with the Cluster Policy Report %v: %v", policyReportName, err))
|
||||
return err
|
||||
}
|
||||
_, err := c.Crdclient.Wgpolicyk8sV1alpha2().ClusterPolicyReports().Update(context.Background(), cpolicyr, metav1.UpdateOptions{})
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : PolicyReport - Can't %v the Cluster Policy Report %v: %v\n", action, policyReportName, err)
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("Can't %v the Cluster Policy Report %v: %v", action, policyReportName, err))
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
log.Printf("[ERROR] : PolicyReport - Can't %v the Cluster Policy Report %v: %v\n", action, clusterPolicyReportName, err)
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("Can't %v the Cluster Policy Report %v: %v", action, clusterPolicyReportName, err))
|
||||
return err
|
||||
}
|
||||
}
|
||||
log.Printf("[INFO] : PolicyReport - %v Cluster the Policy Report %v\n", action, policyReportName)
|
||||
utils.Log(utils.InfoLvl, c.OutputType, fmt.Sprintf("%v Cluster the Policy Report %v", action, policyReportName))
|
||||
return nil
|
||||
} else {
|
||||
_, err := c.Crdclient.Wgpolicyk8sV1alpha2().ClusterPolicyReports().Update(context.Background(), cpolicyr, metav1.UpdateOptions{})
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : PolicyReport - Can't %v the Cluster Policy Report %v: %v\n", action, clusterPolicyReportName, err)
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("Can't %v the Cluster Policy Report %v: %v", action, clusterPolicyReportName, err))
|
||||
return err
|
||||
}
|
||||
log.Printf("[INFO] : PolicyReport - %v the ClusterPolicy Report %v\n", action, policyReportName)
|
||||
utils.Log(utils.InfoLvl, c.OutputType, fmt.Sprintf("%v the ClusterPolicy Report %v", action, policyReportName))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
@ -322,25 +333,25 @@ func getSummary(results []wgpolicy.PolicyReportResult) wgpolicy.PolicyReportSumm
|
|||
|
||||
func mapResult(event types.FalcoPayload) wgpolicy.PolicyResult {
|
||||
if event.Priority <= types.Notice {
|
||||
return skip
|
||||
return skipStr
|
||||
} else if event.Priority == types.Warning {
|
||||
return warn
|
||||
return warnStr
|
||||
} else {
|
||||
return fail
|
||||
return failStr
|
||||
}
|
||||
}
|
||||
|
||||
func mapSeverity(event types.FalcoPayload) wgpolicy.PolicyResultSeverity {
|
||||
if event.Priority <= types.Informational {
|
||||
return info
|
||||
return infoStr
|
||||
} else if event.Priority <= types.Notice {
|
||||
return low
|
||||
return lowStr
|
||||
} else if event.Priority <= types.Warning {
|
||||
return medium
|
||||
return mediumStr
|
||||
} else if event.Priority <= types.Error {
|
||||
return high
|
||||
return highStr
|
||||
} else {
|
||||
return critical
|
||||
return criticalStr
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -4,8 +4,11 @@ package outputs
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
|
@ -48,12 +51,12 @@ func (c *Client) checkQuickwitIndexAlreadyExists(args types.InitClientArgs) bool
|
|||
config := args.Config.Quickwit
|
||||
|
||||
endpointUrl := fmt.Sprintf("%s/%s/indexes/%s/describe", config.HostPort, config.ApiEndpoint, config.Index)
|
||||
quickwitCheckClient, err := NewClient("QuickwitCheckAlreadyExists", endpointUrl, config.MutualTLS, config.CheckCert, args)
|
||||
quickwitCheckClient, err := NewClient("QuickwitCheckAlreadyExists", endpointUrl, config.CommonConfig, args)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if nil != quickwitCheckClient.sendRequest("GET", "") {
|
||||
if nil != quickwitCheckClient.Get() {
|
||||
return false
|
||||
}
|
||||
|
||||
|
@ -68,7 +71,7 @@ func (c *Client) AutoCreateQuickwitIndex(args types.InitClientArgs) error {
|
|||
}
|
||||
|
||||
endpointUrl := fmt.Sprintf("%s/%s/indexes", config.HostPort, config.ApiEndpoint)
|
||||
quickwitInitClient, err := NewClient("QuickwitInit", endpointUrl, config.MutualTLS, config.CheckCert, args)
|
||||
quickwitInitClient, err := NewClient("QuickwitInit", endpointUrl, config.CommonConfig, args)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -142,7 +145,7 @@ func (c *Client) AutoCreateQuickwitIndex(args types.InitClientArgs) error {
|
|||
}
|
||||
|
||||
if args.Config.Debug {
|
||||
log.Printf("[DEBUG] : Quickwit - mapping: %#v\n", mapping)
|
||||
utils.Log(utils.DebugLvl, c.OutputType, fmt.Sprintf("mapping: %v", mapping))
|
||||
}
|
||||
|
||||
err = quickwitInitClient.Post(mapping)
|
||||
|
@ -158,25 +161,23 @@ func (c *Client) AutoCreateQuickwitIndex(args types.InitClientArgs) error {
|
|||
func (c *Client) QuickwitPost(falcopayload types.FalcoPayload) {
|
||||
c.Stats.Quickwit.Add(Total, 1)
|
||||
|
||||
if len(c.Config.Quickwit.CustomHeaders) != 0 {
|
||||
c.httpClientLock.Lock()
|
||||
defer c.httpClientLock.Unlock()
|
||||
for i, j := range c.Config.Quickwit.CustomHeaders {
|
||||
c.AddHeader(i, j)
|
||||
}
|
||||
}
|
||||
|
||||
if c.Config.Debug {
|
||||
log.Printf("[DEBUG] : Quickwit - ingesting payload: %v\n", falcopayload)
|
||||
utils.Log(utils.DebugLvl, c.OutputType, fmt.Sprintf("ingesting payload: %v", falcopayload))
|
||||
}
|
||||
|
||||
err := c.Post(falcopayload)
|
||||
err := c.Post(falcopayload, func(req *http.Request) {
|
||||
for i, j := range c.Config.Quickwit.CustomHeaders {
|
||||
req.Header.Set(i, j)
|
||||
}
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
go c.CountMetric(Outputs, 1, []string{"output:quickwit", "status:error"})
|
||||
c.Stats.Quickwit.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "quickwit", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : Quickwit - %v\n", err.Error())
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "quickwit"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -184,4 +185,6 @@ func (c *Client) QuickwitPost(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:quickwit", "status:ok"})
|
||||
c.Stats.Quickwit.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "quickwit", "status": OK}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "quickwit"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
|
|
@ -5,26 +5,31 @@ package outputs
|
|||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"log"
|
||||
"fmt"
|
||||
|
||||
"github.com/DataDog/datadog-go/statsd"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
amqp "github.com/rabbitmq/amqp091-go"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
otlpmetrics "github.com/falcosecurity/falcosidekick/outputs/otlp_metrics"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
// NewRabbitmqClient returns a new output.Client for accessing the RabbitmMQ API.
|
||||
func NewRabbitmqClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
|
||||
func NewRabbitmqClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics,
|
||||
otlpMetrics *otlpmetrics.OTLPMetrics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
|
||||
|
||||
var channel *amqp.Channel
|
||||
if config.Rabbitmq.URL != "" && config.Rabbitmq.Queue != "" {
|
||||
conn, err := amqp.Dial(config.Rabbitmq.URL)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : Rabbitmq - %v\n", "Error while connecting rabbitmq")
|
||||
utils.Log(utils.ErrorLvl, "Rabbitmq", "Error while connecting rabbitmq")
|
||||
return nil, errors.New("error while connecting Rabbitmq")
|
||||
}
|
||||
ch, err := conn.Channel()
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : Rabbitmq Channel - %v\n", "Error while creating rabbitmq channel")
|
||||
utils.Log(utils.ErrorLvl, "Rabbitmq", "Error while creating rabbitmq channel")
|
||||
return nil, errors.New("error while creating rabbitmq channel")
|
||||
}
|
||||
channel = ch
|
||||
|
@ -36,6 +41,7 @@ func NewRabbitmqClient(config *types.Configuration, stats *types.Statistics, pro
|
|||
RabbitmqClient: channel,
|
||||
Stats: stats,
|
||||
PromStats: promStats,
|
||||
OTLPMetrics: otlpMetrics,
|
||||
StatsdClient: statsdClient,
|
||||
DogstatsdClient: dogstatsdClient,
|
||||
}, nil
|
||||
|
@ -53,16 +59,20 @@ func (c *Client) Publish(falcopayload types.FalcoPayload) {
|
|||
})
|
||||
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : RabbitMQ - %v - %v\n", "Error while publishing message", err.Error())
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("Error while publishing message: %v", err))
|
||||
c.Stats.Rabbitmq.Add(Error, 1)
|
||||
go c.CountMetric("outputs", 1, []string{"output:rabbitmq", "status:error"})
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "rabbitmq", "status": Error}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "rabbitmq"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
log.Printf("[INFO] : RabbitMQ - Send to message OK \n")
|
||||
utils.Log(utils.InfoLvl, c.OutputType, "Message published OK")
|
||||
c.Stats.Rabbitmq.Add(OK, 1)
|
||||
go c.CountMetric("outputs", 1, []string{"output:rabbitmq", "status:ok"})
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "rabbitmq", "status": OK}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "rabbitmq"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
|
|
@ -5,23 +5,29 @@ package outputs
|
|||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"log"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/DataDog/datadog-go/statsd"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
"github.com/redis/go-redis/v9"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
otlpmetrics "github.com/falcosecurity/falcosidekick/outputs/otlp_metrics"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
func (c *Client) ReportError(err error) {
|
||||
go c.CountMetric(Outputs, 1, []string{"output:redis", "status:error"})
|
||||
c.Stats.Redis.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "redis", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : Redis - %v\n", err)
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "redis"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
}
|
||||
|
||||
func NewRedisClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics,
|
||||
statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
|
||||
otlpMetrics *otlpmetrics.OTLPMetrics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
|
||||
|
||||
rClient := redis.NewClient(&redis.Options{
|
||||
Addr: config.Redis.Address,
|
||||
|
@ -31,9 +37,9 @@ func NewRedisClient(config *types.Configuration, stats *types.Statistics, promSt
|
|||
// Ping the Redis server to check if it's running
|
||||
pong, err := rClient.Ping(context.Background()).Result()
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : Redis - Misconfiguration, cannot connect to the server %v\n", err)
|
||||
utils.Log(utils.ErrorLvl, "Redis", fmt.Sprintf("Misconfiguration, cannot connect to the server: %v", err))
|
||||
}
|
||||
log.Printf("[INFO] : Redis - Connected to redis server: %v\n", pong)
|
||||
utils.Log(utils.InfoLvl, "Redis", fmt.Sprintf("Connected to redis server: %v", pong))
|
||||
|
||||
return &Client{
|
||||
OutputType: "Redis",
|
||||
|
@ -41,6 +47,7 @@ func NewRedisClient(config *types.Configuration, stats *types.Statistics, promSt
|
|||
RedisClient: rClient,
|
||||
Stats: stats,
|
||||
PromStats: promStats,
|
||||
OTLPMetrics: otlpMetrics,
|
||||
StatsdClient: statsdClient,
|
||||
DogstatsdClient: dogstatsdClient,
|
||||
}, nil
|
||||
|
@ -65,4 +72,5 @@ func (c *Client) RedisPost(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:redis", "status:ok"})
|
||||
c.Stats.Redis.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "redis", "status": OK}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "redis"), attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
|
|
@ -4,10 +4,13 @@ package outputs
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"log"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
|
@ -73,7 +76,7 @@ func newRocketchatPayload(falcopayload types.FalcoPayload, config *types.Configu
|
|||
if config.Rocketchat.MessageFormatTemplate != nil {
|
||||
buf := &bytes.Buffer{}
|
||||
if err := config.Rocketchat.MessageFormatTemplate.Execute(buf, falcopayload); err != nil {
|
||||
log.Printf("[ERROR] : RocketChat - Error expanding RocketChat message %v", err)
|
||||
utils.Log(utils.ErrorLvl, "RocketChat", fmt.Sprintf("Error expanding RocketChat message: %v", err))
|
||||
} else {
|
||||
messageText = buf.String()
|
||||
}
|
||||
|
@ -127,7 +130,9 @@ func (c *Client) RocketchatPost(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:rocketchat", "status:error"})
|
||||
c.Stats.Rocketchat.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "rocketchat", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : RocketChat - %v\n", err.Error())
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "rocketchat"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -135,4 +140,6 @@ func (c *Client) RocketchatPost(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:rocketchat", "status:ok"})
|
||||
c.Stats.Rocketchat.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "rocketchat", "status": OK}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "rocketchat"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
|
|
@ -4,10 +4,13 @@ package outputs
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"log"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
|
@ -103,7 +106,7 @@ func newSlackPayload(falcopayload types.FalcoPayload, config *types.Configuratio
|
|||
if config.Slack.MessageFormatTemplate != nil {
|
||||
buf := &bytes.Buffer{}
|
||||
if err := config.Slack.MessageFormatTemplate.Execute(buf, falcopayload); err != nil {
|
||||
log.Printf("[ERROR] : Slack - Error expanding Slack message %v", err)
|
||||
utils.Log(utils.ErrorLvl, "Slack", fmt.Sprintf("Error expanding Slack message: %v", err))
|
||||
} else {
|
||||
messageText = buf.String()
|
||||
}
|
||||
|
@ -154,7 +157,9 @@ func (c *Client) SlackPost(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:slack", "status:error"})
|
||||
c.Stats.Slack.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "slack", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : Slack - %v\n", err)
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "slack"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -162,4 +167,5 @@ func (c *Client) SlackPost(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:slack", "status:ok"})
|
||||
c.Stats.Slack.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "slack", "status": OK}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "slack"), attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
|
|
@ -5,8 +5,8 @@ package outputs
|
|||
import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
htmlTemplate "html/template"
|
||||
"log"
|
||||
"net"
|
||||
"regexp"
|
||||
"strconv"
|
||||
|
@ -17,6 +17,8 @@ import (
|
|||
sasl "github.com/emersion/go-sasl"
|
||||
smtp "github.com/emersion/go-smtp"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
otlpmetrics "github.com/falcosecurity/falcosidekick/outputs/otlp_metrics"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
|
@ -31,10 +33,11 @@ type SMTPPayload struct {
|
|||
}
|
||||
|
||||
// NewSMTPClient returns a new output.Client for accessing a SMTP server.
|
||||
func NewSMTPClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
|
||||
func NewSMTPClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics,
|
||||
otlpMetrics *otlpmetrics.OTLPMetrics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
|
||||
reg := regexp.MustCompile(`.*:[0-9]+`)
|
||||
if !reg.MatchString(config.SMTP.HostPort) {
|
||||
log.Printf("[ERROR] : SMTP - Bad Host:Port\n")
|
||||
utils.Log(utils.ErrorLvl, "SMTP", "Bad Host:Port")
|
||||
return nil, ErrClientCreation
|
||||
}
|
||||
|
||||
|
@ -43,6 +46,7 @@ func NewSMTPClient(config *types.Configuration, stats *types.Statistics, promSta
|
|||
Config: config,
|
||||
Stats: stats,
|
||||
PromStats: promStats,
|
||||
OTLPMetrics: otlpMetrics,
|
||||
StatsdClient: statsdClient,
|
||||
DogstatsdClient: dogstatsdClient,
|
||||
}, nil
|
||||
|
@ -71,7 +75,7 @@ func newSMTPPayload(falcopayload types.FalcoPayload, config *types.Configuration
|
|||
var outtext bytes.Buffer
|
||||
err := ttmpl.Execute(&outtext, falcopayload)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : SMTP - %v\n", err)
|
||||
utils.Log(utils.ErrorLvl, "SMTP", err.Error())
|
||||
return s
|
||||
}
|
||||
s.Body += outtext.String()
|
||||
|
@ -87,7 +91,7 @@ func newSMTPPayload(falcopayload types.FalcoPayload, config *types.Configuration
|
|||
var outhtml bytes.Buffer
|
||||
err = htmpl.Execute(&outhtml, falcopayload)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : SMTP - %v\n", err)
|
||||
utils.Log(utils.ErrorLvl, "SMTP", err.Error())
|
||||
return s
|
||||
}
|
||||
s.Body += outhtml.String()
|
||||
|
@ -98,7 +102,7 @@ func newSMTPPayload(falcopayload types.FalcoPayload, config *types.Configuration
|
|||
func (c *Client) ReportErr(message string, err error) {
|
||||
go c.CountMetric("outputs", 1, []string{"output:smtp", "status:error"})
|
||||
c.Stats.SMTP.Add(Error, 1)
|
||||
log.Printf("[ERROR] : SMTP - %s : %v\n", message, err)
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("%s : %v", message, err))
|
||||
}
|
||||
|
||||
func (c *Client) GetAuth() (sasl.Client, error) {
|
||||
|
@ -160,11 +164,11 @@ func (c *Client) SendMail(falcopayload types.FalcoPayload) {
|
|||
body := sp.Subject + "\n" + sp.Body
|
||||
|
||||
if c.Config.Debug {
|
||||
log.Printf("[DEBUG] : SMTP payload : \nServer: %v\n%v\n%v\nSubject: %v\n", c.Config.SMTP.HostPort, sp.From, sp.To, sp.Subject)
|
||||
utils.Log(utils.DebugLvl, c.OutputType, fmt.Sprintf("payload : \nServer: %v\n%v\n%v\nSubject: %v", c.Config.SMTP.HostPort, sp.From, sp.To, sp.Subject))
|
||||
if c.Config.SMTP.AuthMechanism != "" {
|
||||
log.Printf("[DEBUG] : SMTP - SASL Auth : \nMechanisms: %v\nUser: %v\nToken: %v\nIdentity: %v\nTrace: %v\n", c.Config.SMTP.AuthMechanism, c.Config.SMTP.User, c.Config.SMTP.Token, c.Config.SMTP.Identity, c.Config.SMTP.Trace)
|
||||
utils.Log(utils.DebugLvl, c.OutputType, fmt.Sprintf("SASL Auth : \nMechanisms: %v\nUser: %v\nToken: %v\nIdentity: %v\nTrace: %v", c.Config.SMTP.AuthMechanism, c.Config.SMTP.User, c.Config.SMTP.Token, c.Config.SMTP.Identity, c.Config.SMTP.Trace))
|
||||
} else {
|
||||
log.Printf("[DEBUG] : SMTP - SASL Auth : Disabled\n")
|
||||
utils.Log(utils.DebugLvl, c.OutputType, "SASL Auth : Disabled")
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -175,7 +179,7 @@ func (c *Client) SendMail(falcopayload types.FalcoPayload) {
|
|||
return
|
||||
}
|
||||
|
||||
log.Printf("[INFO] : SMTP - Sent OK\n")
|
||||
utils.Log(utils.InfoLvl, c.OutputType, " SMTP - Sent OK\n")
|
||||
go c.CountMetric("outputs", 1, []string{"output:smtp", "status:ok"})
|
||||
c.Stats.SMTP.Add(OK, 1)
|
||||
}
|
||||
|
|
|
@ -8,15 +8,18 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/DataDog/datadog-go/statsd"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
"github.com/google/uuid"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
otlpmetrics "github.com/falcosecurity/falcosidekick/outputs/otlp_metrics"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
const Falcosidekick_ string = "falcosidekick_"
|
||||
|
@ -175,7 +178,7 @@ func newSpyderbatPayload(falcopayload types.FalcoPayload) (spyderbatPayload, err
|
|||
MonotonicTime: time.Now().Nanosecond(),
|
||||
OrcTime: nowTime,
|
||||
Time: eventTime,
|
||||
PID: int32(pid),
|
||||
PID: int32(pid), //nolint:gosec // disable G115
|
||||
Level: level,
|
||||
Message: message,
|
||||
Arguments: arguments,
|
||||
|
@ -184,17 +187,17 @@ func newSpyderbatPayload(falcopayload types.FalcoPayload) (spyderbatPayload, err
|
|||
}
|
||||
|
||||
func NewSpyderbatClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics,
|
||||
statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
|
||||
otlpMetrics *otlpmetrics.OTLPMetrics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
|
||||
|
||||
hasSource, err := isSourcePresent(config)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : Spyderbat - %v\n", err.Error())
|
||||
utils.Log(utils.ErrorLvl, "Spyderbat", err.Error())
|
||||
return nil, ErrClientCreation
|
||||
}
|
||||
if !hasSource {
|
||||
if err := makeSource(config); err != nil {
|
||||
if hasSource, err2 := isSourcePresent(config); err2 != nil || !hasSource {
|
||||
log.Printf("[ERROR] : Spyderbat - %v\n", err.Error())
|
||||
utils.Log(utils.ErrorLvl, "Spyderbat", err.Error())
|
||||
return nil, ErrClientCreation
|
||||
}
|
||||
}
|
||||
|
@ -203,49 +206,51 @@ func NewSpyderbatClient(config *types.Configuration, stats *types.Statistics, pr
|
|||
source := Falcosidekick_ + config.Spyderbat.OrgUID
|
||||
data_url, err := url.JoinPath(config.Spyderbat.APIUrl, APIv1Path+config.Spyderbat.OrgUID+SourcePath+source+"/data/sb-agent")
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : Spyderbat - %v\n", err.Error())
|
||||
utils.Log(utils.ErrorLvl, "Spyderbat", err.Error())
|
||||
return nil, ErrClientCreation
|
||||
}
|
||||
endpointURL, err := url.Parse(data_url)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : Spyderbat - %v\n", err.Error())
|
||||
utils.Log(utils.ErrorLvl, "Spyderbat", err.Error())
|
||||
return nil, ErrClientCreation
|
||||
}
|
||||
return &Client{
|
||||
OutputType: "Spyderbat",
|
||||
EndpointURL: endpointURL,
|
||||
MutualTLSEnabled: false,
|
||||
CheckCert: true,
|
||||
ContentType: "application/ndjson",
|
||||
Config: config,
|
||||
Stats: stats,
|
||||
PromStats: promStats,
|
||||
StatsdClient: statsdClient,
|
||||
DogstatsdClient: dogstatsdClient,
|
||||
OutputType: "Spyderbat",
|
||||
EndpointURL: endpointURL,
|
||||
cfg: types.CommonConfig{MutualTLS: false, CheckCert: true, MaxConcurrentRequests: 1},
|
||||
ContentType: "application/ndjson",
|
||||
Config: config,
|
||||
Stats: stats,
|
||||
PromStats: promStats,
|
||||
OTLPMetrics: otlpMetrics,
|
||||
StatsdClient: statsdClient,
|
||||
DogstatsdClient: dogstatsdClient,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *Client) SpyderbatPost(falcopayload types.FalcoPayload) {
|
||||
c.Stats.Spyderbat.Add(Total, 1)
|
||||
|
||||
c.httpClientLock.Lock()
|
||||
defer c.httpClientLock.Unlock()
|
||||
c.AddHeader("Authorization", "Bearer "+c.Config.Spyderbat.APIKey)
|
||||
c.AddHeader("Content-Encoding", "gzip")
|
||||
|
||||
payload, err := newSpyderbatPayload(falcopayload)
|
||||
if err == nil {
|
||||
err = c.Post(payload)
|
||||
err = c.Post(payload, func(req *http.Request) {
|
||||
req.Header.Set("Authorization", "Bearer "+c.Config.Spyderbat.APIKey)
|
||||
req.Header.Set("Content-Encoding", "gzip")
|
||||
})
|
||||
}
|
||||
if err != nil {
|
||||
go c.CountMetric(Outputs, 1, []string{"output:spyderbat", "status:error"})
|
||||
c.Stats.Spyderbat.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "spyderbat", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : Spyderbat - %v\n", err.Error())
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "spyderbat"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
go c.CountMetric(Outputs, 1, []string{"output:spyderbat", "status:ok"})
|
||||
c.Stats.Spyderbat.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "spyderbat", "status": OK}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "spyderbat"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
|
|
@ -4,11 +4,12 @@ package outputs
|
|||
|
||||
import (
|
||||
"encoding/json"
|
||||
"log"
|
||||
"strings"
|
||||
|
||||
stan "github.com/nats-io/stan.go"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
|
@ -16,26 +17,33 @@ import (
|
|||
func (c *Client) StanPublish(falcopayload types.FalcoPayload) {
|
||||
c.Stats.Stan.Add(Total, 1)
|
||||
|
||||
subject := c.Config.Stan.SubjectTemplate
|
||||
if len(subject) == 0 {
|
||||
subject = defaultNatsSubjects
|
||||
}
|
||||
|
||||
subject = strings.ReplaceAll(subject, "<priority>", strings.ToLower(falcopayload.Priority.String()))
|
||||
subject = strings.ReplaceAll(subject, "<rule>", strings.Trim(slugRegExp.ReplaceAllString(strings.ToLower(falcopayload.Rule), "_"), "_"))
|
||||
|
||||
nc, err := stan.Connect(c.Config.Stan.ClusterID, c.Config.Stan.ClientID, stan.NatsURL(c.EndpointURL.String()))
|
||||
if err != nil {
|
||||
c.setStanErrorMetrics()
|
||||
log.Printf("[ERROR] : STAN - %v\n", err.Error())
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
defer nc.Close()
|
||||
|
||||
r := strings.Trim(slugRegularExpression.ReplaceAllString(strings.ToLower(falcopayload.Rule), "_"), "_")
|
||||
j, err := json.Marshal(falcopayload)
|
||||
if err != nil {
|
||||
c.setStanErrorMetrics()
|
||||
log.Printf("[ERROR] : STAN - %v\n", err.Error())
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
err = nc.Publish("falco."+strings.ToLower(falcopayload.Priority.String())+"."+r, j)
|
||||
err = nc.Publish(subject, j)
|
||||
if err != nil {
|
||||
c.setStanErrorMetrics()
|
||||
log.Printf("[ERROR] : STAN - %v\n", err)
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -43,7 +51,8 @@ func (c *Client) StanPublish(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:stan", "status:ok"})
|
||||
c.Stats.Stan.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "stan", "status": OK}).Inc()
|
||||
log.Printf("[INFO] : STAN - Publish OK\n")
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "stan"), attribute.String("status", OK)).Inc()
|
||||
utils.Log(utils.InfoLvl, c.OutputType, "Publish OK")
|
||||
}
|
||||
|
||||
// setStanErrorMetrics set the error stats
|
||||
|
@ -51,4 +60,7 @@ func (c *Client) setStanErrorMetrics() {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:stan", "status:error"})
|
||||
c.Stats.Stan.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "stan", "status": Error}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "stan"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
|
||||
}
|
||||
|
|
|
@ -3,11 +3,13 @@
|
|||
package outputs
|
||||
|
||||
import (
|
||||
"log"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/DataDog/datadog-go/statsd"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
|
@ -25,7 +27,7 @@ func NewStatsdClient(outputType string, config *types.Configuration, stats *type
|
|||
fwd = config.Dogstatsd.Forwarder
|
||||
}
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] : Can't configure %v client for %v - %v", outputType, fwd, err)
|
||||
utils.Log(utils.ErrorLvl, outputType, fmt.Sprintf("Can't configure client for %v - %v", fwd, err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
@ -47,14 +49,18 @@ func (c *Client) CountMetric(metric string, value int64, tags []string) {
|
|||
if err := c.StatsdClient.Count(metric+t, value, []string{}, 1); err != nil {
|
||||
c.Stats.Statsd.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "statsd", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : StatsD - Unable to send metric (%v%v%v) : %v\n", c.Config.Statsd.Namespace, metric, t, err)
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "statsd"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("Unable to send metric (%v%v%v) : %v", c.Config.Statsd.Namespace, metric, t, err))
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
c.Stats.Statsd.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "statsd", "status": OK}).Inc()
|
||||
log.Printf("[INFO] : StatsD - Send Metric OK (%v%v%v)\n", c.Config.Statsd.Namespace, metric, t)
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "statsd"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
utils.Log(utils.InfoLvl, c.OutputType, fmt.Sprintf("Send Metric OK (%v%v%v)", c.Config.Statsd.Namespace, metric, t))
|
||||
}
|
||||
|
||||
if c.DogstatsdClient != nil {
|
||||
|
@ -62,13 +68,17 @@ func (c *Client) CountMetric(metric string, value int64, tags []string) {
|
|||
if err := c.DogstatsdClient.Count(metric, value, tags, 1); err != nil {
|
||||
c.Stats.Dogstatsd.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "dogstatsd", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : DogStatsD - Send Metric Error (%v%v%v) : %v\n", c.Config.Statsd.Namespace, metric, tags, err)
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "dogstatsd"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, fmt.Sprintf("Send Metric Error (%v%v%v) : %v", c.Config.Statsd.Namespace, metric, tags, err))
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
c.Stats.Dogstatsd.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "dogstatsd", "status": OK}).Inc()
|
||||
log.Printf("[INFO] : DogStatsD - Send Metric OK (%v%v %v)\n", c.Config.Statsd.Namespace, metric, tags)
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "dogstatsd"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
utils.Log(utils.InfoLvl, c.OutputType, fmt.Sprintf("Send Metric OK (%v%v %v)", c.Config.Statsd.Namespace, metric, tags))
|
||||
}
|
||||
}
|
||||
|
|
|
@ -3,9 +3,12 @@
|
|||
package outputs
|
||||
|
||||
import (
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
|
@ -16,28 +19,32 @@ func (c *Client) SumoLogicPost(falcopayload types.FalcoPayload) {
|
|||
endpointURL, err := url.Parse(c.Config.SumoLogic.ReceiverURL)
|
||||
if err != nil {
|
||||
c.setSumoLogicErrorMetrics()
|
||||
log.Printf("[ERROR] : %v - %v\n", c.OutputType, err.Error())
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
c.EndpointURL = endpointURL
|
||||
err = c.Post(falcopayload,
|
||||
func(req *http.Request) {
|
||||
if c.Config.SumoLogic.SourceCategory != "" {
|
||||
req.Header.Set("X-Sumo-Category", c.Config.SumoLogic.SourceCategory)
|
||||
}
|
||||
|
||||
if c.Config.SumoLogic.SourceCategory != "" {
|
||||
c.AddHeader("X-Sumo-Category", c.Config.SumoLogic.SourceCategory)
|
||||
}
|
||||
if c.Config.SumoLogic.SourceHost != "" {
|
||||
req.Header.Set("X-Sumo-Host", c.Config.SumoLogic.SourceHost)
|
||||
}
|
||||
|
||||
if c.Config.SumoLogic.SourceHost != "" {
|
||||
c.AddHeader("X-Sumo-Host", c.Config.SumoLogic.SourceHost)
|
||||
}
|
||||
if c.Config.SumoLogic.Name != "" {
|
||||
req.Header.Set("X-Sumo-Name", c.Config.SumoLogic.Name)
|
||||
}
|
||||
},
|
||||
func(req *http.Request) {
|
||||
req.URL = endpointURL
|
||||
},
|
||||
)
|
||||
|
||||
if c.Config.SumoLogic.Name != "" {
|
||||
c.AddHeader("X-Sumo-Name", c.Config.SumoLogic.Name)
|
||||
}
|
||||
|
||||
err = c.Post(falcopayload)
|
||||
if err != nil {
|
||||
c.setSumoLogicErrorMetrics()
|
||||
log.Printf("[ERROR] : %x - %v\n", c.OutputType, err)
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -45,6 +52,8 @@ func (c *Client) SumoLogicPost(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:sumologic", "status:ok"})
|
||||
c.Stats.SumoLogic.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "sumologic", "status": OK}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "sumologic"),
|
||||
attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
||||
// setSumoLogicErrorMetrics set the error stats
|
||||
|
@ -52,4 +61,6 @@ func (c *Client) setSumoLogicErrorMetrics() {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:sumologic", "status:error"})
|
||||
c.Stats.SumoLogic.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "sumologic", "status": Error}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "sumologic"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
}
|
||||
|
|
|
@ -5,16 +5,20 @@ package outputs
|
|||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"log/syslog"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/DataDog/datadog-go/statsd"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
otlpmetrics "github.com/falcosecurity/falcosidekick/outputs/otlp_metrics"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
func NewSyslogClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
|
||||
func NewSyslogClient(config *types.Configuration, stats *types.Statistics, promStats *types.PromStatistics,
|
||||
otlpMetrics *otlpmetrics.OTLPMetrics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {
|
||||
ok := isValidProtocolString(strings.ToLower(config.Syslog.Protocol))
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("failed to configure Syslog client: invalid protocol %s", config.Syslog.Protocol)
|
||||
|
@ -25,6 +29,7 @@ func NewSyslogClient(config *types.Configuration, stats *types.Statistics, promS
|
|||
Config: config,
|
||||
Stats: stats,
|
||||
PromStats: promStats,
|
||||
OTLPMetrics: otlpMetrics,
|
||||
StatsdClient: statsdClient,
|
||||
DogstatsdClient: dogstatsdClient,
|
||||
}, nil
|
||||
|
@ -86,7 +91,9 @@ func (c *Client) SyslogPost(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:syslog", "status:error"})
|
||||
c.Stats.Syslog.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "syslog", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : Syslog - %v\n", err)
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "syslog"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -122,11 +129,14 @@ func (c *Client) SyslogPost(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:syslog", "status:error"})
|
||||
c.Stats.Syslog.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "syslog", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : Syslog - %v\n", err)
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "syslog"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
go c.CountMetric(Outputs, 1, []string{"output:syslog", "status:ok"})
|
||||
c.Stats.Syslog.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "syslog", "status": OK}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "syslog"), attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
|
|
@ -3,9 +3,9 @@
|
|||
package outputs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
|
@ -18,13 +18,15 @@ func (c *Client) TalonPost(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:talon", "status:error"})
|
||||
c.Stats.Talon.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "talon", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : Talon - %v\n", err.Error())
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "talon"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
// Setting the success status
|
||||
go c.CountMetric(Outputs, 1, []string{"output:talon", "status:ok"})
|
||||
c.Stats.Talon.Add(OK, 1)
|
||||
fmt.Println("aaaaa")
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "talon", "status": OK}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "talon"), attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
|
|
@ -3,10 +3,12 @@
|
|||
package outputs
|
||||
|
||||
import (
|
||||
"log"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
|
@ -39,7 +41,7 @@ func newTeamsPayload(falcopayload types.FalcoPayload, config *types.Configuratio
|
|||
fact teamsFact
|
||||
)
|
||||
|
||||
section.ActivityTitle = "Falco Sidekick"
|
||||
section.ActivityTitle = "Falcosidekick"
|
||||
section.ActivitySubTitle = falcopayload.Time.String()
|
||||
|
||||
if config.Teams.OutputFormat == All || config.Teams.OutputFormat == Text || config.Teams.OutputFormat == "" {
|
||||
|
@ -123,7 +125,9 @@ func (c *Client) TeamsPost(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:teams", "status:error"})
|
||||
c.Stats.Teams.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "teams", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : Teams - %v\n", err)
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "teams"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -131,4 +135,5 @@ func (c *Client) TeamsPost(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:teams", "status:ok"})
|
||||
c.Stats.Teams.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "teams", "status": OK}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "teams"), attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
|
|
@ -18,7 +18,7 @@ func TestNewTeamsPayload(t *testing.T) {
|
|||
ThemeColor: "ccfff2",
|
||||
Sections: []teamsSection{
|
||||
{
|
||||
ActivityTitle: "Falco Sidekick",
|
||||
ActivityTitle: "Falcosidekick",
|
||||
ActivitySubTitle: "2001-01-01 01:10:00 +0000 UTC",
|
||||
ActivityImage: "",
|
||||
Text: "This is a test from falcosidekick",
|
||||
|
|
|
@ -3,8 +3,9 @@
|
|||
package outputs
|
||||
|
||||
import (
|
||||
"log"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/falcosecurity/falcosidekick/internal/pkg/utils"
|
||||
"github.com/falcosecurity/falcosidekick/types"
|
||||
)
|
||||
|
||||
|
@ -17,7 +18,9 @@ func (c *Client) TektonPost(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:tekton", "status:error"})
|
||||
c.Stats.Tekton.Add(Error, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "tekton", "status": Error}).Inc()
|
||||
log.Printf("[ERROR] : Tekton - %v\n", err.Error())
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "tekton"),
|
||||
attribute.String("status", Error)).Inc()
|
||||
utils.Log(utils.ErrorLvl, c.OutputType, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -25,4 +28,5 @@ func (c *Client) TektonPost(falcopayload types.FalcoPayload) {
|
|||
go c.CountMetric(Outputs, 1, []string{"output:tekton", "status:ok"})
|
||||
c.Stats.Tekton.Add(OK, 1)
|
||||
c.PromStats.Outputs.With(map[string]string{"destination": "tekton", "status": OK}).Inc()
|
||||
c.OTLPMetrics.Outputs.With(attribute.String("destination", "tekton"), attribute.String("status", OK)).Inc()
|
||||
}
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue