Flowmill open-source release.
Cleared history to ensure we remove individual and customer names. Wish we could have shared the full commit history.. The original repo had 11,388 commits from 16 contributors over 4+ years -- thank you all! The team is looking forward to continuing development with the community!
This commit is contained in:
commit
3598385a6f
|
|
@ -0,0 +1,74 @@
|
|||
*.o
|
||||
*.pyc
|
||||
*.bak
|
||||
*.so
|
||||
*.a
|
||||
*~
|
||||
!.*
|
||||
*.backup
|
||||
*.aux
|
||||
*.log
|
||||
*.out
|
||||
*.profile
|
||||
*.bbl
|
||||
*.blg
|
||||
*.brf
|
||||
.autotools
|
||||
.cproject
|
||||
.*.cmd
|
||||
.*.d
|
||||
.deps
|
||||
.dirstamp
|
||||
*.la
|
||||
*.lo
|
||||
.libs
|
||||
.pydevproject
|
||||
.Rhistory
|
||||
.settings
|
||||
.settings/
|
||||
.~lock*#
|
||||
*.xxd
|
||||
payload_trace.dat
|
||||
trace_stats.csv
|
||||
exec_at
|
||||
generated/
|
||||
.DS_Store
|
||||
*.pb.*
|
||||
*.sw?
|
||||
*.orig
|
||||
|
||||
MANIFEST
|
||||
.sconsign.dblite
|
||||
|
||||
CMakeFiles/
|
||||
cmake_install.cmake
|
||||
CMakeCache.txt
|
||||
CTestTestfile.cmake
|
||||
Testing/
|
||||
install_manifest.txt
|
||||
|
||||
.idea/
|
||||
|
||||
aclocal.m4
|
||||
configure
|
||||
autom4te.cache
|
||||
build-aux
|
||||
install-sh
|
||||
missing
|
||||
py-compile
|
||||
release/
|
||||
|
||||
stamp-h1
|
||||
src/Makefile
|
||||
Makefile
|
||||
config.h
|
||||
config.status
|
||||
libtool
|
||||
|
||||
perf.data
|
||||
perf.data.old
|
||||
|
||||
# Visual Studio Code
|
||||
.vscode/settings.json
|
||||
# Visual C++ cache files
|
||||
ipch/
|
||||
|
|
@ -0,0 +1,66 @@
|
|||
cmake_minimum_required(VERSION 3.12)

project(
  flowmill-collector
  VERSION 0.9.0
)

# Make the project's own cmake/ directory searchable for include() modules.
list(
  APPEND
  CMAKE_MODULE_PATH
  ${CMAKE_CURRENT_LIST_DIR}/cmake
)

include(FindPkgConfig)

# Custom modules
#
# Toolchain, dependency, and helper modules.  When this repo is embedded as a
# subproject (FLOWMILL_SUBPROJECT set), the parent build is expected to have
# already loaded these, so they are skipped here.
if(NOT FLOWMILL_SUBPROJECT)
  include(cpp-compiler)
  include(ccache)
  include(docker-utils)
  include(sanitizer)
  include(executable)
  include(xxd)
  include(shell)
  include(debug)
  include(lz4)
  include(openssl)
  include(c-ares)
  include(curl)
  include(curlpp)
  include(spdlog)
  include(aws-sdk)
  include(golang)
  include(protobuf)
  include(llvm)
  include(clang)
  include(libelf)
  include(bcc)
  include(test)
  include(uv)
  include(breakpad)
  include(abseil)
  include(yamlcpp)
  include(render)
endif(NOT FLOWMILL_SUBPROJECT)

# Project-wide include paths: source tree, generated files, and the install
# prefix (for pre-built dependencies).
include_directories(
  ${PROJECT_SOURCE_DIR}
  ${CMAKE_BINARY_DIR}
  ${CMAKE_INSTALL_PREFIX}/include
)

# Generate config.h from its template into the build tree and expose it.
configure_file(config.h.cmake_in config.h)
include_directories(${CMAKE_CURRENT_BINARY_DIR})
set(CONFIG_H_DIR ${CMAKE_CURRENT_BINARY_DIR})

# Component subdirectories.
add_subdirectory(renderc)
add_subdirectory(render)
add_subdirectory(config)
add_subdirectory(channel)
add_subdirectory(platform)
add_subdirectory(scheduling)
add_subdirectory(util)
add_subdirectory(jitbuf)
add_subdirectory(collector)
|
||||
|
|
@ -0,0 +1,202 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
File diff suppressed because it is too large
Load Diff
|
|
@ -0,0 +1,196 @@
|
|||
# Flowmill telemetry collector #
|
||||
|
||||
Flowmill telemetry collector is an agent that can collect low level telemetry
|
||||
straight from the Linux Kernel using the [eBPF technology](https://ebpf.io/).
|
||||
It does so with negligible overhead towards compute and network resources.
|
||||
|
||||
This telemetry is then sent to a pipeline that can enrich it and provide
|
||||
invaluable insight about your distributed application.
|
||||
|
||||
## Building the collector ##
|
||||
|
||||
There's a docker build image provided with all dependencies pre-installed,
|
||||
ready to build the collectors.
|
||||
|
||||
Building the collectors images is as simple as running the build image within
|
||||
docker with the following setup:
|
||||
|
||||
```
|
||||
docker run \
|
||||
-it --rm \
|
||||
--mount "type=bind,source=/var/run/docker.sock,destination=/var/run/docker.sock" \
|
||||
--mount "type=bind,source=$(git rev-parse --show-toplevel),destination=/root/src,readonly" \
|
||||
--env FLOWMILL_SRC=/root/src \
|
||||
--env FLOWMILL_OUT_DIR=/root/out \
|
||||
--workdir=/root/out \
|
||||
build-env \
|
||||
../build.sh docker
|
||||
```
|
||||
|
||||
The resulting docker image will be placed in the host's docker daemon under the
|
||||
name `kernel-collector`.
|
||||
|
||||
The images can also be automatically pushed to a docker registry after they're built.
|
||||
By default, they're pushed to a local docker registry at `localhost:5000`. The registry
|
||||
can be changed by setting the environment variable `FLOWMILL_DOCKER_REGISTRY` in the
|
||||
build image, as so:
|
||||
|
||||
```
|
||||
docker run \
|
||||
-it --rm \
|
||||
--mount "type=bind,source=/var/run/docker.sock,destination=/var/run/docker.sock" \
|
||||
--mount "type=bind,source=$(git rev-parse --show-toplevel),destination=/root/src,readonly" \
|
||||
--env FLOWMILL_SRC=/root/src \
|
||||
--env FLOWMILL_OUT_DIR=/root/out \
|
||||
--env FLOWMILL_DOCKER_REGISTRY="localhost:5000" \
|
||||
--workdir=/root/out \
|
||||
build-env \
|
||||
../build.sh docker-registry
|
||||
```
|
||||
|
||||
The source code for the build image as well as instructions on how to build it
|
||||
can be found in its repo [at github.com/Flowmill/flowmill-build-env](
|
||||
https://github.com/Flowmill/flowmill-build-env).
|
||||
|
||||
## Running the collector ##
|
||||
|
||||
Running the Flowmill collector should be as easy as running a docker image:
|
||||
|
||||
```
|
||||
docker run -it --rm \
|
||||
--env FLOWMILL_INTAKE_PORT="${FLOWMILL_INTAKE_PORT}" \
|
||||
--env FLOWMILL_INTAKE_HOST="${FLOWMILL_INTAKE_HOST}" \
|
||||
--env FLOWMILL_AUTH_KEY_ID="KFIIHR5SFKS3TQFPZWZK" \
|
||||
--env FLOWMILL_AUTH_SECRET="DatfNxs42qP1v8u281G9lyNNmFvWLmehNwVHQ9LT" \
|
||||
--env FLOWMILL_INTAKE_NAME="oss" \
|
||||
--privileged \
|
||||
--pid host \
|
||||
--network host \
|
||||
--log-console \
|
||||
--volume /var/run/docker.sock:/var/run/docker.sock \
|
||||
--volume /sys/fs/cgroup:/hostfs/sys/fs/cgroup \
|
||||
--volume /etc:/var/run/flowmill/host/etc \
|
||||
--volume /var/cache:/var/run/flowmill/host/cache \
|
||||
--volume /usr/src:/var/run/flowmill/host/usr/src \
|
||||
--volume /lib/modules:/var/run/flowmill/host/lib/modules \
|
||||
kernel-collector \
|
||||
--log-console
|
||||
```
|
||||
|
||||
### Collector settings ###
|
||||
|
||||
Environment variables:
|
||||
|
||||
- `FLOWMILL_AUTH_KEY_ID`: this is the agent key id used to authenticate with Flowmill
|
||||
- `FLOWMILL_AUTH_SECRET`: this is the agent secret used to authenticate with Flowmill
|
||||
- `FLOWMILL_INTAKE_NAME`: this is the name of the Flowmill intake server
|
||||
- `FLOWMILL_INTAKE_HOST`: this is the hostname or IP address of the Flowmill intake server
|
||||
- `FLOWMILL_INTAKE_PORT`: this is the port of the Flowmill intake server
|
||||
- `FLOWMILL_AUTHZ_SERVER`: this is the host:port of Flowmill auth server (default: app.flowmill.com)
|
||||
|
||||
Volumes:
|
||||
|
||||
- `/var/run/docker.sock`: enables the collector to talk to the local Docker daemon
|
||||
- `/sys/fs/cgroup`: allows the collector to read cgroup information
|
||||
- `/etc`: allows the collector to read package manager settings in order to
|
||||
fetch kernel headers in case they're not pre-installed on the host (necessary
|
||||
for eBPF - optional if pre-installed kernel headers are available on the host)
|
||||
- `/var/cache`: cache fetched kernel headers on the host (optional)
|
||||
- `/usr/src` / `/lib/modules`: allows the collector to use kernel headers
|
||||
pre-installed on the host (necessary for eBPF)
|
||||
|
||||
Docker settings:
|
||||
|
||||
The collector needs privileged access since it uses the eBPF mechanism from the
|
||||
Linux kernel, therefore these settings need to be passed to docker: `--privileged`,
|
||||
`--pid host` and `--network host`.
|
||||
|
||||
## Integration with OpenTelemetry Collector ##
|
||||
|
||||
Flowmill collector can alternatively send telemetry to OpenTelemetry Collector
|
||||
(otel-col) in the form of Log entries.
|
||||
|
||||
### Configuring otel-col to receive telemetry ###
|
||||
|
||||
A few changes need to be made to otel-col's config file. Please refer to
|
||||
otel-col's documentation for details on [how to run the
|
||||
collector](https://opentelemetry.io/docs/collector/getting-started/#docker).
|
||||
|
||||
First you need to set up an HTTP endpoint for an OTLP receiver. The example
|
||||
below binds the receiver to all interfaces (`0.0.0.0`) on port `8000`. For more
|
||||
information, refer to [otel-col's
|
||||
documentation](https://opentelemetry.io/docs/collector/configuration/#receivers):
|
||||
```
|
||||
receivers:
|
||||
otlp:
|
||||
protocols:
|
||||
http:
|
||||
endpoint: 0.0.0.0:8000
|
||||
```
|
||||
|
||||
If you need to enable TLS on the endpoint, check [the
|
||||
documentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md#server-configuration).
|
||||
|
||||
Then make sure the Log Service is also enabled and connected to the OTLP HTTP
|
||||
receiver. For more information, refer to [otel-col's
|
||||
documentation](https://opentelemetry.io/docs/collector/configuration/#service):
|
||||
|
||||
```
|
||||
service:
|
||||
pipelines:
|
||||
logs:
|
||||
receivers: [otlp]
|
||||
# processors: # TODO: add processors here
|
||||
# exporters: # TODO: add exporters here
|
||||
```
|
||||
|
||||
By making sure the Log Service is enabled in otel-col and receiving HTTP
|
||||
requests in OTLP format, now Flowmill collector is able to send telemetry to
|
||||
otel-col on port `8000`.
|
||||
|
||||
For more information on the OTLP receiver, refer to [otel-col's
|
||||
documentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/receiver/otlpreceiver/README.md).
|
||||
|
||||
### Configuring the Flowmill collector to send telemetry to otel-col ###
|
||||
|
||||
The flowmill collector needs to know a few things in order to connect to
|
||||
otel-col's receiver as its intake. The difference between connecting to the
|
||||
standard Flowmill intake vs connecting to otel-col's receiver is the intake
|
||||
encoding. For otel-col's receiver the encoding must be set to `otlp_log`.
|
||||
|
||||
Intake settings are controlled by environment variables set on Flowmill
|
||||
Collector's container (e.g.: can be set with `docker`'s --env command line
|
||||
argument). Below is a list of settings along with the name of the environment
|
||||
variable and suggested values for a proof-of-concept:
|
||||
|
||||
- host: `FLOWMILL_INTAKE_HOST=127.0.0.1`
|
||||
- port: `FLOWMILL_INTAKE_PORT=8000`
|
||||
- tls: `FLOWMILL_INTAKE_DISABLE_TLS=true`
|
||||
- encoder: `FLOWMILL_INTAKE_ENCODER=otlp_log`
|
||||
- name (if [TLS SNI](https://en.wikipedia.org/wiki/Server_Name_Indication) is
|
||||
needed): `FLOWMILL_INTAKE_NAME=oss`
|
||||
|
||||
Here's an example:
|
||||
|
||||
```
|
||||
docker run -it --rm \
|
||||
--env FLOWMILL_INTAKE_HOST="127.0.0.1" \
|
||||
--env FLOWMILL_INTAKE_PORT="8000" \
|
||||
--env FLOWMILL_INTAKE_DISABLE_TLS=true \
|
||||
--env FLOWMILL_INTAKE_ENCODER="otlp_log" \
|
||||
--env FLOWMILL_AUTH_KEY_ID="KFIIHR5SFKS3TQFPZWZK" \
|
||||
--env FLOWMILL_AUTH_SECRET="DatfNxs42qP1v8u281G9lyNNmFvWLmehNwVHQ9LT" \
|
||||
--env FLOWMILL_INTAKE_NAME="oss" \
|
||||
--privileged \
|
||||
--pid host \
|
||||
--network host \
|
||||
--log-console \
|
||||
--volume /var/run/docker.sock:/var/run/docker.sock \
|
||||
--volume /sys/fs/cgroup:/hostfs/sys/fs/cgroup \
|
||||
--volume /etc:/var/run/flowmill/host/etc \
|
||||
--volume /var/cache:/var/run/flowmill/host/cache \
|
||||
--volume /usr/src:/var/run/flowmill/host/usr/src \
|
||||
--volume /lib/modules:/var/run/flowmill/host/lib/modules \
|
||||
kernel-collector \
|
||||
--log-console
|
||||
```
|
||||
|
|
@ -0,0 +1,142 @@
|
|||
# Static libraries implementing the transport channels (file, TCP, TLS, LZ4,
# etc.) and the machinery that batches and writes data into them.

add_library(
  file_channel
  STATIC
  file_channel.cc
)
target_link_libraries(
  file_channel
  file_ops
  logging
)

add_library(
  double_write_channel
  STATIC
  double_write_channel.cc
)
target_link_libraries(
  double_write_channel
  logging
)

add_library(
  buffered_writer
  STATIC
  buffered_writer.cc
)
target_link_libraries(
  buffered_writer
  logging
)

add_library(
  tcp_channel
  STATIC
  tcp_channel.cc
)
target_link_libraries(
  tcp_channel
  http_proxy_config
  error_handling
  uv_helpers
  libuv-interface
  logging
)

add_library(
  lz4_channel
  STATIC
  lz4_channel.cc
)
target_link_libraries(
  lz4_channel
  logging
  lz4
)

add_library(
  upstream_connection
  STATIC
  upstream_connection.cc
)
target_link_libraries(
  upstream_connection
  double_write_channel
  lz4_channel
  buffered_writer
  logging
)

# TLS support: channel plus handshake/error handling and the OpenSSL
# wrappers under internal/.
add_library(
  tls_channel
  STATIC
  tls_channel.cc
  tls_handler.cc
  tls_error.cc
  tls_over_tcp_channel.cc
  internal/private_key.cc
  internal/certificate.cc
  internal/ssl_context.cc
  internal/tls_shim.cc
)
target_link_libraries(
  tls_channel
  tcp_channel
  lz4_channel
  buffered_writer
  logging
  error_handling
  OpenSSL::SSL
  OpenSSL::Crypto
)

add_library(
  reconnecting_channel
  STATIC
  reconnecting_channel.cc
)

target_link_libraries(
  reconnecting_channel
  upstream_connection
  tls_channel
  spdlog
  libuv-interface
  render_flowmill_artifacts
)

add_library(
  connection_caretaker
  STATIC
  connection_caretaker.cc
)
target_link_libraries(
  connection_caretaker
  authz_fetcher
  aws_instance_metadata
  gcp_instance_metadata
  intake_config
  element_queue_writer
  fastpass_util
  render_flowmill_artifacts
  logging
  tls_channel
  absl::strings
)

#######
# tests
#######

add_gtest(
  buffered_writer_test
  SRCS
  buffered_writer_test.cc
)
target_link_libraries(
  buffered_writer_test
  buffered_writer
  element_queue_writer
  llvm
)
add_dependencies(unit_tests buffered_writer_test)
|
||||
|
|
@ -0,0 +1,121 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#include <channel/buffered_writer.h>
|
||||
#include <channel/channel.h>
|
||||
#include <util/log.h>
|
||||
|
||||
#include <stdexcept>
|
||||
|
||||
namespace channel {
|
||||
|
||||
//
// Constructs a writer that batches up to `buf_size` bytes in a heap buffer
// before handing them to `channel`.
//
// Throws std::runtime_error if the backing buffer cannot be allocated.
//
BufferedWriter::BufferedWriter(Channel &channel, u32 buf_size)
    : buf_size_(buf_size), write_start_loc_(0), write_finish_loc_(0),
      channel_(channel)
{
  // malloc (not new[]) because the destructor releases with free().
  buf_ = static_cast<u8 *>(malloc(buf_size));
  // malloc(0) may legitimately return a null pointer, so only treat a null
  // result as an allocation failure when we actually asked for memory.
  if (buf_ == nullptr && buf_size != 0)
    throw std::runtime_error("BufferedWriter: failed to allocate memory\n");
}
|
||||
|
||||
BufferedWriter::~BufferedWriter()
{
  /* Best-effort flush of fully-committed data before tearing down.  Only
   * attempted when no write is in progress (the start and finish cursors
   * agree), since flush() asserts that precondition.  flush() catches any
   * exception the channel throws, so this cannot escape the destructor. */
  if (write_start_loc_ == write_finish_loc_) {
    flush();
  }

  /* buf_ was malloc-ed in the constructor; free(nullptr) is a no-op. */
  free(buf_);
}
|
||||
|
||||
/**
 * Reserves `length` bytes in the buffer and returns a pointer for the caller
 * to fill in.  If the remaining space is insufficient, previously committed
 * data is flushed to the channel first.
 *
 * Fails with std::errc::no_buffer_space when `length` exceeds the whole
 * buffer, or with the channel's error when the implicit flush fails.
 */
Expected<u8 *, std::error_code> BufferedWriter::start_write(u32 length)
{
  // A request larger than the entire buffer can never be satisfied.
  if (length > buf_size_) {
    LOG::error(
        "BufferedWriter::start_write: requesting more than buffer maximum size"
        " (requested={}, buf_size={})",
        length, buf_size_);
    return {unexpected, std::make_error_code(std::errc::no_buffer_space)};
  }

  // Not enough room after the committed data? Flush to make space.
  u32 const remaining = buf_size_ - write_start_loc_;
  if (remaining < length) {
    if (auto flush_error = flush()) {
      LOG::error(
          "BufferedWriter::start_write: failed to flush the channel and there's"
          " not enough space in the current buffer to return - check for channel"
          " errors prior to this one (requested={}, buf_size={} offset={})",
          length, buf_size_, write_start_loc_);
      return {unexpected, flush_error};
    }
  }
  assert(buf_size_ - write_start_loc_ >= length);

  // Record where this write will end; finish_write() commits it.
  write_finish_loc_ = write_start_loc_ + length;

  // Hand the caller the start of the reserved region.
  return &buf_[write_start_loc_];
}
|
||||
|
||||
/* Commits the write reserved by the last successful start_write(): the next
 * reservation will begin where this one ended. */
void BufferedWriter::finish_write()
{
  write_start_loc_ = write_finish_loc_;
}
|
||||
|
||||
std::error_code BufferedWriter::flush()
|
||||
{
|
||||
/* we shouldn't be in the middle of a write */
|
||||
assert(write_start_loc_ == write_finish_loc_);
|
||||
|
||||
if (write_start_loc_ == 0) {
|
||||
return {};
|
||||
}
|
||||
|
||||
// TODO: it should never throw
|
||||
try {
|
||||
if (is_writable()) {
|
||||
if (auto error = channel_.send(buf_, write_start_loc_)) {
|
||||
return error;
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (...) {
|
||||
return std::make_error_code(std::errc::invalid_argument);
|
||||
}
|
||||
|
||||
write_start_loc_ = write_finish_loc_ = 0;
|
||||
return {};
|
||||
}
|
||||
|
||||
/* Abandons all buffered data — committed and in-progress — without sending. */
void BufferedWriter::reset()
{
  write_start_loc_ = write_finish_loc_ = 0;
}
|
||||
|
||||
/* Total capacity, in bytes, of the batching buffer. */
u32 BufferedWriter::buf_size() const
{
  return buf_size_;
}
|
||||
|
||||
/* A flush can only deliver data while the underlying channel is open. */
bool BufferedWriter::is_writable() const {
  return channel_.is_open();
}
|
||||
|
||||
} // namespace channel
|
||||
|
|
@ -0,0 +1,93 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <channel/ibuffered_writer.h>
|
||||
#include <platform/platform.h>
|
||||
|
||||
namespace channel {
|
||||
|
||||
class Channel;
|
||||
|
||||
/**
 * A class that enables writing through a buffer so send() calls don't have to
 * happen for every message. When buffers are exhausted, they are sent into
 * the given Channel.
 */
class BufferedWriter : public ::IBufferedWriter {
public:
  /**
   * c'tor
   * throws if buf_ can't be malloc-ed
   * @param channel: the channel on which to send messages
   * @param buf_size: how many bytes used to batch the sent messages
   */
  BufferedWriter(Channel &channel, u32 buf_size);

  /**
   * d'tor — flushes committed data when no write is in progress, then
   * releases the buffer
   */
  virtual ~BufferedWriter();

  /**
   * Reserves `length` bytes in the buffer for the caller to fill in,
   * flushing previously committed data first if there isn't enough room.
   *
   * @returns: on success, where the caller should write the data.  On
   *    failure, an error code: std::errc::no_buffer_space when the
   *    requested length is larger than buf_size_, or the channel's error
   *    when the implicit flush() fails.
   */
  Expected<u8 *, std::error_code> start_write(u32 length) override;

  /**
   * Finishes (commits) the write started by the last successful
   * start_write()
   */
  void finish_write() override;

  /**
   * Flushes the buffer to the channel if it's non-empty.
   *
   * @return a zero (default) error code on success; the channel's error
   *    code if send() fails, or std::errc::invalid_argument if the channel
   *    throws
   */
  std::error_code flush() override;

  /**
   * Abandons the current buffered data
   */
  void reset();

  /**
   * Returns the buffer size
   */
  u32 buf_size() const override;

  /* true when the underlying channel is open and can accept data */
  bool is_writable() const override;

private:
  /* heap buffer of buf_size_ bytes, allocated in the c'tor */
  u8 *buf_;
  const u32 buf_size_;

  /* where next or active write starts */
  u32 write_start_loc_;
  /* where active write will finish */
  u32 write_finish_loc_;

  Channel &channel_;
};
|
||||
|
||||
} // namespace channel
|
||||
|
|
@ -0,0 +1,65 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#include <channel/buffered_writer.h>
|
||||
|
||||
#include <memory>
|
||||
|
||||
#include <channel/mock_channel.h>
|
||||
#include <gmock/gmock.h>
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
namespace {
|
||||
using ::testing::_;
|
||||
using ::testing::NotNull;
|
||||
using ::testing::IsFalse;
|
||||
using ::testing::IsTrue;
|
||||
using ::testing::Test;
|
||||
using ::testing::Return;
|
||||
|
||||
// Buffer capacity used by every test below; small enough to exercise
// batching behavior with short writes.
static constexpr u32 default_buffer_size = 32;

// Fixture: builds a BufferedWriter over a mock channel with a small
// fixed-size buffer before each test.
class BufferedWriterTest : public Test {
protected:
  void SetUp() override
  {
    writer_.reset(new channel::BufferedWriter(mock_channel_, default_buffer_size));
  }

  ::channel::MockChannel mock_channel_;
  std::unique_ptr<channel::BufferedWriter> writer_;
};
|
||||
|
||||
// A freshly constructed writer reports its configured buffer size and never
// touches the channel.
TEST_F(BufferedWriterTest, empty_writer)
{
  EXPECT_CALL(mock_channel_, send(_, _)).Times(0);

  EXPECT_EQ(writer_->buf_size(), default_buffer_size);
}
|
||||
|
||||
// One 24-byte write fits in the 32-byte buffer, so nothing is sent until
// flush(), which must produce exactly one send() of 24 bytes.
TEST_F(BufferedWriterTest, one_flush)
{
  ON_CALL(mock_channel_, is_open()).WillByDefault(Return(true));
  EXPECT_CALL(mock_channel_, send(_, 24)).Times(1);

  // NOTE(review): start_write() is called twice before finish_write() --
  // presumably the second call returns the same slot because the first write
  // was never finished; confirm against BufferedWriter::start_write.
  EXPECT_THAT(writer_->start_write(24), IsTrue());
  EXPECT_THAT(*writer_->start_write(24), NotNull());
  writer_->finish_write();

  // flush() returns a falsy (success) error code on the happy path.
  EXPECT_THAT(writer_->flush(), IsFalse());
}
|
||||
|
||||
} // namespace
|
||||
|
|
@ -0,0 +1,63 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <platform/types.h>

#include <string_view>
|
||||
|
||||
namespace channel {
|
||||
|
||||
/**
 * Receiver-side interface for channel events: data arrival, errors,
 * connection establishment and teardown. All handlers have no-op defaults,
 * so implementations override only what they care about.
 */
class Callbacks {
public:
  /**
   * virtual d'tor
   */
  virtual ~Callbacks() {}

  /**
   * Callback with ready data.
   *
   * The default implementation ignores received data.
   *
   * @returns how many bytes were consumed
   */
  virtual u32 received_data(u8 const *data, int length) { return length; }

  // Convenience overload for a byte view; forwards to the virtual
  // pointer/length handler.
  // NOTE(review): data.size() (size_t) narrows to int here; callers
  // presumably keep payloads well within int range -- confirm.
  u32 received_data(std::basic_string_view<u8> data) {
    return received_data(data.data(), data.size());
  }

  // Convenience overload for character data; reinterprets the bytes and
  // forwards to the virtual pointer/length handler.
  u32 received_data(std::string_view data) {
    return received_data(reinterpret_cast<u8 const *>(data.data()), data.size());
  }

  /**
   * An error occurred on the channel, or -ENOLINK on EOF
   */
  virtual void on_error(int error) {}

  /**
   * The link finished closing
   */
  virtual void on_closed() {}

  /**
   * Connected
   */
  virtual void on_connect() {}
};
|
||||
|
||||
} /* namespace channel */
|
||||
|
|
@ -0,0 +1,62 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <platform/types.h>
|
||||
|
||||
#include <string_view>
|
||||
#include <system_error>
|
||||
|
||||
namespace channel {
|
||||
|
||||
/**
 * An interface that allows reading and writing data to a pipe/socket/etc
 */
class Channel {
public:
  /**
   * Virtual d'tor
   */
  virtual ~Channel() {}

  /**
   * Sends data onto the channel.
   *
   * @returns a default-constructed (success) error code on success
   */
  virtual std::error_code send(const u8 *data, int data_len) = 0;

  // Convenience overload for a byte view; forwards to the pointer/length
  // overload.
  inline std::error_code send(std::basic_string_view<u8> data) {
    return send(data.data(), data.size());
  }

  // Convenience overload for character data; reinterprets the bytes and
  // forwards to the pointer/length overload.
  inline std::error_code send(std::string_view data) {
    return send(reinterpret_cast<u8 const *>(data.data()), data.size());
  }

  /**
   * Flushes any internal buffers.
   *
   * The default implementation is a no-op that reports success.
   */
  virtual std::error_code flush() { return {}; }

  /**
   * Closes the channel.
   *
   * The default implementation is a no-op.
   */
  virtual void close() {}

  // Whether the channel is currently usable for sending.
  virtual bool is_open() const = 0;
};
|
||||
|
||||
} /* namespace channel */
|
||||
|
|
@ -0,0 +1,31 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <util/enum.h>
|
||||
|
||||
// X-macro enum declaration: expands (via util/enum_operators.inl) into
// `channel::Component`, a std::uint8_t-backed enum with the elements listed
// below and `none` as the default value.
#define ENUM_NAMESPACE channel
#define ENUM_NAME Component
#define ENUM_TYPE std::uint8_t
#define ENUM_ELEMENTS(X) \
  X(none, 0) \
  X(tls, 1) \
  X(reconnecting_channel, 2) \
  X(tcp, 3) \
  X(upstream, 4)
#define ENUM_DEFAULT none
#include <util/enum_operators.inl>
|
||||
|
|
@ -0,0 +1,299 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#include <channel/connection_caretaker.h>
|
||||
|
||||
#include <collector/component.h>
|
||||
#include <common/cloud_platform.h>
|
||||
#include <common/constants.h>
|
||||
#include <util/boot_time.h>
|
||||
#include <util/log.h>
|
||||
#include <util/log_formatters.h>
|
||||
|
||||
#include <absl/strings/match.h>
|
||||
|
||||
namespace channel {
|
||||
|
||||
namespace {
|
||||
|
||||
void heartbeat_timer_cb(uv_timer_t *timer)
|
||||
{
|
||||
auto *caretaker = (ConnectionCaretaker *)(timer->data);
|
||||
caretaker->send_heartbeat();
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
// Gathers cloud instance metadata (AWS first, then GCP; each fetch is
// blocking and bounded by `metadata_timeout`), initializes the heartbeat
// timer on the given loop, and starts automatic authz token refresh.
// Throws std::runtime_error if the libuv timer cannot be initialized.
ConnectionCaretaker::ConnectionCaretaker(
    std::string_view hostname, ClientType client_type,
    AuthzFetcher &authz_fetcher,
    config::ConfigFile::LabelsMap const &config_data,
    uv_loop_t *loop, flowmill::ingest::Writer &writer,
    std::chrono::milliseconds metadata_timeout,
    std::chrono::milliseconds heartbeat_interval, std::function<void()> flush_cb,
    std::function<void(bool)> set_compression_cb,
    std::function<void()> on_authenticated_cb)
    : hostname_(hostname),
      client_type_(client_type),
      authz_fetcher_(authz_fetcher),
      config_data_(config_data),
      loop_(loop),
      heartbeat_interval_(heartbeat_interval),
      flush_cb_(std::move(flush_cb)),
      set_compression_cb_(std::move(set_compression_cb)),
      on_authenticated_cb_(std::move(on_authenticated_cb)),
      writer_(writer)
{
  assert(loop != nullptr);
  assert(heartbeat_interval_.count() > 0);

  // Failure to fetch metadata is non-fatal: the collector simply reports as
  // a non-cloud host later (see send_metadata_header).
  LOG::trace_in(CloudPlatform::aws, "--- resolving AWS metadata ---");
  if (auto aws_metadata = AwsMetadata::fetch(metadata_timeout)) {
    aws_metadata_.emplace(std::move(aws_metadata.value()));
    aws_metadata_->print_instance_metadata();
    aws_metadata_->print_interfaces();
  } else {
    LOG::warn("Unable to fetch AWS metadata: {}", aws_metadata.error());
  }

  LOG::trace_in(CloudPlatform::gcp, "--- resolving GCP metadata ---");
  if (auto gcp_metadata = GcpInstanceMetadata::fetch(metadata_timeout)) {
    gcp_metadata_.emplace(std::move(gcp_metadata.value()));
    gcp_metadata_->print();
  } else {
    LOG::warn("Unable to fetch GCP metadata: {}", gcp_metadata.error());
  }

  int res = uv_timer_init(loop_, &heartbeat_timer_);
  if (res != 0) {
    throw std::runtime_error("Cannot init heartbeat_timer");
  }
  // Stash `this` so the C timer callback can recover the caretaker instance.
  heartbeat_timer_.data = this;

  authz_fetcher_.auto_refresh(*loop);
}
|
||||
|
||||
// Stops the heartbeat and closes the libuv timer handle.
// NOTE(review): uv_close() only *schedules* the close; the loop must still
// run for the handle to actually be released -- confirm the loop outlives
// this object.
ConnectionCaretaker::~ConnectionCaretaker()
{
  stop_heartbeat();
  uv_close((uv_handle_t *)&heartbeat_timer_, NULL);
}
|
||||
|
||||
// Performs the initial handshake with the server, in order:
//   1. version info (sent uncompressed so the server can parse it before
//      compression is negotiated), then compression is re-enabled;
//   2. the authz token, hostname and client type;
//   3. cpu core count and configuration labels;
//   4. cloud metadata: AWS or GCP node info plus per-interface IP/VPC
//      mappings, or a bare "unknown platform" record;
//   5. a metadata_complete marker.
// Finally invokes on_authenticated_cb_.
void ConnectionCaretaker::send_metadata_header()
{
  // Version info must go out uncompressed -- see ordering note above.
  set_compression_cb_(false);
  LOG::info("initiating authentication of {} collector version {}",
            client_type_, versions::release);
  writer_.version_info(
      versions::release.major(), versions::release.minor(), versions::release.build());
  flush();
  set_compression_cb_(true);

  auto const &token = *authz_fetcher_.token();
  LOG::info(
      "sending authz token with {}s left until expiration (iat={}s exp={}s)",
      token.time_left<std::chrono::seconds>(std::chrono::system_clock::now())
          .count(),
      token.issued_at<std::chrono::seconds>().count(),
      token.expiration<std::chrono::seconds>().count());
  writer_.authz_authenticate(jb_blob(token.payload()),
                             static_cast<u8>(client_type_),
                             jb_blob(hostname_));

  writer_.report_cpu_cores(std::thread::hardware_concurrency());

  flush();

  // Declares a zero-initialized char buffer sized exactly like `field` of
  // `struct_name`, so the strncpy calls below cannot overflow the
  // wire-format field.
  // NOTE(review): strncpy leaves the buffer without a NUL terminator when
  // vpc_id() exactly fills it -- presumably the wire format is fixed-width
  // rather than NUL-terminated; confirm against the ingest schema.
# define make_buf_from_field(struct_name, field, buf_name) \
  struct struct_name __##struct_name##__##buf_name; \
  char buf_name[sizeof(__##struct_name##__##buf_name.field)] = {};

  for (auto const &label : config_data_) {
    writer_.set_config_label(jb_blob{label.first}, jb_blob{label.second});
  }
  flush();

  if (aws_metadata_) {
    writer_.cloud_platform(static_cast<u16>(CloudPlatform::aws));
    if (auto const &account_id = aws_metadata_->account_id()) {
      LOG::trace_in(
          std::make_tuple(CloudPlatform::aws, collector::Component::auth),
          "reporting aws account id: {}", account_id.value()
      );
      writer_.cloud_platform_account_info(jb_blob{account_id.value()});
    } else {
      LOG::trace_in(
          std::make_tuple(CloudPlatform::aws, collector::Component::auth),
          "no aws account id to report"
      );
    }

    // Strip the "i-" prefix from the EC2 instance id before reporting.
    auto id = aws_metadata_->id().value();
    if (absl::StartsWith(id, std::string_view("i-"))) {
      id.remove_prefix(2);
    }

    writer_.set_node_info(
        jb_blob{aws_metadata_->az().value()},
        jb_blob{aws_metadata_->iam_role().value()},
        jb_blob{id},
        jb_blob{aws_metadata_->type().value()}
    );
    flush();

    // Report each interface's private IPv4s, IPv6s and public->private IPv4
    // mappings; addresses that fail to parse are skipped silently.
    for (auto const &interface: aws_metadata_->network_interfaces()) {
      for (auto const &ipv4: interface.private_ipv4s()) {
        struct sockaddr_in private_sa;
        int res = inet_pton(AF_INET, ipv4.c_str(), &(private_sa.sin_addr));
        if (res != 1) {
          continue;
        }
        make_buf_from_field(jb_ingest__private_ipv4_addr, vpc_id, vpc_id_buf);
        strncpy(vpc_id_buf, interface.vpc_id().c_str(), sizeof(vpc_id_buf));
        writer_.private_ipv4_addr(private_sa.sin_addr.s_addr, (u8 *)vpc_id_buf);
      }

      for (auto const &ipv6: interface.ipv6s()) {
        struct sockaddr_in6 sa;
        int res = inet_pton(AF_INET6, ipv6.c_str(), &(sa.sin6_addr));
        if (res != 1) {
          continue;
        }
        make_buf_from_field(jb_ingest__ipv6_addr, vpc_id, vpc_id_buf);
        strncpy(vpc_id_buf, interface.vpc_id().c_str(), sizeof(vpc_id_buf));
        writer_.ipv6_addr(sa.sin6_addr.s6_addr, (u8 *)vpc_id_buf);
      }

      for (auto const &mapped_ipv4: interface.mapped_ipv4s()) {
        struct sockaddr_in public_sa;
        int res =
            inet_pton(AF_INET, mapped_ipv4.first.c_str(), &(public_sa.sin_addr));
        if (res != 1) {
          continue;
        }
        struct sockaddr_in private_sa;
        res = inet_pton(AF_INET, mapped_ipv4.second.c_str(),
                        &(private_sa.sin_addr));
        if (res != 1) {
          continue;
        }
        make_buf_from_field(jb_ingest__public_to_private_ipv4, vpc_id,
                            vpc_id_buf);
        strncpy(vpc_id_buf, interface.vpc_id().c_str(), sizeof(vpc_id_buf));
        writer_.public_to_private_ipv4(public_sa.sin_addr.s_addr,
                                       private_sa.sin_addr.s_addr,
                                       (u8 *)vpc_id_buf);
      }
    }
  } else if (gcp_metadata_) {
    writer_.cloud_platform(static_cast<u16>(CloudPlatform::gcp));
    // TODO: obtain account_id for GCP and uncomment below
    // writer_.cloud_platform_account_info(jb_blob{account_id});

    writer_.set_node_info(
        jb_blob{gcp_metadata_->az()},
        jb_blob{gcp_metadata_->role()},
        jb_blob{gcp_metadata_->hostname()},
        jb_blob{gcp_metadata_->type()}
    );
    flush();

    // GCP interfaces carry either an IPv4 (possibly with public mappings)
    // or an IPv6 address.
    for (auto const &interface: gcp_metadata_->network_interfaces()) {
      if (auto const ipv4 = interface.ipv4()) {
        make_buf_from_field(jb_ingest__private_ipv4_addr, vpc_id, vpc_id_buf);
        strncpy(vpc_id_buf, interface.vpc_id().c_str(), sizeof(vpc_id_buf));
        writer_.private_ipv4_addr(ipv4->as_int(), (u8 *)vpc_id_buf);

        for (auto const &public_ip: interface.public_ips()) {
          make_buf_from_field(jb_ingest__public_to_private_ipv4, vpc_id, vpc_id_buf);
          strncpy(vpc_id_buf, interface.vpc_id().c_str(), sizeof(vpc_id_buf));
          writer_.public_to_private_ipv4(public_ip.as_int(), ipv4->as_int(), (u8 *)vpc_id_buf);
        }
      } else if (auto const ipv6 = interface.ipv6()) {
        uint8_t ipv6_buffer[16];
        ipv6->write_to(ipv6_buffer);
        make_buf_from_field(jb_ingest__ipv6_addr, vpc_id, vpc_id_buf);
        strncpy(vpc_id_buf, interface.vpc_id().c_str(), sizeof(vpc_id_buf));
        writer_.ipv6_addr(ipv6_buffer, (u8 *)vpc_id_buf);
      }
    }
  } else {
    // Not on a recognized cloud platform: report hostname only.
    writer_.cloud_platform(static_cast<u16>(CloudPlatform::unknown));

    writer_.set_node_info(
        jb_blob{/* az */},
        jb_blob{/* role */},
        jb_blob{hostname_},
        jb_blob{/* instance_type */}
    );

    // no network interface data (public/private ip) to send
  }

  writer_.metadata_complete(0);

  flush();
# undef make_buf_from_field

  on_authenticated_cb_();
}
|
||||
|
||||
void ConnectionCaretaker::refresh_authz_token()
|
||||
{
|
||||
authz_fetcher_.sync_refresh();
|
||||
}
|
||||
|
||||
void ConnectionCaretaker::flush()
|
||||
{
|
||||
flush_cb_();
|
||||
}
|
||||
|
||||
void ConnectionCaretaker::start_heartbeat() {
|
||||
int res = uv_timer_start(&heartbeat_timer_, heartbeat_timer_cb,
|
||||
heartbeat_interval_.count(), heartbeat_interval_.count());
|
||||
|
||||
if (res != 0) {
|
||||
LOG::error("Cannot start heartbeat_timer: {}", uv_err_name(res));
|
||||
}
|
||||
}
|
||||
|
||||
// Disarms the heartbeat timer.
void ConnectionCaretaker::stop_heartbeat()
{
  uv_timer_stop(&heartbeat_timer_);
}
|
||||
|
||||
void ConnectionCaretaker::send_heartbeat()
|
||||
{
|
||||
LOG::debug("sending heartbeat for {} collector", client_type_);
|
||||
if (writer_.is_writable()) {
|
||||
writer_.heartbeat();
|
||||
flush();
|
||||
}
|
||||
}
|
||||
|
||||
void ConnectionCaretaker::set_connected()
|
||||
{
|
||||
LOG::info("collector {} connected to host", hostname_);
|
||||
send_metadata_header();
|
||||
start_heartbeat();
|
||||
}
|
||||
|
||||
void ConnectionCaretaker::set_disconnected()
|
||||
{
|
||||
LOG::info("collector {} disconnected from host", hostname_);
|
||||
stop_heartbeat();
|
||||
}
|
||||
|
||||
} // namespace channel
|
||||
|
|
@ -0,0 +1,113 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <channel/callbacks.h>
|
||||
#include <channel/channel.h>
|
||||
#include <common/client_type.h>
|
||||
#include <config/config_file.h>
|
||||
#include <generated/flowmill/ingest/writer.h>
|
||||
#include <util/authz_fetcher.h>
|
||||
#include <util/aws_instance_metadata.h>
|
||||
#include <util/curl_engine.h>
|
||||
#include <util/gcp_instance_metadata.h>
|
||||
|
||||
#include <uv.h>
|
||||
|
||||
#include <chrono>
|
||||
#include <functional>
|
||||
|
||||
namespace channel {
|
||||
|
||||
// ConnectionCaretaker handles common tasks of the agent->server connection.
//
// The current implementation:
// 1. Sends initial metadata, including agent version, authz token &
//    configuration labels.
// 2. Sends a heartbeat signal to the server periodically.
//
// This class is NOT thread-safe.
class ConnectionCaretaker {
public:
  using config_data_map = std::unordered_map<std::string, std::string>;

  // Constructor
  //
  // |hostname|: this host's name, reported during authentication.
  //   NOTE(review): stored as a string_view member -- the referenced string
  //   must outlive this object; confirm at the call site.
  // |config_data|: Configuration labels, read from a yaml file.
  // |loop|: Libuv event loop; not owned, must outlive this object.
  // |writer|: Ingest writer used for all outbound messages; not owned.
  // |metadata_timeout|: Time budget for each cloud-metadata fetch.
  // |heartbeat_interval|: How often a heartbeat signal is sent to the server.
  // |flush_cb|: Callback to flush any downstream buffer.
  // |set_compression_cb|: Enables/disables compression on the wire.
  // |on_authenticated_cb|: Invoked after the metadata handshake completes.
  ConnectionCaretaker(std::string_view hostname, ClientType client_type,
                      AuthzFetcher &authz_fetcher,
                      config::ConfigFile::LabelsMap const &config_data,
                      uv_loop_t *loop, flowmill::ingest::Writer &writer,
                      std::chrono::milliseconds metadata_timeout,
                      std::chrono::milliseconds heartbeat_interval,
                      std::function<void()> flush_cb,
                      std::function<void(bool)> set_compression_cb,
                      std::function<void()> on_authenticated_cb);

  ~ConnectionCaretaker();

  // Note, this function triggers metadata to be sent back, and starts the
  // heartbeat signal.
  void set_connected();

  // Note, this function stops the heartbeat timer implicitly.
  void set_disconnected();

  // Sends one heartbeat. It is public so that the timer callback can use it.
  void send_heartbeat();

  // Forces a synchronous refresh of the authz token if it's expired or due to
  // expire within the notice period.
  void refresh_authz_token();

private:
  // Sends the following information:
  // agent version, authz token (including tenant) and any config labels.
  // TODO: Send agent type as well.
  void send_metadata_header();
  void start_heartbeat();
  void stop_heartbeat();

  void flush();

  std::string_view const hostname_;
  ClientType const client_type_;

  AuthzFetcher &authz_fetcher_;
  const config::ConfigFile::LabelsMap config_data_;

  uv_loop_t *loop_ = nullptr; // not owned

  // Populated by the constructor when running on the respective platform.
  std::optional<AwsMetadata> aws_metadata_;
  std::optional<GcpInstanceMetadata> gcp_metadata_;

  const std::chrono::milliseconds heartbeat_interval_;

  std::function<void()> flush_cb_;
  std::function<void(bool)> set_compression_cb_;
  std::function<void()> on_authenticated_cb_;

  // libuv handle; its `data` field points back at this object.
  uv_timer_t heartbeat_timer_;

  flowmill::ingest::Writer &writer_;
};
|
||||
|
||||
} // namespace channel
|
||||
|
|
@ -0,0 +1,59 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#include <channel/double_write_channel.h>
|
||||
|
||||
namespace channel {
|
||||
|
||||
// Fans writes out to two underlying channels; neither channel is owned, so
// both must outlive this object.
DoubleWriteChannel::DoubleWriteChannel(Channel &first, Channel &second)
    : first_(first), second_(second)
{}
|
||||
|
||||
std::error_code DoubleWriteChannel::send(const u8 *data, int size) {
|
||||
if (auto error = first_.send(data, size)) {
|
||||
return error;
|
||||
}
|
||||
|
||||
if (second_.is_open()) {
|
||||
if (auto error = second_.send(data, size)) {
|
||||
return error;
|
||||
}
|
||||
}
|
||||
|
||||
return {};
|
||||
}
|
||||
|
||||
// Closes both underlying channels, first then second.
void DoubleWriteChannel::close()
{
  first_.close();
  second_.close();
}
|
||||
|
||||
std::error_code DoubleWriteChannel::flush() {
|
||||
if (auto error = first_.flush()) {
|
||||
return error;
|
||||
}
|
||||
|
||||
if (second_.is_open()) {
|
||||
if (auto error = second_.flush()) {
|
||||
return error;
|
||||
}
|
||||
}
|
||||
|
||||
return {};
|
||||
}
|
||||
|
||||
} // namespace channel
|
||||
|
|
@ -0,0 +1,41 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <channel/channel.h>
|
||||
|
||||
namespace channel {
|
||||
|
||||
/**
 * A Channel decorator that duplicates every write onto two underlying
 * channels. The second channel is written to and flushed only while it
 * reports itself open; the first is always used.
 */
class DoubleWriteChannel: public Channel {
public:
  // Both channels are borrowed and must outlive this object.
  DoubleWriteChannel(Channel &first, Channel &second);

  std::error_code send(const u8 *data, int size) override;

  void close() override;
  std::error_code flush() override;

  // Open only while both underlying channels are open.
  bool is_open() const override {
    return first_.is_open() && second_.is_open();
  }

private:
  Channel &first_;  // not owned
  Channel &second_; // not owned
};
|
||||
|
||||
} // namespace channel
|
||||
|
|
@ -0,0 +1,58 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#include <channel/file_channel.h>
|
||||
|
||||
#include <util/log.h>
|
||||
#include <util/log_formatters.h>
|
||||
|
||||
#include <string_view>
|
||||
|
||||
#include <cassert>
|
||||
|
||||
namespace channel {
|
||||
|
||||
// Takes ownership of the given file descriptor.
FileChannel::FileChannel(FileDescriptor fd)
    : fd_(std::move(fd))
{}
|
||||
|
||||
std::error_code FileChannel::send(const u8 *data, int size) {
|
||||
std::string_view const buffer{
|
||||
reinterpret_cast<char const *>(data),
|
||||
static_cast<std::string_view::size_type>(size)
|
||||
};
|
||||
|
||||
if (auto const error = fd_.write_all(buffer)) {
|
||||
LOG::error("error while writing {} bytes into file channel: {}", size, error);
|
||||
return error;
|
||||
}
|
||||
|
||||
return {};
|
||||
}
|
||||
|
||||
// Closes the underlying file descriptor.
void FileChannel::close()
{
  fd_.close();
}
|
||||
|
||||
std::error_code FileChannel::flush() {
|
||||
auto const error = fd_.flush_data();
|
||||
if (error) {
|
||||
LOG::error("error while flushing data for file channel: {}", error);
|
||||
}
|
||||
return error;
|
||||
}
|
||||
|
||||
} // namespace channel
|
||||
|
|
@ -0,0 +1,44 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <channel/channel.h>
|
||||
#include <util/file_ops.h>
|
||||
|
||||
namespace channel {
|
||||
|
||||
/**
 * A Channel that writes all sent data to a file descriptor.
 */
class FileChannel: public Channel {
public:
  // Takes ownership of `fd`.
  // NOTE(review): single-argument constructor is implicit; consider marking
  // it `explicit` to prevent accidental FileDescriptor -> FileChannel
  // conversions (requires checking existing call sites first).
  FileChannel(FileDescriptor fd);

  std::error_code send(const u8 *data, int size) override;

  void close() override;
  std::error_code flush() override;

  // True while the underlying descriptor is valid.
  bool valid() const { return fd_.valid(); }

  explicit operator bool() const { return valid(); }
  bool operator !() const { return !valid(); }

  bool is_open() const override { return fd_.valid(); }

private:
  FileDescriptor fd_; // owned
};
|
||||
|
||||
} // namespace channel
|
||||
|
|
@ -0,0 +1,70 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <platform/platform.h>
|
||||
#include <util/expected.h>
|
||||
|
||||
#include <system_error>
|
||||
|
||||
#include <cstring>
|
||||
|
||||
// Interface for classes that write through a buffer.
//
// Usage: start_write(n) reserves n bytes and returns where to write them;
// finish_write() commits the reservation; flush() pushes buffered bytes
// downstream.
class IBufferedWriter {
public:
  virtual ~IBufferedWriter() {}

  // Starts a new write.
  //
  // Returns the memory where the caller should write `length` bytes, or an
  // error when the request cannot be satisfied.
  //
  virtual Expected<u8 *, std::error_code> start_write(u32 length) = 0;

  // Finishes the current write.
  //
  virtual void finish_write() = 0;

  // Writes the given payload in smaller batches to fit the internal buffer.
  // Returns true on success, or the first error from start_write().
  //
  // NOTE(review): this uses std::string_view but the header does not include
  // <string_view> directly -- presumably pulled in transitively via
  // platform/platform.h or util/expected.h; confirm.
  Expected<bool, std::error_code> write_as_chunks(std::string_view payload) {
    for (auto const max = buf_size(); !payload.empty(); ) {
      // Each chunk is capped at the buffer capacity.
      auto const size = max <= payload.size() ? max : static_cast<u32>(payload.size());

      auto const allocated = start_write(size);
      if (!allocated) {
        return {unexpected, allocated.error()};
      }

      memcpy(*allocated, payload.data(), size);
      finish_write();
      payload.remove_prefix(size);
    }

    return true;
  }

  // Flushes the buffer.
  //
  virtual std::error_code flush() = 0;

  // Returns the buffer size.
  //
  virtual u32 buf_size() const = 0;

  // Whether the writer can currently accept data.
  virtual bool is_writable() const = 0;
};
|
||||
|
|
@ -0,0 +1,52 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#include <channel/internal/certificate.h>
|
||||
#include <channel/tls_error.h>
|
||||
#include <util/log.h>
|
||||
|
||||
#include <openssl/bio.h>
|
||||
#include <openssl/err.h>
|
||||
#include <openssl/pem.h>
|
||||
|
||||
// Parses a PEM-encoded X509 certificate from `cert`.
//
// @throws std::runtime_error if the BIO cannot be allocated or the PEM
//         text cannot be parsed as a certificate.
channel::internal::Certificate::Certificate(const std::string &cert)
{
  /* explicit narrowing: BIO_new_mem_buf takes an int length */
  BIO *bio = BIO_new_mem_buf(cert.c_str(), static_cast<int>(cert.length()));
  if (bio == nullptr) {
    throw std::runtime_error(
      fmt::format("could not allocate BIO for TLS cert: {}", TLSError())
    );
  }

  cert_ = PEM_read_bio_X509(bio, nullptr, nullptr, nullptr);

  /* capture the error now, so it doesn't get overridden by BIO_free.
   * FIX: ERR_get_error() returns unsigned long; storing it in an int
   * truncated the packed error code on LP64 platforms. */
  unsigned long err = ERR_get_error();
  /* free the bio. we need to free it regardless of success in reading */
  BIO_free(bio);

  /* now check success. */
  if (cert_ == nullptr) {
    throw std::runtime_error(
      fmt::format("could not read TLS cert: {}", TLSError(err))
    );
  }
}
|
||||
|
||||
// Releases the parsed certificate. X509_free accepts a null pointer, so this
// is safe even if ownership was somehow cleared.
channel::internal::Certificate::~Certificate()
{
  X509_free(cert_);
}
|
||||
|
|
@ -0,0 +1,48 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <openssl/x509.h>
|
||||
#include <string>
|
||||
|
||||
namespace channel {
|
||||
namespace internal {
|
||||
|
||||
class Certificate {
|
||||
public:
|
||||
/**
|
||||
* C'tor
|
||||
* @param key: the certificate in text
|
||||
*/
|
||||
Certificate(const std::string &cert);
|
||||
|
||||
/**
|
||||
* D'tor
|
||||
*/
|
||||
~Certificate();
|
||||
|
||||
/**
|
||||
* Accessor.
|
||||
*/
|
||||
X509 *get() { return cert_; }
|
||||
|
||||
private:
|
||||
X509 *cert_;
|
||||
};
|
||||
|
||||
} /* namespace internal */
|
||||
} /* namespace channel */
|
||||
|
|
@ -0,0 +1,53 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#include <channel/internal/private_key.h>
|
||||
#include <channel/tls_error.h>
|
||||
#include <util/log.h>
|
||||
|
||||
#include <openssl/bio.h>
|
||||
#include <openssl/err.h>
|
||||
#include <openssl/evp.h>
|
||||
#include <openssl/pem.h>
|
||||
|
||||
// Parses a PEM-encoded private key from `key`.
//
// @throws std::runtime_error if the BIO cannot be allocated or the PEM
//         text cannot be parsed as a private key.
channel::internal::PrivateKey::PrivateKey(const std::string &key)
{
  /* explicit narrowing: BIO_new_mem_buf takes an int length */
  BIO *bio = BIO_new_mem_buf(key.c_str(), static_cast<int>(key.length()));
  if (bio == nullptr) {
    throw std::runtime_error(
      fmt::format("could not allocate BIO for TLS private key: {}", TLSError())
    );
  }

  pkey_ = PEM_read_bio_PrivateKey(bio, nullptr, nullptr, nullptr);

  /* capture the error now, so it doesn't get overridden by BIO_free.
   * FIX: ERR_get_error() returns unsigned long; storing it in an int
   * truncated the packed error code on LP64 platforms. */
  unsigned long err = ERR_get_error();
  /* free the bio. we need to free it regardless of success in reading */
  BIO_free(bio);

  /* now check success. */
  if (pkey_ == nullptr) {
    throw std::runtime_error(
      fmt::format("could not read TLS private key: {}", TLSError(err))
    );
  }
}
|
||||
|
||||
// Releases the parsed key. EVP_PKEY_free accepts a null pointer, so this is
// safe even if ownership was somehow cleared.
channel::internal::PrivateKey::~PrivateKey()
{
  EVP_PKEY_free(pkey_);
}
|
||||
|
|
@ -0,0 +1,48 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <openssl/evp.h>
|
||||
#include <string>
|
||||
|
||||
namespace channel {
|
||||
namespace internal {
|
||||
|
||||
class PrivateKey {
|
||||
public:
|
||||
/**
|
||||
* C'tor
|
||||
* @param key: the RSA private key in text
|
||||
*/
|
||||
PrivateKey(const std::string &key);
|
||||
|
||||
/**
|
||||
* D'tor
|
||||
*/
|
||||
~PrivateKey();
|
||||
|
||||
/**
|
||||
* Accessor.
|
||||
*/
|
||||
EVP_PKEY *get() { return pkey_; }
|
||||
|
||||
private:
|
||||
EVP_PKEY *pkey_;
|
||||
};
|
||||
|
||||
} /* namespace internal */
|
||||
} /* namespace channel */
|
||||
|
|
@ -0,0 +1,133 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#include <channel/internal/ssl_context.h>
|
||||
#include <channel/tls_error.h>
|
||||
#include <exception>
|
||||
#include <openssl/err.h>
|
||||
|
||||
#include <util/log.h>
|
||||
|
||||
static const char *CIPHER_LIST = "ECDHE-RSA-AES256-GCM-SHA384";
|
||||
|
||||
// Builds a TLS client context (TLS 1.2+, fixed cipher, host CA store), with
// an optional client certificate/private key pair for mutual TLS.
//
// @param key: the client private key in PEM; empty string means "none"
// @param cert: the client certificate in PEM; empty string means "none"
//
// @throws std::runtime_error on any OpenSSL setup failure; the partially
//         constructed SSL_CTX is freed before rethrowing.
channel::internal::SSLContext::SSLContext(const std::string &key,
                                          const std::string &cert)
{
  if (!key.empty()) { private_key_.emplace(key); }
  if (!cert.empty()) { client_cert_.emplace(cert); }

  const SSL_METHOD *method = TLS_client_method();
  if (method == nullptr) {
    throw std::runtime_error(
      fmt::format("could not initialize TLS method: {}", TLSError())
    );
  }

  ctx_ = SSL_CTX_new(method);
  if (ctx_ == nullptr) {
    throw std::runtime_error(
      fmt::format("could not make new TLS context: {}", TLSError())
    );
  }

  try {
    /**
     * - remove compression
     * - remove renegotiation so calls to SSL_write will fail only if
     *   buffer space is required (and not because multiple round trips
     *   are required to renegotiate). this simplifies the state machine
     */
    long old_opts = SSL_CTX_set_options(ctx_, SSL_OP_NO_COMPRESSION |
                                              SSL_OP_NO_RENEGOTIATION);
    (void)(old_opts); /* unused */

    /* we only support TLS1.2 and above */
    int res = SSL_CTX_set_min_proto_version(ctx_, TLS1_2_VERSION);
    if (res != 1) {
      throw std::runtime_error(
        fmt::format("could not set minimum TLS version: {}", TLSError())
      );
    }

    res = SSL_CTX_set_cipher_list(ctx_, CIPHER_LIST);
    if (res != 1) {
      throw std::runtime_error(
        fmt::format("Could not select TLS cipher: {}", TLSError())
      );
    }

    /**
     * Use CA certificates defined from the host
     * This needs to be `SSL_CTX_set_default_verify_paths`
     * because `SSL_CTX_set_default_verify_dir` was not picking up
     * the SSL_CERT_DIR or SSL_CERT_FILE environment variables,
     * which are important for configuring OpenSSL across different distros
     *
     * For more info see:
     * https://www.happyassassin.net/2015/01/12/a-note-about-ssltls-trusted-certificate-stores-and-platforms/
     */
    res = SSL_CTX_set_default_verify_paths(ctx_);
    if (res != 1) {
      /* FIX: the format string was missing its "{}" placeholder, so the
       * underlying TLS error was silently dropped from the message. */
      throw std::runtime_error(
        fmt::format("Could not set host certificate authorities: {}", TLSError())
      );
    }

    /**
     * Client certificates
     */
    if (client_cert_.has_value()) {
      LOG::debug("using a client certificate");
      res = SSL_CTX_use_certificate(ctx_, client_cert_->get());
      if (res != 1) {
        throw std::runtime_error(
          fmt::format("Could not configure TLS client cert: {}", TLSError())
        );
      }
    }

    if (private_key_.has_value()) {
      LOG::debug("using a client private key");
      res = SSL_CTX_use_PrivateKey(ctx_, private_key_->get());
      if (res != 1) {
        throw std::runtime_error(
          fmt::format("Could not configure TLS private key: {}", TLSError())
        );
      }

      /* verify the private key matches the configured certificate */
      res = SSL_CTX_check_private_key(ctx_);
      if (res != 1) {
        throw std::runtime_error(
          fmt::format("TLS private key check failed: {}", TLSError())
        );
      }
    }

    long old_mode =
      SSL_CTX_set_mode(ctx_, SSL_MODE_ENABLE_PARTIAL_WRITE |
                             SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER);
    (void)old_mode; /* unused */
  }
  catch (std::exception const &e) {
    /* don't leak the context if setup fails part-way through */
    SSL_CTX_free(ctx_);
    throw;
  }
}
|
||||
|
||||
// Frees the OpenSSL context. SSL_CTX_free decrements a reference count and
// accepts a null pointer, so this is safe in all constructed states.
channel::internal::SSLContext::~SSLContext()
{
  SSL_CTX_free(ctx_);
}
|
||||
|
|
@ -0,0 +1,55 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <channel/internal/certificate.h>
|
||||
#include <channel/internal/private_key.h>
|
||||
#include <memory>
|
||||
#include <openssl/ssl.h>
|
||||
#include <string>
|
||||
#include <optional>
|
||||
|
||||
namespace channel {
|
||||
namespace internal {
|
||||
|
||||
class SSLContext {
|
||||
public:
|
||||
/**
|
||||
* C'tor
|
||||
* @param key: the client (private) key in PEM
|
||||
* @param cert: the client certificate, in PEM
|
||||
*/
|
||||
SSLContext(const std::string &key, const std::string &cert);
|
||||
|
||||
/**
|
||||
* D'tor
|
||||
*/
|
||||
~SSLContext();
|
||||
|
||||
/**
|
||||
* Accessor.
|
||||
*/
|
||||
SSL_CTX *get() { return ctx_; }
|
||||
|
||||
private:
|
||||
std::optional<channel::internal::PrivateKey> private_key_;
|
||||
std::optional<channel::internal::Certificate> client_cert_;
|
||||
SSL_CTX *ctx_;
|
||||
};
|
||||
|
||||
} /* namespace internal */
|
||||
} /* namespace channel */
|
||||
|
|
@ -0,0 +1,66 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#include <channel/internal/ssl_context.h>
|
||||
#include <channel/internal/tls_shim.h>
|
||||
#include <channel/tls_error.h>
|
||||
#include <util/log.h>
|
||||
|
||||
#include <openssl/err.h>
|
||||
#include <openssl/ssl.h>
|
||||
|
||||
// Builds the SSL object and the BIO pair that bridges it to the transport:
// the application talks TLS through ssl_, while encrypted bytes are shuttled
// to/from the network via transport_bio_.
channel::internal::TLSShim::TLSShim(SSLContext &ctx)
  : ssl_(nullptr), transport_bio_(nullptr)
{
  ssl_ = SSL_new(ctx.get());
  if (ssl_ == nullptr) {
    throw std::runtime_error(
      fmt::format("could not instantiate TLS object: {}", TLSError())
    );
  }

  // 0 buffer sizes ask OpenSSL for its default BIO pair buffer size
  BIO *ssl_obj_bio = nullptr;
  int res = BIO_new_bio_pair(&transport_bio_, 0, &ssl_obj_bio, 0);
  if (res != 1) {
    // undo the SSL_new above so a throwing constructor leaks nothing
    SSL_free(ssl_);
    ssl_ = nullptr;
    throw std::runtime_error(
      fmt::format("could not allocate TLS bio pair: {}", TLSError())
    );
  }

  /* we will pass two references of ssl_obj_bio_ to ssl_: read and write */
  BIO_up_ref(ssl_obj_bio);
  SSL_set0_rbio(ssl_, ssl_obj_bio);
  SSL_set0_wbio(ssl_, ssl_obj_bio);
  /* from this point onwards, ssl_ will free ssl_obj_bio_ */
}
|
||||
|
||||
// Tears down the TLS objects. ssl_ owns its side of the BIO pair (both
// references were handed over in the constructor); transport_bio_ is freed
// separately because this class kept ownership of it.
channel::internal::TLSShim::~TLSShim()
{
  /* free ssl_, this will also free ssl_obj_bio */
  SSL_free(ssl_);
  ssl_ = nullptr;

  /* free transport_bio_ */
  BIO_free(transport_bio_);
  transport_bio_ = nullptr;
}
|
||||
|
||||
bool channel::internal::TLSShim::is_closed()
|
||||
{
|
||||
return (SSL_is_init_finished(ssl_) == 0 || SSL_get_shutdown(ssl_) != 0);
|
||||
}
|
||||
|
|
@ -0,0 +1,68 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <memory>
|
||||
#include <openssl/bio.h>
|
||||
#include <openssl/ssl.h>
|
||||
#include <string>
|
||||
|
||||
namespace channel {
|
||||
namespace internal {
|
||||
|
||||
class SSLContext;
|
||||
|
||||
/**
|
||||
* A class that maintains the objects for an OpenSSL encrypting shim (gets TLS
|
||||
* data from TCP, decrypts and passes to the application, and back)
|
||||
*/
|
||||
class TLSShim {
|
||||
public:
|
||||
/**
|
||||
* C'tor
|
||||
* @param key: the client (private) key in PEM
|
||||
* @param cert: the client certificate, in PEM
|
||||
*/
|
||||
TLSShim(SSLContext &ctx);
|
||||
|
||||
/**
|
||||
* D'tor
|
||||
*/
|
||||
~TLSShim();
|
||||
|
||||
/**
|
||||
* @returns true if the TLS connection is shutdown, false otherwise
|
||||
*/
|
||||
bool is_closed();
|
||||
|
||||
/**
|
||||
* Get SSL object
|
||||
*/
|
||||
SSL *get_SSL() { return ssl_; }
|
||||
|
||||
/**
|
||||
* Get tcp-facing BIO
|
||||
*/
|
||||
BIO *get_transport_bio() { return transport_bio_; }
|
||||
|
||||
private:
|
||||
SSL *ssl_;
|
||||
BIO *transport_bio_;
|
||||
};
|
||||
|
||||
} /* namespace internal */
|
||||
} /* namespace channel */
|
||||
|
|
@ -0,0 +1,78 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#include "channel/lz4_channel.h"
|
||||
#include <stdexcept>
|
||||
|
||||
namespace channel {
|
||||
|
||||
// Compression starts disabled; the scratch buffer is sized for the worst
// case: LZ4F_compressBound() of the largest allowed payload plus room for
// the frame header that LZ4F_compressBegin writes.
Lz4Channel::Lz4Channel(Channel &channel, u32 max_data_length):
    compression_enabled_(false),
    channel_(channel),
    buffer_(LZ4F_compressBound(max_data_length, NULL) + LZ4F_HEADER_SIZE_MAX)
{
  // if-init scope: the raw context pointer only escapes into the owning
  // lz4_ctx_ smart pointer on the success path.
  if (LZ4F_cctx *lz4_context = nullptr;
      LZ4F_isError(LZ4F_createCompressionContext(&lz4_context, LZ4F_VERSION))
  ) {
    throw std::runtime_error("Lz4Channel: Failed to create LZ4 context.");
  } else {
    lz4_ctx_.reset(lz4_context);
  }
}
|
||||
|
||||
// Toggles compression for subsequent send() calls; when disabled, data is
// forwarded to the downstream channel unchanged.
void Lz4Channel::set_compression(bool enabled) {
  compression_enabled_ = enabled;
}
|
||||
|
||||
// Throws if `code` is an LZ4F error sentinel; otherwise `code` is a byte
// count from the compression call.
// NOTE(review): identifiers starting with underscore + capital letter are
// reserved to the implementation; consider renaming (e.g. CHECK_LZ4_ERROR).
#define _CHECK_LZ4_ERROR(code) \
  if (LZ4F_isError(code)) { \
    throw std::runtime_error(std::string("Lz4Channel: compression failed: ") + \
                             std::string(LZ4F_getErrorName(code))); \
  }
|
||||
|
||||
// Sends `data` downstream, compressing it into a single self-contained LZ4
// frame (begin/update/end) when compression is enabled.
//
// The scratch buffer was sized for at most `max_data_length` input bytes;
// per the header's contract it is the caller's responsibility to keep
// data_len within that bound.
std::error_code Lz4Channel::send(const u8 *data, int data_len) {
  if (!compression_enabled_) {
    return channel_.send(data, data_len);
  }

  // Reference: https://github.com/lz4/lz4/blob/dev/lib/lz4frame.h#L248
  // `tail` tracks how many bytes of buffer_ have been filled so far.
  size_t tail = 0;
  size_t res = LZ4F_compressBegin(lz4_ctx_.get(), (void *)buffer_.data(), buffer_.size(), NULL);
  _CHECK_LZ4_ERROR(res);
  tail += res;

  res = LZ4F_compressUpdate(lz4_ctx_.get(), (void *)(buffer_.data() + tail), buffer_.size() - tail,
                            (void *)data, data_len, NULL);
  _CHECK_LZ4_ERROR(res);
  tail += res;

  // finalize the frame (flushes any internally buffered bytes)
  res =
      LZ4F_compressEnd(lz4_ctx_.get(), (void *)(buffer_.data() + tail), buffer_.size() - tail, NULL);
  _CHECK_LZ4_ERROR(res);
  tail += res;

  return channel_.send(buffer_.data(), tail);
}
|
||||
|
||||
// Delegates to the downstream channel; no LZ4 state needs flushing because
// each send() emits a complete frame.
void Lz4Channel::close() {
  channel_.close();
}
|
||||
|
||||
// Delegates to the downstream channel.
std::error_code Lz4Channel::flush() {
  return channel_.flush();
}
|
||||
|
||||
} // namespace channel
|
||||
|
|
@ -0,0 +1,63 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <channel/channel.h>
|
||||
#include <platform/types.h>
|
||||
#include <util/raii.h>
|
||||
|
||||
#include <lz4frame.h>
|
||||
|
||||
#include <vector>
|
||||
|
||||
namespace channel {
|
||||
|
||||
// Lz4Channel serves as an adapter between upstream data source and downstream
// channel.
//
// When the compression is disabled, the Lz4Channel will pass any incoming
// data packets to downstream channel directly.
//
// When the compression is enabled, the Lz4Channel will compress the incoming
// data packets before relaying them.
class Lz4Channel : public Channel {
public:
  // |channel|: the downstream channel which will actually send out the data.
  // |max_data_length|: max number of bytes of any incoming data packet sent
  //                    via send() function. Note that it's caller's
  //                    responsibility to honor this constraint.
  Lz4Channel(Channel &channel, u32 max_data_length);

  // Sends |data| downstream, LZ4-frame-compressed when enabled.
  std::error_code send(const u8 *data, int data_len) override;

  // Toggles compression for subsequent send() calls.
  void set_compression(bool enabled);

  void close() override;
  std::error_code flush() override;

  bool is_open() const override { return channel_.is_open(); }

private:
  bool compression_enabled_;

  Channel &channel_;
  // scratch buffer holding one compressed frame, reused across send() calls
  std::vector<u8> buffer_;

  // owning handle for the LZ4 frame compression context
  pod_unique_ptr<LZ4F_cctx, LZ4F_errorCode_t, LZ4F_freeCompressionContext> lz4_ctx_;
};
|
||||
|
||||
} // namespace channel
|
||||
|
|
@ -0,0 +1,30 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#include <channel/channel.h>
|
||||
#include <gmock/gmock.h>
|
||||
|
||||
namespace channel {
|
||||
|
||||
// gmock test double for Channel: send() and is_open() are mockable.
// NOTE(review): this header has no `#pragma once` / include guard, unlike
// the other headers in this change -- presumably an oversight; verify.
class MockChannel : public Channel {
public:
  MockChannel() = default;
  ~MockChannel() override = default;

  MOCK_METHOD2(send, std::error_code(const u8 *, int));
  MOCK_CONST_METHOD0(is_open, bool());
}; // class MockChannel
|
||||
} // namespace channel
|
||||
|
|
@ -0,0 +1,43 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <channel/callbacks.h>
|
||||
#include <channel/channel.h>
|
||||
#include <platform/platform.h>
|
||||
|
||||
namespace channel {
|
||||
|
||||
/**
 * An interface that allows reading and writing data to a pipe/socket/etc.
 * Extends Channel with connection establishment and peer-address queries.
 */
class NetworkChannel: public Channel {
public:
  /**
   * Connects to an endpoint and starts negotiating
   * @param callbacks: the callbacks to use during this connection
   */
  virtual void connect(Callbacks &callbacks) = 0;

  /**
   * Returns the address (in binary format) that this channel is connected to,
   * if available. `nullptr` otherwise.
   */
  virtual in_addr_t const *connected_address() const = 0;
};
|
||||
|
||||
} /* namespace channel */
|
||||
|
|
@ -0,0 +1,275 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#include <channel/reconnecting_channel.h>
|
||||
#include <channel/component.h>
|
||||
|
||||
#include <util/log.h>
|
||||
|
||||
#include <stdexcept>
|
||||
|
||||
namespace channel {
|
||||
namespace {
|
||||
// Callbacks passed to libuv
|
||||
// libuv timer callback: fires when a connection attempt exceeded its
// deadline; reports the timeout through the channel's error path.
void connection_timer_cb(uv_timer_t *timer)
{
  // the channel stashed itself in timer->data at construction time
  ReconnectingChannel *channel = (ReconnectingChannel *)(timer->data);

  LOG::error("ReconnectingChannel: Connection timeout.");
  channel->on_error(UV_ETIMEDOUT);
}
|
||||
} // namespace
|
||||
|
||||
// libuv timer callback: fires when the backoff period expires and kicks off
// a new connection attempt.
// NOTE(review): unlike connection_timer_cb above, this function sits outside
// the anonymous namespace and so has external linkage -- presumably
// unintended; consider moving it inside the anonymous namespace.
void start_timer_cb(uv_timer_t *timer)
{
  ReconnectingChannel *channel = (ReconnectingChannel *)(timer->data);
  channel->to_connecting_state();
}
|
||||
|
||||
// Sets up the channel and its two libuv timers (connection deadline and
// reconnect backoff) on `loop`. The channel starts INACTIVE; call
// start_connect() to begin connecting.
//
// @param intake_config: intake endpoint configuration (moved into a member)
// @param loop: libuv loop that owns the timers and the connection
// @param buffer_size: size of the upstream connection's write buffer
ReconnectingChannel::ReconnectingChannel(config::IntakeConfig intake_config,
                                         uv_loop_t &loop,
                                         std::size_t buffer_size)
    : loop_(loop),
      intake_config_(std::move(intake_config)),
      network_channel_(intake_config_.make_channel(loop)),
      // FIX: read the compression setting from intake_config_ (the member).
      // The `intake_config` parameter was already moved-from earlier in this
      // initializer list, so calling allow_compression() on it was a
      // use-after-move.
      upstream_connection_(buffer_size, intake_config_.allow_compression(), *network_channel_),
      state_(State::INACTIVE)
{
  int res = uv_timer_init(&loop_, &start_timer_);
  if (res != 0) {
    LOG::error("ReconnectingChannel: Cannot init start_timer");
  }
  // let the libuv callbacks find their way back to this object
  start_timer_.data = this;

  res = uv_timer_init(&loop_, &connection_timer_);
  if (res != 0) {
    LOG::error("ReconnectingChannel: Cannot init connection_timer");
  }
  connection_timer_.data = this;
}
|
||||
|
||||
// Closes the connection and asks libuv to dispose of both timers. uv_close
// completes asynchronously on the loop; the handles live inside this object,
// so the loop is expected to outlive/drain them -- TODO confirm.
ReconnectingChannel::~ReconnectingChannel()
{
  close();
  uv_close((uv_handle_t *)&connection_timer_, NULL);
  uv_close((uv_handle_t *)&start_timer_, NULL);
}
|
||||
|
||||
// Adds an observer that will be forwarded on_connect/on_error/on_closed
// events; inserting the same pointer twice is a no-op (set semantics).
void ReconnectingChannel::register_pipeline_observer(Callbacks *observer) {
  pipeline_observers_.insert(observer);
}
|
||||
|
||||
// Removes a previously registered observer; unknown pointers are a no-op.
void ReconnectingChannel::unregister_pipeline_observer(Callbacks *observer) {
  pipeline_observers_.erase(observer);
}
|
||||
|
||||
//// Callbacks interface ////
|
||||
// Data arriving from the server is currently ignored; report the whole
// buffer as consumed so the transport does not re-deliver it.
u32 ReconnectingChannel::received_data(const u8 *data, int data_len)
{
  (void)data; // intentionally unused
  return data_len;
}
|
||||
|
||||
// Connection-level error: notify every registered observer, then tear the
// connection down (reconnection is scheduled later via on_closed).
void ReconnectingChannel::on_error(int err)
{
  LOG::trace_in(Component::reconnecting_channel,
                "ReconnectingChannel: on_error(). prev_state: {}", state_string());
  LOG::warn("ReconnectingChannel: Connection error: {}", uv_err_name(err));
  for (auto *observer : pipeline_observers_) {
    observer->on_error(err);
  }

  to_closing_state();
}
|
||||
|
||||
// Connection fully closed: notify observers, then enter backoff so a
// reconnect attempt is scheduled.
void ReconnectingChannel::on_closed()
{
  LOG::trace_in(Component::reconnecting_channel,
                "ReconnectingChannel: on_closed(). State: {}", state_string());

  for (auto *observer : pipeline_observers_) {
    observer->on_closed();
  }

  to_backoff_state();
}
|
||||
|
||||
// Connection established: reset per-connection state (byte counter,
// compression off until renegotiated), cancel the pending timers, and move
// to CONNECTED before notifying observers.
void ReconnectingChannel::on_connect()
{
  LOG::trace_in(Component::reconnecting_channel,
                "ReconnectingChannel: on_connect(). State: {}", state_string());
  LOG::info("ReconnectingChannel: Remote connection established.");

  num_bytes_sent_ = 0;
  set_compression(false);

  stop_all_timers();
  assert(state_ == State::CONNECTING);
  state_ = State::CONNECTED;

  for (auto *observer : pipeline_observers_) {
    observer->on_connect();
  }
}
|
||||
|
||||
// Forwards the compression toggle to the upstream connection.
void ReconnectingChannel::set_compression(bool enabled) {
  upstream_connection_.set_compression(enabled);
}
|
||||
|
||||
//// Channel interface ////
|
||||
// Queues `data` into the upstream connection's buffered writer.
//
// Returns `not_connected` when the channel is not in CONNECTED state, the
// writer's error on overflow, and a default (success) error_code otherwise.
std::error_code ReconnectingChannel::send(const u8 *data, int data_len)
{
  if (state_ != State::CONNECTED) {
    LOG::trace_in(Component::reconnecting_channel,
                  "ReconnectingChannel: Attempt to send when the channel is NOT connected.");
    return std::make_error_code(std::errc::not_connected);
  }

  auto &buffered_writer = upstream_connection_.buffered_writer();

  // counter is reset on every successful connect; used only for tracing
  num_bytes_sent_ += data_len;
  LOG::trace_in(Component::reconnecting_channel,
                "Sending ReconnectingChannel: {} bytes. {} bytes sent in total",
                data_len, num_bytes_sent_);

  auto buffer = buffered_writer.start_write(data_len);
  if (!buffer) {
    LOG::error("ReconnectingChannel: buffered writer overflow: {}", buffer.error());
    return buffer.error();
  }
  memcpy(*buffer, data, data_len);
  buffered_writer.finish_write();
  return {};
}
|
||||
|
||||
// Exposes the upstream connection's buffered writer for callers that want
// to write directly instead of going through send().
BufferedWriter &ReconnectingChannel::buffered_writer() {
  return upstream_connection_.buffered_writer();
}
|
||||
|
||||
// Deactivates the channel: no reconnect will be scheduled (state goes to
// INACTIVE and both timers are stopped) and the connection is closed.
void ReconnectingChannel::close()
{
  state_ = State::INACTIVE;
  stop_all_timers();
  upstream_connection_.close();
}
|
||||
|
||||
// Flushes the upstream connection's buffered data.
std::error_code ReconnectingChannel::flush()
{
  return upstream_connection_.flush();
}
|
||||
|
||||
// How long (in milliseconds) to wait in BACKOFF before retrying a connect.
// TODO: better back-off mechanism here (e.g. exponential with jitter).
u64 ReconnectingChannel::get_start_wait_time() const
{
  constexpr u64 kBackoffMs = 1000;
  return kBackoffMs;
}
|
||||
|
||||
// Begins a connection attempt. On an immediate (synchronous) failure, falls
// back to BACKOFF; otherwise arms the connection-deadline timer, whose
// expiry routes through on_error(UV_ETIMEDOUT).
void ReconnectingChannel::to_connecting_state()
{
  LOG::trace_in(Component::reconnecting_channel,
                "ReconnectingChannel: to_connecting_state(). State: {}", state_string());

  state_ = State::CONNECTING;

  try {
    upstream_connection_.connect(*this);
  } catch (std::exception &e) {
    LOG::warn("ReconnectingChannel: Connection attempt failed; will backoff "
              "and retry. Error: {}",
              e.what());
    to_backoff_state();
    return;
  }
  // single-shot timer (repeat == 0): one deadline per attempt
  stop_all_timers();
  int res = uv_timer_start(&connection_timer_, connection_timer_cb,
                           connection_timeout_ms_, 0);

  if (res != 0) {
    LOG::error("ReconnectingChannel: Cannot start connection_timer {}",
               uv_err_name(res));
  }
}
|
||||
|
||||
// Enters BACKOFF and arms the single-shot start timer; when it fires,
// start_timer_cb transitions back to CONNECTING.
// NOTE(review): the trace message says "start_timer()" -- looks like a
// leftover from a rename of this function; verify before changing the string.
void ReconnectingChannel::to_backoff_state()
{
  LOG::trace_in(Component::reconnecting_channel,
                "ReconnectingChannel: start_timer(). State: {}", state_string());
  state_ = State::BACKOFF;

  stop_all_timers();
  int res =
      uv_timer_start(&start_timer_, start_timer_cb, get_start_wait_time(), 0);

  if (res != 0) {
    LOG::error("ReconnectingChannel: Cannot start start_timer {}",
               uv_err_name(res));
  }
}
|
||||
|
||||
// Public entry point: begin connecting. Only valid from INACTIVE (enforced
// by assert in debug builds).
void ReconnectingChannel::start_connect()
{
  LOG::trace_in(Component::reconnecting_channel,
                "ReconnectingChannel: start_connect(). State: {}", state_string());

  assert(state_ == State::INACTIVE);

  to_connecting_state();
}
|
||||
|
||||
// Tears down the current connection after an error. Reconnection is NOT
// scheduled here: the close is expected to surface through on_closed(),
// which performs the transition to BACKOFF (hence the commented-out call).
void ReconnectingChannel::to_closing_state()
{
  state_ = State::CLOSING;
  stop_all_timers();
  try {
    upstream_connection_.close();
  } catch (std::exception &e) {
    LOG::warn("ReconnectingChannel: Cannot close connection: {}", e.what());
  }
  // to_backoff_state();
}
|
||||
|
||||
const char *ReconnectingChannel::state_string() const
|
||||
{
|
||||
switch (state_) {
|
||||
case State::INACTIVE:
|
||||
return "INACTIVE";
|
||||
case State::CONNECTING:
|
||||
return "CONNECTING";
|
||||
case State::CONNECTED:
|
||||
return "CONNECTED";
|
||||
case State::CLOSING:
|
||||
return "CLOSING";
|
||||
case State::BACKOFF:
|
||||
return "BACKOFF";
|
||||
}
|
||||
|
||||
// Make compiler happy.
|
||||
// TODO: remove this line if we switch to clang++
|
||||
return "UNKNOWN";
|
||||
}
|
||||
|
||||
// Returns the channel's current connection-state machine state.
ReconnectingChannel::State ReconnectingChannel::state() const
{
  return state_;
}
|
||||
|
||||
void ReconnectingChannel::stop_all_timers()
{
  // Stops both reconnect-related timers.  uv_timer_stop on a timer that is
  // not running is a harmless no-op, so this is safe in every state.
  uv_timer_stop(&connection_timer_);
  uv_timer_stop(&start_timer_);
}
|
||||
|
||||
} // namespace channel
|
||||
|
|
@ -0,0 +1,139 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <uv.h>
|
||||
|
||||
#include <channel/callbacks.h>
|
||||
#include <channel/channel.h>
|
||||
#include <channel/channel.h>
|
||||
#include <channel/tls_handler.h>
|
||||
#include <channel/upstream_connection.h>
|
||||
#include <config/intake_config.h>
|
||||
|
||||
#include <set>
|
||||
|
||||
namespace channel {
|
||||
|
||||
// ReconnectingChannel manages the connection to a remote server.
//
// It maintains a TLS over TCP connection, and retries connection when
// network error occurs.
//
// Note that this class is NOT thread safe.
class ReconnectingChannel : public Channel,
                            public Callbacks {
public:
  ReconnectingChannel(config::IntakeConfig intake_config,
                      uv_loop_t &loop,
                      std::size_t buffer_size);
  ~ReconnectingChannel() final;

  // Registers/unregisters an observer.
  //
  // Expected to be used only during the initialization or clean-up phase.
  void register_pipeline_observer(Callbacks *cb);
  void unregister_pipeline_observer(Callbacks *cb);

  // Enables or disables compression.
  void set_compression(bool enabled);

  // Channel interface
  std::error_code send(const u8 *data, int data_len) override;

  // Callbacks interface.
  u32 received_data(const u8 *data, int data_len) override;
  void on_error(int err) override;
  void on_closed() override;
  void on_connect() override;

  // Starts the connection to remote server.
  //
  // It can only be called once.
  void start_connect();

  // Writer used to stage outgoing bytes before they hit the wire.
  BufferedWriter &buffered_writer();

  void close() override;

  // Flushes and sends out any remaining messages in the send buffer.
  std::error_code flush() override;

  // Connection life-cycle states.
  // NOTE(review): transitions inferred from the to_*_state() method names
  // below -- confirm against the .cc implementation.
  enum class State : int { INACTIVE, CONNECTING, CONNECTED, CLOSING, BACKOFF };

  // Human-readable name of the current state (for logging).
  const char *state_string() const;
  State state() const;

  config::IntakeConfig const &intake_config() const { return intake_config_; }

  bool is_open() const override { return upstream_connection_.is_open(); }

private:
  friend void start_timer_cb(uv_timer_t *timer);

  // How long we should wait for the connection to be established, before
  // it times out and reconnects again.
  static constexpr u64 connection_timeout_ms_ = 10000;

  // Starts the connection_timer_ to track if the TCP connection is
  // established within |connection_timeout_ms_|
  void to_connecting_state();

  // Starts the start_timer_, to let the service sleep for certain period
  // of time before start a new connection.
  void to_backoff_state();

  // Closes the TCP connection, and timers.
  void to_closing_state();

  // Stops all active timers
  void stop_all_timers();

  // Returns how much time the system should wait, in microsecond,
  // before it tries to start a new connection.
  u64 get_start_wait_time() const;

  // UV loop that this object runs on.
  uv_loop_t &loop_;

  // Intake endpoint config
  config::IntakeConfig const intake_config_;

  // Handles low-level TLS, TCP connection.
  std::unique_ptr<NetworkChannel> network_channel_;
  UpstreamConnection upstream_connection_;

  // Current state of the pipeline.
  State state_;

  // The timer to clock the waiting period before TCP connection restarts.
  // ([INACTIVE | BACKOFF] -> CONNECTING)
  uv_timer_t start_timer_;

  // The timer to track if connection is established successfully.
  // (CONNECTING -> CONNECTED)
  uv_timer_t connection_timer_;

  // Observers interested in the status of the pipeline.
  std::set<Callbacks *> pipeline_observers_;

  // Number of bytes this channel has sent, or is about to send, back to
  // flowmill pipeline server.
  u64 num_bytes_sent_ = 0;
};
|
||||
|
||||
} // namespace channel
|
||||
|
|
@ -0,0 +1,401 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#include <channel/tcp_channel.h>
|
||||
|
||||
#include <channel/component.h>
|
||||
#include <platform/platform.h>
|
||||
#include <util/defer.h>
|
||||
#include <util/error_handling.h>
|
||||
#include <util/log.h>
|
||||
#include <util/log_formatters.h>
|
||||
#include <util/uv_helpers.h>
|
||||
|
||||
#include <arpa/inet.h>
|
||||
#include <netdb.h>
|
||||
#include <netinet/tcp.h> //for disabling Nagle's
|
||||
#include <poll.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/types.h>
|
||||
|
||||
#include <stdexcept>
|
||||
|
||||
#define INVALID_FD -1
|
||||
|
||||
namespace channel {
|
||||
|
||||
// Indexed by the boolean `connected_` flag when formatting log messages.
static constexpr std::string_view CONNECTED_DISCONNECTED[2] = {"disconnected", "connected"};
|
||||
/**
 * Callback passed to uv_read_start that allocates memory for the read callback
 *
 * Hands libuv the unused tail of the fixed-size rx_buffer_.  At most one
 * allocation may be outstanding at a time; allocated_ tracks that and is
 * cleared again by conn_read_cb.
 */
void TCPChannel::conn_read_alloc_cb(uv_handle_t *handle, size_t suggested_size,
                                    uv_buf_t *buf)
{
  uv_tcp_t *tcp_conn = (uv_tcp_t *)handle;
  TCPChannel *conn = (TCPChannel *)tcp_conn->data;

  if (conn->allocated_) {
    // Previous allocation has not been consumed yet -- hand libuv a null
    // buffer to refuse a second allocation.
    buf->base = nullptr;
    return;
  }

  /* assign the free buffer, reserving bytes for overflow */
  buf->base = (char *)conn->rx_buffer_ + conn->rx_len_;
  buf->len = TCPChannel::rx_buffer_size - conn->rx_len_;
  conn->allocated_ = true;
}
|
||||
|
||||
/**
 * Read callback
 *
 * Accounts for the bytes libuv just placed into rx_buffer_ (see
 * conn_read_alloc_cb), forwards the accumulated bytes to
 * callbacks_->received_data(), and compacts whatever the handler did not
 * consume back to the front of the buffer for the next read.
 */
void TCPChannel::conn_read_cb(uv_stream_t *stream, ssize_t nread,
                              const uv_buf_t *buf)
{
  uv_tcp_t *tcp_conn = (uv_tcp_t *)stream;
  TCPChannel *conn = (TCPChannel *)tcp_conn->data;

  if (nread < 0) {
    /* oh-oh, read error */
    /* free buffer if it is valid */
    if (buf->base) {
      conn->allocated_ = false;
    }

    /* need to notify */
    conn->connected_ = false;
    conn->callbacks_->on_error(nread);
    return;
  }

  /* "merge" the read data into the buffer */
  // nread == 0 is fine here: rx_len_ is unchanged and the handler simply
  // sees the same bytes again.
  conn->rx_len_ += nread;
  conn->allocated_ = false;

  /* read all complete messages from buffer */
  try {
    // received_data() returns how many bytes it consumed; the remainder is
    // kept and re-offered once more data arrives.
    u32 res = conn->callbacks_->received_data((u8 *)conn->rx_buffer_, conn->rx_len_);

    if (res > 0) {
      ASSUME(res <= conn->rx_len_);
      conn->rx_len_ -= res;
      memmove(conn->rx_buffer_, (u8 *)conn->rx_buffer_ + res, conn->rx_len_);
    }
  } catch (const std::exception &e) {
    // A throwing handler is treated as a protocol error on this connection.
    LOG::error("TCPChannel: error handling received data: '{}'", e.what());
    conn->connected_ = false;
    conn->callbacks_->on_error(-EPROTO);
    return;
  }

  /* check that we don't exceed the buffer size */
  // If the handler consumed nothing and the buffer is completely full, no
  // further progress is possible: report overflow.
  if (conn->rx_len_ == TCPChannel::rx_buffer_size) {
    conn->connected_ = false;
    conn->callbacks_->on_error(-EOVERFLOW);
    return;
  }
}
|
||||
|
||||
// uv_close completion used by close_permanently(): notifies the owner via
// on_closed() and leaves the handle un-initialized (terminal close).
void TCPChannel::conn_close_cb(uv_handle_t *handle)
{
  TCPChannel *conn = (TCPChannel *)handle->data;
  LOG::trace_in(channel::Component::tcp, "TCPChannel::{}: calling on_closed()", __func__);
  conn->callbacks_->on_closed();
}
|
||||
|
||||
// uv_close completion used by close(): notifies the owner via on_closed(),
// then re-initializes the uv handle so the channel can be reused for a new
// connect()/accept().
void TCPChannel::conn_close_and_reinit_cb(uv_handle_t *handle)
{
  TCPChannel *conn = (TCPChannel *)handle->data;
  LOG::trace_in(channel::Component::tcp, "TCPChannel::{}: calling on_closed()", __func__);
  conn->callbacks_->on_closed();

  /* re-initialize so we can reuse the handle */
  LOG::trace_in(channel::Component::tcp, "TCPChannel::{}: calling reinit()", __func__);
  conn->reinit(handle->loop);
}
|
||||
|
||||
// uv_tcp_connect completion: on failure reports on_error(); on success marks
// the channel connected, notifies on_connect(), and starts the read loop.
void TCPChannel::conn_connect_cb(uv_connect_t *req, int status)
{
  LOG::trace_in(channel::Component::tcp, "TCPChannel::{}()", __func__);
  auto tcp = (TCPChannel *)req->handle->data;

  if (status < 0) {
    // libuv reports negated errno-style codes, hence the -status.
    LOG::trace_in(channel::Component::tcp,
        "TCPChannel::{}(): error {}",
        __func__, static_cast<std::errc>(-status));
    /* error occurred */
    tcp->connected_ = false;
    tcp->callbacks_->on_error(status);
    return;
  }

  tcp->connected_ = true;

  LOG::trace_in(channel::Component::tcp,
      "TCPChannel::{}(): calling callback::on_connect()", __func__);
  tcp->callbacks_->on_connect();

  tcp->start_processing();
}
|
||||
|
||||
// uv_write completion: reports errors (unless the handle is already being
// closed) and releases the send buffer that carried this request.
void TCPChannel::conn_write_cb(uv_write_t *req, int status)
{
  LOG::trace_in(channel::Component::tcp, "TCPChannel::{}()", __func__);
  auto tcp = (TCPChannel *)req->handle->data;

  if (status < 0) {
    /* no need to notify if close() was called, otherwise -- notify */
    if (!uv_is_closing((uv_handle_t *)req->handle)) {
      LOG::trace_in(channel::Component::tcp,
          "TCPChannel::{}: connection not closing, calling close on handle()", __func__);
      tcp->connected_ = false;
      tcp->callbacks_->on_error(status);
    }
  }

  /* assumes req is the first field in send_buffer_t */
  // Because `req` is the first member, its address equals the address of
  // the enclosing send_buffer_t allocated in allocate_send_buffer().
  free(req);
}
|
||||
|
||||
// Constructs a server-side channel: the uv handle is initialized and the
// socket is ready for accept().
TCPChannel::TCPChannel(uv_loop_t &loop) {
  reinit(&loop);
}
|
||||
|
||||
// Constructs a client-side channel targeting addr:port, optionally
// tunnelling through an HTTP proxy; the uv handle is initialized and the
// socket is ready for connect().
TCPChannel::TCPChannel(
    uv_loop_t &loop,
    std::string addr,
    std::string port,
    std::optional<config::HttpProxyConfig> proxy
):
  addr_(std::move(addr)),
  port_(std::move(port)),
  proxy_(std::move(proxy))
{
  reinit(&loop);
}
|
||||
|
||||
TCPChannel::~TCPChannel()
{
  // Previous trace message was a stale copy-paste from close_internal()
  // ("connection not closing, calling close on handle()"); use the plain
  // function-entry trace like the other methods.
  LOG::trace_in(channel::Component::tcp, "TCPChannel::{}()", __func__);
  // The uv handle must already be closing/closed (via close() or
  // close_permanently()); destroying a live handle would leave libuv with a
  // dangling pointer to this object.
  ASSUME(uv_is_closing((uv_handle_t *)&conn_));
}
|
||||
|
||||
// Resolves the target (or its proxy) and starts an asynchronous TCP
// connection; progress is reported through `callbacks` (possibly wrapped by
// the proxy handshake).  Failures surface via callbacks_->on_error(), except
// for getaddrinfo failures which currently abort (see TODO).
void TCPChannel::connect(Callbacks &callbacks) {
  LOG::trace_in(channel::Component::tcp, "TCPChannel::{}()", __func__);

  // With a proxy configured, wrap the caller's callbacks so the proxy
  // handshake runs before the caller observes on_connect().
  if (proxy_) {
    callback_wrapper_ = proxy_->make_callback(addr_, port_, *this, callbacks);
  }

  callbacks_ = callback_wrapper_
      ? callback_wrapper_.get()
      : &callbacks;

  struct addrinfo hints;
  memset(&hints, 0, sizeof hints);
  hints.ai_family = AF_UNSPEC; // allow both IPv4 and IPv6 results
  hints.ai_socktype = SOCK_STREAM;

  struct addrinfo *res = nullptr;
  // Dial the proxy when one is configured, otherwise the intake directly.
  auto const addr = proxy_ ? proxy_->host().c_str() : addr_.c_str();
  auto const port = proxy_ ? proxy_->port().c_str() : port_.c_str();
  LOG::debug(
    "TCPChannel::{}: Connecting to {} @ {}:{}",
    __func__, proxy_ ? "proxy" : "intake", addr, port
  );
  int status = getaddrinfo(addr, port, &hints, &res);

  if (status != 0) {
    // getaddrinfo returns EAI_* codes, not errno values, so format the
    // failure with gai_strerror rather than std::errc.
    LOG::critical("getaddrinfo failed: {} - calling abort", gai_strerror(status));
    // TODO: gracefully handle getaddrinfo errors
    std::abort();
  }

  Defer free_addrinfo([&res] { freeaddrinfo(res); });

  // Remember the resolved IPv4 address so connected_address() can report it.
  if (res->ai_addr->sa_family == AF_INET) {
    struct sockaddr_in *sa = (struct sockaddr_in *)(res->ai_addr);
    connected_address_available_ = true;
    connected_address_ = sa->sin_addr.s_addr;
  }

  if (auto const error = ::uv_tcp_connect(&connect_req_, &conn_, res->ai_addr, &conn_connect_cb)) {
    LOG::error(
      "TCPChannel::{}: failed to establish connection to {}:{}: {}",
      __func__, addr, port, uv_error_t{error}
    );
    callbacks_->on_error(error);
  }
}
|
||||
|
||||
// Accepts a pending connection from `listener` onto this channel's handle
// and starts the read loop.  Must be called from the listener's connection
// callback, where uv_accept is guaranteed to succeed.
void TCPChannel::accept(Callbacks &callbacks, uv_tcp_t *listener) {
  LOG::trace_in(channel::Component::tcp, "TCPChannel::{}()", __func__);
  callbacks_ = &callbacks;

  if (auto const error = ::uv_accept(
    reinterpret_cast<uv_stream_t *>(listener),
    reinterpret_cast<uv_stream_t *>(&conn_)
  )) {
    // this is guaranteed to succeed when called from the connection callback:
    // http://docs.libuv.org/en/v1.x/stream.html#c.uv_accept
    LOG::error(
      "TCPChannel::{}: failed to accept incoming connections: {}",
      __func__, uv_error_t{error}
    );
    // NOTE(review): if CHECK_UV aborts/asserts on failure, the on_error
    // call below is unreachable -- confirm CHECK_UV's semantics.
    CHECK_UV(error); // TODO: verify that users of `accept` properly handle `on_error`
    callbacks_->on_error(error);
  } else {
    connected_ = true;
    start_processing();
  }
}
|
||||
|
||||
// Adopts an already-open socket `fd` as this channel's TCP handle and
// starts the read loop.
void TCPChannel::open_fd(Callbacks &callbacks, const uv_os_sock_t fd) {
  LOG::trace_in(channel::Component::tcp, "TCPChannel::{}()", __func__);
  callbacks_ = &callbacks;

  if (auto const error = ::uv_tcp_open(&conn_, fd)) {
    LOG::error(
      "TCPChannel::{}: failed to open existing file descriptor as a TCP handle: {}",
      __func__, uv_error_t{error}
    );
    // NOTE(review): if CHECK_UV aborts/asserts on failure, the on_error
    // call below is unreachable -- confirm CHECK_UV's semantics.
    CHECK_UV(error); // TODO: verify that users of `open_fd` properly handle `on_error`
    callbacks_->on_error(error);
  } else {
    connected_ = true;
    start_processing();
  }
}
|
||||
|
||||
// Closes the connection and re-initializes the handle afterwards so the
// channel can be reused for another connect()/accept().
void TCPChannel::close() {
  LOG::trace_in(channel::Component::tcp, "TCPChannel::{}()", __func__);
  close_internal(&conn_close_and_reinit_cb);
}
|
||||
|
||||
// Terminal close: the handle is NOT re-initialized afterwards.  The
// destructor asserts the handle is closing, so this (or close()) should be
// called before destruction.
void TCPChannel::close_permanently()
{
  LOG::trace_in(channel::Component::tcp, "TCPChannel::{}()", __func__);
  close_internal(&conn_close_cb);
}
|
||||
|
||||
// Copies `data` into a freshly allocated send buffer and queues it for
// asynchronous write; the buffer is freed by the write-completion callback.
std::error_code TCPChannel::send(const u8 *data, int data_len) {
  LOG::trace_in(channel::Component::tcp,
      "TCPChannel::{}(len:{})", __func__, data_len);
  auto send_buffer = allocate_send_buffer(data_len);
  if (!send_buffer) {
    // TODO: gracefully handle out-of-memory errors
    std::abort();
    // Unreachable while the abort above remains; documents the intended
    // error result once OOM is handled gracefully.
    return std::make_error_code(std::errc::not_enough_memory);
  }

  memcpy(send_buffer->data, data, data_len);
  send_buffer->len = data_len;

  return send(send_buffer);
}
|
||||
|
||||
// Allocates a zero-initialized send_buffer_t with room for `size` payload
// bytes.  The header is rounded up to a multiple of 8 so the u64 `data`
// payload that follows stays naturally aligned.  Returns nullptr on OOM.
struct TCPChannel::send_buffer_t *TCPChannel::allocate_send_buffer(u32 size)
{
  u32 const header_size = (sizeof(struct send_buffer_t) + 7) & ~7;
  u32 const mem_size = header_size + size;

  // calloc zeroes the allocation, ensuring we don't exfiltrate
  // uninitialized data over the wire.
  auto *buffer = (struct send_buffer_t *)calloc(1, mem_size);
  if (buffer == nullptr) {
    LOG::critical("Failed to allocate send buffer of size {} mem_size {}", size, mem_size);
    return nullptr;
  }

  return buffer;
}
|
||||
|
||||
// Queues `send_buffer` for asynchronous write.  On success, ownership of the
// buffer passes to libuv and conn_write_cb frees it when the write
// completes.  On synchronous failure, the buffer is freed here (previously
// it leaked, since conn_write_cb never runs for a request uv_write rejects).
std::error_code TCPChannel::send(struct send_buffer_t *send_buffer)
{
  uv_buf_t uv_buf = {.base = (char *)send_buffer->data,
                     .len = send_buffer->len};

  if (auto const error = ::uv_write(
    &send_buffer->req,
    reinterpret_cast<uv_stream_t *>(&conn_),
    &uv_buf, 1, conn_write_cb
  )) {
    LOG::error(
      "TCPChannel::{}: failed to write {} bytes into {} channel: {}",
      __func__, send_buffer->len, CONNECTED_DISCONNECTED[connected_], uv_error_t{error}
    );

    // uv_write failed synchronously: the request was never queued and
    // conn_write_cb will not be invoked, so release the buffer here.
    free(send_buffer);

    callbacks_->on_error(error);
    return {error, libuv_category()};
  }

  return {};
}
|
||||
|
||||
// Resets receive-buffer bookkeeping and (re-)initializes the uv TCP handle
// on `loop`, wiring handle->data back to this object for the callbacks.
void TCPChannel::reinit(uv_loop_t *loop)
{
  LOG::trace_in(channel::Component::tcp, "TCPChannel::{}()", __func__);
  /* reinit RX buffers */
  rx_len_ = 0;
  allocated_ = false;

  /* re-init handle */
  CHECK_UV(uv_tcp_init(loop, &conn_));
  conn_.data = this; // lets the static libuv callbacks recover this object
}
|
||||
|
||||
// Finishes socket configuration (TCP_NODELAY) and starts the libuv read
// loop; called once the connection is established/accepted/adopted.
void TCPChannel::start_processing() {
  LOG::trace_in(channel::Component::tcp, "TCPChannel::{}()", __func__);

  // Disable Nagle's algorithm so small writes go out immediately.
  if (auto const error = ::uv_tcp_nodelay(&conn_, true)) {
    LOG::error(
      "TCPChannel::{}: failed to disable Nagle's algorithm: {}",
      __func__, uv_error_t{error}
    );
    // this error is not critical, we may continue
  }

  if (auto const error = ::uv_read_start(
    reinterpret_cast<uv_stream_t *>(&conn_),
    &conn_read_alloc_cb,
    conn_read_cb
  )) {
    LOG::error(
      "TCPChannel::{}: failed to start read loop on channel: {}",
      __func__, uv_error_t{error}
    );

    callbacks_->on_error(error);
  }
}
|
||||
|
||||
// Returns a pointer to the resolved peer address once it is known; callers
// get nullptr until then.
in_addr_t const *channel::TCPChannel::connected_address() const {
  return connected_address_available_ ? &connected_address_ : nullptr;
}
|
||||
|
||||
// Shared implementation of close()/close_permanently(): clears connection
// state and issues uv_close with the chosen completion callback.
void channel::TCPChannel::close_internal(const uv_close_cb close_cb)
{
  LOG::trace_in(channel::Component::tcp, "TCPChannel::{}()", __func__);
  connected_address_available_ = false;
  connected_ = false;
  // uv_close may be issued at most once per handle; skip if a close is
  // already in flight.
  if (!uv_is_closing((uv_handle_t *)&conn_)) {
    LOG::trace_in(channel::Component::tcp,
        "TCPChannel::{}: connection not closing, calling close on handle()", __func__);
    uv_close((uv_handle_t *)&conn_, close_cb);
  }
}
|
||||
|
||||
} // namespace channel
|
||||
|
|
@ -0,0 +1,173 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <channel/callbacks.h>
|
||||
#include <channel/network_channel.h>
|
||||
#include <config/http_proxy_config.h>
|
||||
#include <platform/platform.h>
|
||||
|
||||
#include <uv.h>
|
||||
|
||||
namespace channel {
|
||||
|
||||
// A view of a byte range with a cursor.
// NOTE(review): no use sites are visible in this chunk -- presumably
// `offset` tracks bytes already consumed out of `len` total; confirm at the
// call sites.
struct buffer_t {
  char *base;
  u32 offset;
  u32 len;
};
|
||||
|
||||
/**
 * A TCP channel
 *
 * Errors for on_error callback:
 *  -EPROTO: handler threw exception
 *  -EOVERFLOW: overflow occupies entire buffer SERVER_CONN_BUFFER_SIZE and not
 *      handled by handler
 *  libuv errors.
 */
class TCPChannel : public NetworkChannel {
public:
  // Fixed capacity, in bytes, of the receive buffer (rx_buffer_).
  static constexpr u32 rx_buffer_size = (64 * 1024);

  // One outgoing write: the libuv write request followed by the payload.
  // `req` must remain the first member -- the write-completion callback
  // recovers and frees the whole buffer through the request pointer.
  struct send_buffer_t {
    uv_write_t req; /* must be first */
    u32 len;        // number of payload bytes stored in `data`
    u64 data[0];    // flexible payload (zero-length array: compiler extension)
  };

  /**
   * c'tor -- leaves socket ready for accept()
   */
  TCPChannel(uv_loop_t &loop);

  /**
   * c'tor -- leaves socket ready for connect()
   *
   * @param addr: ip address or hostname
   * @param port: string holding port number
   * @param proxy: optional HTTP proxy to tunnel through
   */
  TCPChannel(
    uv_loop_t &loop,
    std::string addr,
    std::string port,
    std::optional<config::HttpProxyConfig> proxy = {}
  );

  /**
   * d'tor -- requires the handle to be closing/closed already
   */
  virtual ~TCPChannel();

  /**
   * Connects to the endpoint given at construction time
   * @param callbacks: the callbacks to use during this connection
   */
  void connect(Callbacks &callbacks) override;

  /**
   * Accepts a connection
   *
   * @param callbacks: the callbacks to use during this connection
   * @param listener: the listening socket to accept on
   */
  void accept(Callbacks &callbacks, uv_tcp_t *listener);

  /**
   * Opens a TCP connection from the file descriptor.
   */
  void open_fd(Callbacks &callbacks, uv_os_sock_t fd);

  /**
   * closes the channel. Callbacks::on_close will be called; the handle is
   * re-initialized afterwards so the channel can be reused.
   */
  void close() override;

  /**
   * Closes the channel, and does not try to reinitialize.
   */
  void close_permanently();

  /**
   * @see Channel::send
   */
  std::error_code send(const u8 *data, int data_len) override;

  /**
   * Allocates a send buffer capable of holding @size bytes.
   *
   * It is the responsibility of the caller to call send() with the buffer.
   */
  struct send_buffer_t *allocate_send_buffer(u32 size);

  /**
   * Sends the given TCPChannel::send_buffer_t allocated with
   * allocate_send_buffer().
   */
  std::error_code send(struct send_buffer_t *send_buffer);

  /**
   * Returns the address (in binary format) that this channel is connected to,
   * if available. `nullptr` otherwise.
   */
  in_addr_t const *connected_address() const override;

  bool is_open() const override { return connected_; }

private:
  /* libuv callbacks; handle->data always points at the owning TCPChannel */
  static void conn_read_alloc_cb(uv_handle_t *handle, size_t suggested_size,
                                 uv_buf_t *buf);
  static void conn_read_cb(uv_stream_t *stream, ssize_t nread,
                           const uv_buf_t *buf);
  static void conn_close_cb(uv_handle_t *handle);
  static void conn_close_and_reinit_cb(uv_handle_t *handle);
  static void conn_connect_cb(uv_connect_t *req, int status);
  static void conn_write_cb(uv_write_t *req, int status);

  // Shared implementation of close()/close_permanently().
  void close_internal(const uv_close_cb close_cb);

  /**
   * Inits the tcp handle (conn_) and buffers
   */
  void reinit(uv_loop_t *loop);

  /**
   * Completes socket configuration and starts reading
   */
  void start_processing();

  // Active callback sink: either the caller's callbacks or a proxy wrapper.
  Callbacks *callbacks_ = nullptr;
  std::string addr_; // remote host, used by connect()
  std::string port_; // remote port, used by connect()
  std::optional<config::HttpProxyConfig> proxy_;
  std::unique_ptr<Callbacks> callback_wrapper_;

  uv_tcp_t conn_;            // underlying libuv TCP handle
  uv_connect_t connect_req_; // pending connect request

  // Receive buffer; u64-backed so its storage is 8-byte aligned.
  u64 rx_buffer_[(rx_buffer_size + 7) / 8];

  /* number of bytes currently in the rx_buffer */
  u32 rx_len_;

  // true while libuv holds an outstanding allocation into rx_buffer_
  bool allocated_ = false;

  bool connected_address_available_ = false;
  bool connected_ = false;
  in_addr_t connected_address_ = 0; // valid only when connected_address_available_
};
|
||||
|
||||
} /* namespace channel */
|
||||
|
|
@ -0,0 +1,535 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#include <channel/callbacks.h>
|
||||
#include <channel/component.h>
|
||||
#include <channel/internal/ssl_context.h>
|
||||
#include <channel/internal/tls_shim.h>
|
||||
#include <channel/tls_channel.h>
|
||||
#include <channel/tls_error.h>
|
||||
#include <config.h>
|
||||
#include <openssl/bio.h>
|
||||
#include <openssl/buffer.h>
|
||||
#include <openssl/conf.h>
|
||||
#include <openssl/crypto.h>
|
||||
#include <openssl/err.h>
|
||||
#include <openssl/opensslv.h>
|
||||
#include <openssl/pem.h>
|
||||
#include <openssl/ssl.h>
|
||||
#include <openssl/x509.h>
|
||||
#include <openssl/x509v3.h>
|
||||
#include <util/error_handling.h>
|
||||
#include <util/log.h>
|
||||
|
||||
#include <iostream>
|
||||
#include <sstream>
|
||||
|
||||
// 16 KiB chunk size.
// NOTE(review): no use site is visible in this chunk -- presumably the size
// of application-level reads from the TLS layer; confirm where it is used.
constexpr u32 APP_READ_SIZE = (16 * 1024);

// Ex-data slot index on SSL objects mapping back to the owning TLSChannel;
// assigned in TLSChannel::Initializer's constructor, -1 until then.
int channel::TLSChannel::Initializer::channel_index = -1;
|
||||
|
||||
static bool verify_name(std::string_view peername, std::string_view certname) {
|
||||
if (peername == certname) {
|
||||
LOG::trace_in(channel::Component::tls, "Successfully validated cert names. Got '{}', expected '{}'", certname, peername);
|
||||
return true;
|
||||
}
|
||||
// Support wildcard certificates
|
||||
if ("*." == certname.substr(0, 2)) {
|
||||
size_t subject_dot_pos = 1;
|
||||
|
||||
std::string_view subject_domain = certname.substr(subject_dot_pos + 1);
|
||||
|
||||
size_t peer_dot_pos = peername.find('.');
|
||||
if (peer_dot_pos == std::string::npos) {
|
||||
return false;
|
||||
}
|
||||
std::string_view peer_domain = peername.substr(peer_dot_pos + 1);
|
||||
|
||||
if (subject_domain == peer_domain) {
|
||||
/* Everything after the wildcard compared equal */
|
||||
LOG::trace_in(channel::Component::tls, "Successfully validated cert names. Got '{}', expected '{}'", certname, peername);
|
||||
return true;
|
||||
}
|
||||
else {
|
||||
LOG::trace_in(
|
||||
channel::Component::tls,
|
||||
"Certificate verification with wildcards error: subject name differs:"
|
||||
" got '{}' expected '{}'",
|
||||
certname, peername
|
||||
);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
else {
|
||||
/* not equal. reject */
|
||||
LOG::trace_in(
|
||||
channel::Component::tls,
|
||||
"Certificate verification error: subject name differs:"
|
||||
" got '{}' expected '{}'",
|
||||
certname, peername
|
||||
);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * OpenSSL peer-certificate verification callback.
 *
 * Runs after OpenSSL's built-in chain checks (`preverify`).  For the leaf
 * certificate (depth 0) it additionally matches the channel's expected peer
 * hostname against the subject common name and, failing that, against each
 * DNS subject-alternative name.  Returns 1 to accept, 0 to reject.
 */
static int verify_callback(int preverify, X509_STORE_CTX *ctx) {
  // If the built-in chain verification already failed, reject outright.
  if (preverify != 1)
    return 0;

  int depth = X509_STORE_CTX_get_error_depth(ctx);
  /* we only want to validate the server's certificate, not the CA */
  if (depth != 0)
    return preverify;

  /* Get the ssl pointer from the context */
  SSL *ssl = (SSL *)X509_STORE_CTX_get_ex_data(
      ctx, SSL_get_ex_data_X509_STORE_CTX_idx());

  /* get the TLSChannel pointer from the SSL object */
  auto channel = (channel::TLSChannel *)SSL_get_ex_data(
      ssl, channel::TLSChannel::Initializer::channel_index);

  /* note: cert is an internal pointer (don't free) */
  X509 *cert = X509_STORE_CTX_get_current_cert(ctx);
  if (cert == nullptr) {
    LOG::error("Certificate verification error: no peer certificate");
    return 0;
  }

  /* note: MUST NOT free subject_name */
  X509_NAME *subject_name = X509_get_subject_name(cert);
  if (subject_name == nullptr) {
    LOG::error("Certificate verification error: no subject name");
    return 0;
  }

  int idx = X509_NAME_get_index_by_NID(subject_name, NID_commonName, -1);
  if (idx < 0) {
    LOG::error("Certificate verification error: cannot get subject index");
    return 0;
  }

  /* seems like an internal pointer. the reference code doesn't free it
   * (the .tar.gz at https://wiki.openssl.org/index.php/SSL/TLS_Client)
   */
  X509_NAME_ENTRY *entry = X509_NAME_get_entry(subject_name, idx);
  if (entry == nullptr) {
    LOG::error("Certificate verification error: cannot get subject entry");
    return 0;
  }

  /* also looks like an internal pointer and not freed by the example */
  ASN1_STRING *data = X509_NAME_ENTRY_get_data(entry);
  if (data == nullptr) {
    LOG::error("Certificate verification error: cannot get subject data");
    return 0;
  }

  // An empty expected hostname disables hostname matching entirely.
  if (const std::string &peer_hostname = channel->peer_hostname(); peer_hostname.empty()) {
    return preverify;
  } else {
    // First try the subject common name.
    u8 *subject = nullptr;
    int length = ASN1_STRING_to_UTF8(&subject, data);
    ASSUME(length >= 0);
    if (subject == nullptr) {
      LOG::error("Certificate verification error: cannot get subject string");
      return 0;
    }
    /* after this point, will need to OPENSSL_free(subject) */
    std::string_view subject_string(reinterpret_cast<char const *>(subject), length);

    if (verify_name(peer_hostname, subject_string)) {
      OPENSSL_free(subject);
      return preverify;
    }
    else {
      // Did not validate, free subject and move on to alternate names
      OPENSSL_free(subject);
    }

    GENERAL_NAMES* names = reinterpret_cast<GENERAL_NAMES*>(X509_get_ext_d2i(cert, NID_subject_alt_name, 0, 0));
    if (!names) {
      LOG::error("Certificate verification error: cannot get general names");
      return 0;
    }
    /* after this point, will need to GENERAL_NAMES_free(names) */
    int count = sk_GENERAL_NAME_num(names);
    if (!count) {
      LOG::error("Certificate verification error: cannot get general name count");
      GENERAL_NAMES_free(names);
      return 0;
    }
    for (int i = 0; i < count; ++i) {
      GENERAL_NAME* entry = sk_GENERAL_NAME_value(names, i);
      if (!entry) {
        LOG::error("Certificate verification error: could not get alternate name entry");
        GENERAL_NAMES_free(names);
        return 0;
      }
      // Only DNS-type alternative names participate in hostname matching.
      if (GEN_DNS == entry->type) {
        u8 *name = nullptr;
        int name_len = 0;
        name_len = ASN1_STRING_to_UTF8(&name, entry->d.dNSName);
        ASSUME(name_len >= 0);
        if (!name) {
          LOG::error("Certificate verification error: could not get string value of alternate name entry");
          GENERAL_NAMES_free(names);
          return 0;
        }
        /* need to OPENSSL_free(name) after this point */
        std::string_view alt_name(reinterpret_cast<char const *>(name), name_len);
        if (verify_name(peer_hostname, alt_name)) {
          // Successfully validated name, return success
          OPENSSL_free(name);
          GENERAL_NAMES_free(names);
          return preverify;
        }
        if (name) {
          // Did not validate, continue on to next name
          OPENSSL_free(name);
          name = nullptr;
        }
      }
    }

    GENERAL_NAMES_free(names);
    LOG::error("Could not validate any of the hostnames in the cert.");
    return 0;
  }
}
|
||||
|
||||
/****************************
|
||||
* TLSChannel::Initializer
|
||||
****************************/
|
||||
/**
 * Initializes the OpenSSL library and allocates the ex-data index used to
 * map an SSL* back to its owning TLSChannel.
 *
 * @throws std::runtime_error if library init or index allocation fails.
 */
channel::TLSChannel::Initializer::Initializer() {
  // Pin the exact OpenSSL build we were developed against.
  static_assert(OPENSSL_VERSION_NUMBER == 0x1010102fL, "unexpected OpenSSL version");

  /* https://www.openssl.org/docs/ssl/SSL_library_init.html */
  if (OPENSSL_init_ssl(OPENSSL_INIT_LOAD_SSL_STRINGS, NULL) != 1) {
    throw std::runtime_error("Failed to initialize TLS library");
  }

  channel_index =
      CRYPTO_get_ex_new_index(CRYPTO_EX_INDEX_SSL, 0, nullptr, nullptr, nullptr, nullptr);
  if (channel_index == -1) {
    throw std::runtime_error("could not allocate SSL index for channels");
  }
}
|
||||
|
||||
/**
 * Releases the SSL ex-data index allocated in the constructor.
 *
 * No library-wide teardown is performed: deinit does not seem to be necessary
 * in OpenSSL 1.1.0 onwards, as discussed in:
 *   https://rt.openssl.org/Ticket/Display.html?id=3824&user=guest&pass=guest
 * Should that turn out to be incorrect, see "Cleanup" on:
 *   https://wiki.openssl.org/index.php/Library_Initialization
 */
channel::TLSChannel::Initializer::~Initializer() {
  /* free the index mapping SSL to TLSChannel */
  CRYPTO_free_ex_index(CRYPTO_EX_INDEX_SSL, channel_index);
}
|
||||
|
||||
/****************************
|
||||
* TLSChannel::Credentials
|
||||
****************************/
|
||||
/**
 * Takes ownership of the client's PEM key and certificate strings.
 */
channel::TLSChannel::Credentials::Credentials(std::string client_key, std::string client_crt)
    : client_key_(std::move(client_key)),
      client_crt_(std::move(client_crt)) {}
|
||||
|
||||
/****************************
|
||||
* TLSChannel
|
||||
****************************/
|
||||
/**
 * Builds the TLS layer on top of an existing TCP transport.
 *
 * @param transport: the ciphertext transport (not owned; must outlive us)
 * @param creds: key material used for the connection (not owned)
 * @param server_hostname: hostname to verify on the server certificate
 */
channel::TLSChannel::TLSChannel(TCPChannel &transport, Credentials &creds,
                                std::string server_hostname)
    : transport_(transport),
      creds_(creds),
      server_hostname_(std::move(server_hostname)),
      // creds_ is an alias of creds; read both fields through the member
      ssl_context_(std::make_unique<internal::SSLContext>(
          creds_.client_key_, creds_.client_crt_)) {
  LOG::trace_in(channel::Component::tls, "TLSChannel - server hostname: '{}'", server_hostname_);
}
|
||||
|
||||
/**
 * Best-effort shutdown: closes the TLS session if one is still live.
 * Swallows all exceptions — destructors must not throw.
 */
channel::TLSChannel::~TLSChannel() {
  const bool still_open = tls_shim_ && !tls_shim_->is_closed();
  if (!still_open) {
    return;
  }
  try {
    close();
  } catch (...) {
    /* pass, best effort */
  }
}
|
||||
|
||||
/**
 * Starts a TLS client session over the (already connected) transport.
 *
 * Creates a fresh TLSShim, links it back to this channel via the ex-data
 * index, configures SNI and peer verification, then kicks off the handshake
 * and flushes any ciphertext the handshake produced to the transport.
 *
 * Errors are reported through callbacks.on_error(); no exception is thrown
 * except for the reconnect-while-open programming error.
 *
 * @param callbacks: the callbacks to use during this connection
 * @throws std::runtime_error on (re)connect with an unclosed client
 */
void channel::TLSChannel::connect(Callbacks &callbacks) {
  if (tls_shim_ && !tls_shim_->is_closed()) {
    throw std::runtime_error("TLSChannel: (re)connect with unclosed client");
  }

  callbacks_ = &callbacks;

  // FIXME: this should be done at TCP `on_connect`, before calling the inner
  // callback's `on_connect`.
  tls_shim_ = std::make_unique<internal::TLSShim>(*ssl_context_);

  SSL *ssl = tls_shim_->get_SSL();

  /* add a pointer to _this_ to the channel_index */
  int res = SSL_set_ex_data(ssl, Initializer::channel_index, this);
  if (res != 1) {
    LOG::error("TLSChannel: could not add this reference to index: {}", TLSError());
    callbacks_->on_error(-EFAULT);
    return;
  }

  /* Configure the expected hostname in the SNI extension */
  if (!server_hostname_.empty()) {
    // renamed from `res` to avoid shadowing the outer variable
    int sni_res = SSL_set_tlsext_host_name(ssl, server_hostname_.c_str());
    if (sni_res != 1) {
      // BUGFIX: the format string was missing its `{}` placeholder, so the
      // TLSError argument was silently dropped from the log message.
      LOG::error("TLSChannel: could not set SNI field: {}", TLSError());
      callbacks_->on_error(-EFAULT);
      return;
    }
  }

  /* register the certificate verification callback */
  SSL_set_verify(ssl, SSL_VERIFY_PEER, verify_callback);

  /* start connecting */
  SSL_set_connect_state(ssl);
  handshake();

  /* push any handshake bytes (ClientHello etc.) out to the transport */
  if (auto error = flush_transport_bio()) {
    callbacks_->on_error(-error.value());
    return;
  }
}
|
||||
|
||||
/**
 * Feeds ciphertext received from the transport into the TLS engine.
 *
 * Advances the handshake if it is not yet complete; once it is, attempts one
 * SSL_read and delivers any decrypted bytes to the user callbacks.
 *
 * @returns how many input bytes were consumed (0 on write failure)
 */
u32 channel::TLSChannel::received_data(const u8 *data, u32 data_len) {
  ASSUME(tls_shim_);
  BIO *bio = tls_shim_->get_transport_bio();
  SSL *ssl = tls_shim_->get_SSL();

  /* remember whether the handshake was already complete before this chunk */
  int finished_before = SSL_is_init_finished(ssl);

  /* push the ciphertext into the memory BIO backing the SSL object */
  int wrote = BIO_write(bio, data, data_len);
  if (wrote < 0) {
    LOG::error("TLSChannel: failed to write received transport data: {}", TLSError());
    callbacks_->on_error(-ENOTCONN);
    return 0;
  }

  if (!finished_before) {
    int res = handshake();

    if (res != 0)
      return wrote; /* handshake() already called on_error() callback */

    /* send out any handshake response bytes */
    if (auto error = flush_transport_bio()) {
      callbacks_->on_error(-error.value());
      return wrote;
    }
  }

  /* has the handshake finished? */
  if (SSL_is_init_finished(ssl)) {
    /* we might have data to give the application */
    u64 buf[(APP_READ_SIZE + 7) / 8];
    int read = SSL_read(ssl, buf, APP_READ_SIZE);
    if (read > 0) {
      /* read some data, pass it to the caller */
      callbacks_->received_data((u8 *)buf, read);
    }
    else {
      /* BUGFIX: SSL_ERROR_* values are produced by SSL_get_error(), not by
       * ERR_get_error() (which returns packed error-queue codes), so the old
       * comparison could misclassify a benign WANT_READ/WANT_WRITE as fatal
       * or vice versa. */
      int err = SSL_get_error(ssl, read);
      if ((err != SSL_ERROR_WANT_READ) && (err != SSL_ERROR_WANT_WRITE) &&
          (err != SSL_ERROR_NONE)) {
        /* log from the error queue; `err` is a category, not a queue code */
        LOG::error("TLSChannel: application-side read failed: {}", TLSError());
        callbacks_->on_error(-ENOTCONN);
      }
    }
  }

  return (u32)wrote;
}
|
||||
|
||||
void channel::TLSChannel::close() {
|
||||
if (!tls_shim_ || tls_shim_->is_closed()) {
|
||||
LOG::trace_in(channel::Component::tls,
|
||||
"TLSChannel::{}: (tls_shim={}): '{}'",
|
||||
__func__, static_cast<bool>(tls_shim_), server_hostname_);
|
||||
return;
|
||||
}
|
||||
|
||||
LOG::trace_in(channel::Component::tls, "TLSChannel::{}: '{}'",
|
||||
__func__, server_hostname_
|
||||
);
|
||||
SSL *ssl = tls_shim_->get_SSL();
|
||||
if (SSL_is_init_finished(ssl)) {
|
||||
int res = SSL_shutdown(ssl);
|
||||
int err = ERR_get_error();
|
||||
if ((res < 0) && (err != SSL_ERROR_WANT_READ) && (err != SSL_ERROR_WANT_WRITE)) {
|
||||
throw std::runtime_error(
|
||||
fmt::format("TLSChannel::{}: unexpected error in close(): {}",
|
||||
__func__, TLSError(err))
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/* flush to the transport, but do best effort; ignore errors */
|
||||
flush_transport_bio();
|
||||
|
||||
tls_shim_.reset();
|
||||
}
|
||||
|
||||
std::error_code channel::TLSChannel::send(const u8 *data, int data_len) {
|
||||
ASSUME(tls_shim_);
|
||||
ASSUME(is_open());
|
||||
SSL *ssl = tls_shim_->get_SSL();
|
||||
|
||||
while (data_len > 0) {
|
||||
/* try to write some bytes */
|
||||
int res = SSL_write(ssl, data, data_len);
|
||||
|
||||
if (res > 0) {
|
||||
/* success. move on and if more to send, send it */
|
||||
data_len -= res;
|
||||
data += res;
|
||||
continue;
|
||||
}
|
||||
|
||||
/* make sure only a flush is required */
|
||||
int err = ERR_get_error();
|
||||
if ((err != SSL_ERROR_WANT_READ) && (err != SSL_ERROR_WANT_WRITE) &&
|
||||
(err != SSL_ERROR_NONE)) {
|
||||
// TODO: gracefully handle TLS errors
|
||||
throw std::runtime_error(
|
||||
fmt::format("TLSChannel: application-side send failed: {}", TLSError(err))
|
||||
);
|
||||
}
|
||||
|
||||
/* error might be due to full buffers. try to flush */
|
||||
if (auto error = flush_transport_bio()) {
|
||||
callbacks_->on_error(-error.value());
|
||||
return error;
|
||||
}
|
||||
|
||||
/* try again now buffer is flushed */
|
||||
res = SSL_write(ssl, data, data_len);
|
||||
|
||||
if (res > 0) {
|
||||
/* success. move on and if more to send, send it */
|
||||
data_len -= res;
|
||||
data += res;
|
||||
continue;
|
||||
}
|
||||
else {
|
||||
/* this shouldn't happen because we've disabled renegotiation, so
|
||||
* if the buffer is flushed, forward progress should be possible
|
||||
*/
|
||||
throw std::runtime_error(
|
||||
fmt::format("TLSChannel: application-side send failed after flush: {}",
|
||||
TLSError(err))
|
||||
);
|
||||
}
|
||||
|
||||
/* in one of the above SSL_write()'s, res returns > 0 and data_len was
|
||||
* reduced, so an infinite loop should not be possible.
|
||||
*/
|
||||
}
|
||||
|
||||
/* flush at the end. we don't won't to keep data around until next send */
|
||||
if (auto error = flush_transport_bio()) {
|
||||
callbacks_->on_error(-error.value());
|
||||
return error;
|
||||
}
|
||||
|
||||
return {};
|
||||
}
|
||||
|
||||
/**
 * Drains ciphertext pending in the transport-side BIO and sends it over the
 * underlying TCP transport.
 *
 * @returns default-constructed error_code on success (including the cases
 *          "nothing pending" and "transport not open"), or
 *          not_enough_memory / io_error on failure.
 */
std::error_code channel::TLSChannel::flush_transport_bio() {
  ASSUME(tls_shim_);
  BIO *bio = tls_shim_->get_transport_bio();

  int pending = BIO_pending(bio);
  ASSUME(pending >= 0);
  if (pending == 0) {
    /* nothing buffered, nothing to do */
    return {};
  }

  if (transport_.is_open()) {
    auto bufp = transport_.allocate_send_buffer(pending);
    if (bufp == nullptr) {
      LOG::error("TLSChannel: memory allocation failed when flushing transport");
      return std::make_error_code(std::errc::not_enough_memory);
    }

    int read = BIO_read(bio, &bufp->data[0], pending);
    if (read != pending) {
      /* free the allocated buffer memory — sending with len == 0 appears to
       * be the transport's convention for releasing an unused buffer;
       * NOTE(review): confirm against TCPChannel::send's contract */
      bufp->len = 0;
      transport_.send(bufp);

      /* error */
      LOG::error("TLSChannel: could not read entire buffer when flushing transport");
      return std::make_error_code(std::errc::io_error);
    }

    bufp->len = pending;
    transport_.send(bufp);
  }
  /* NOTE(review): when the transport is closed, pending ciphertext is
   * silently dropped and success is returned — presumably intentional
   * best-effort behavior; confirm */
  return {};
}
|
||||
|
||||
int channel::TLSChannel::handshake() {
|
||||
SSL *ssl = tls_shim_->get_SSL();
|
||||
|
||||
int res = SSL_do_handshake(ssl);
|
||||
switch (res) {
|
||||
case 0:
|
||||
/* handshake failed permanently */
|
||||
LOG::error("TLSChannel: handshake failed: {}", TLSError());
|
||||
callbacks_->on_error(-EACCES);
|
||||
return -EACCES;
|
||||
case 1:
|
||||
/* finished handshake, notify the instance's user */
|
||||
callbacks_->on_connect();
|
||||
return 0;
|
||||
default: {
|
||||
int err = ERR_get_error();
|
||||
if ((err == SSL_ERROR_WANT_READ) || (err == SSL_ERROR_WANT_WRITE) ||
|
||||
(err == SSL_ERROR_NONE)) {
|
||||
return 0; /* will need more iterations */
|
||||
}
|
||||
|
||||
/* abnormal error */
|
||||
LOG::error("TLSChannel: unexpected error in handshake: {}", TLSError(err));
|
||||
callbacks_->on_error(-EFAULT);
|
||||
return -EFAULT;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Accessor for the peer's hostname that is verified against the server
 * certificate (the value given at construction).
 */
const std::string &channel::TLSChannel::peer_hostname() {
  return server_hostname_;
}
|
||||
|
|
@ -0,0 +1,144 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <channel/tcp_channel.h>
|
||||
#include <exception>
|
||||
#include <memory>
|
||||
#include <string>
|
||||
|
||||
namespace channel {

class Callbacks;

namespace internal {
class SSLContext;
class TLSShim;
}; // namespace internal

/**
 * A TLS channel.
 *
 * Layers TLS on top of a TCPChannel: plaintext goes in via send(),
 * ciphertext from the transport is fed in via received_data(), and decrypted
 * application data is delivered through the registered Callbacks.
 */
class TLSChannel : public Channel {
public:
  /**
   * A guard-like class to initialize the TLS library.
   *
   * A single Initializer must be alive before instantiating TLSChannels
   */
  class Initializer {
  public:
    /**
     * c'tor. Initialize the TLS library.
     */
    Initializer();

    /**
     * d'tor. Clean up the TLS library (this might be partially implemented)
     */
    ~Initializer();

    /**
     * Index to the exdata that links the SSL CTX to its TLSChannel
     */
    static int channel_index;
  };

  /**
   * Client-side key material (PEM key and certificate strings).
   * Fields are readable only by TLSChannel (friend).
   */
  class Credentials {
  public:
    Credentials(std::string client_key, std::string client_crt);

  private:
    friend class TLSChannel;

    const std::string client_key_;
    const std::string client_crt_;
  };

  /**
   * c'tor
   * @param transport: the ciphertext transport
   * @param creds: the credentials to use to establish connection
   * @param server_hostname: hostname for server certificate authentication
   */
  TLSChannel(TCPChannel &transport, Credentials &creds, std::string server_hostname);

  /**
   * d'tor
   */
  virtual ~TLSChannel();

  /**
   * Connects to an endpoint
   * @param callbacks: the callbacks to use during this connection
   */
  void connect(Callbacks &callbacks);

  /**
   * Data has been received from the underlying (e.g., TCP) transport.
   *
   * @returns how many bytes were consumed
   */
  u32 received_data(const u8 *data, u32 data_len);

  /**
   * disconnects the channel
   *
   * @important: this does NOT call the callback's close().
   */
  void close() override;

  /**
   * see @PollChannel::send
   */
  std::error_code send(const u8 *data, int data_len) override;

  /**
   * Accessor for the peer's hostname to verify on the certificate
   */
  const std::string &peer_hostname();

  // Open when the TLS shim exists and the underlying transport is connected.
  bool is_open() const override {
    return tls_shim_ && transport_.is_open();
  }

private:
  /**
   * Writes pending data from the transport bio to the underlying Channel
   *
   * @return default-constructed (success) error_code on success, or the
   *         failure's error_code otherwise
   */
  std::error_code flush_transport_bio();

  /**
   * try to finish the handshake
   * @returns:
   *   0 on normal conditions
   *   negative error on failure
   */
  int handshake();

  TCPChannel &transport_;          // ciphertext transport (not owned)
  Callbacks *callbacks_ = nullptr; // set by connect(); not owned
  Credentials &creds_;             // key material (not owned)
  std::string server_hostname_;    // hostname verified against the cert

  std::unique_ptr<channel::internal::SSLContext> ssl_context_;
  std::unique_ptr<channel::internal::TLSShim> tls_shim_;
};

} /* namespace channel */
|
||||
|
|
@ -0,0 +1,37 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#include <channel/tls_error.h>
|
||||
|
||||
#include <openssl/err.h>
|
||||
|
||||
#include <sstream>
|
||||
|
||||
namespace channel {
|
||||
|
||||
TLSError::TLSError(): TLSError(ERR_get_error()) {}
|
||||
|
||||
TLSError::TLSError(int code): code_(code) {}
|
||||
|
||||
std::string_view TLSError::name() const {
|
||||
return ERR_lib_error_string(code_);
|
||||
}
|
||||
|
||||
std::string_view TLSError::reason() const {
|
||||
return ERR_reason_error_string(code_);
|
||||
}
|
||||
|
||||
} // namespace channel
|
||||
|
|
@ -0,0 +1,60 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <string_view>
|
||||
|
||||
namespace channel {

/**
 * A class representing a TLS error code.
 *
 * Using this class in log statements will provide properly formatted string
 * representation of TLS errors, with lazy evaluation (translation of error
 * code to string representation won't take place unless the log would be
 * printed due to log level check).
 */
class TLSError {
public:
  /**
   * Represents the most recent TLS error, as given by `ERR_get_error`.
   */
  TLSError();

  /**
   * Represents the given TLS error code.
   * @param code: an OpenSSL error code
   */
  TLSError(int code);

  // Raw OpenSSL error code wrapped by this instance.
  int code() const { return code_; }

  // Library name for the error code (as reported by OpenSSL).
  std::string_view name() const;
  // Human-readable reason string for the error code.
  std::string_view reason() const;

private:
  // NOTE(review): OpenSSL error codes are `unsigned long`; storing them in
  // `int` may truncate on LP64 platforms — confirm before widening.
  int code_;
};

// Stream/log formatter: prints "[TLS <lib>:<code>: '<reason>']".
template <typename Out>
Out &operator <<(Out &&out, TLSError const &error) {
  out << "[TLS " << error.name() << ':' << error.code() << ": '" << error.reason() << "']";
  return out;
}

} /* namespace channel */
|
||||
|
|
@ -0,0 +1,63 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#include <channel/tls_handler.h>
|
||||
|
||||
#include <uv.h>
|
||||
|
||||
namespace channel {

// Wires up the client credentials and the TLS-over-TCP channel; all further
// operations are thin forwards to tls_channel_. Note creds_ is initialized
// before tls_channel_ (declaration order), which consumes it by reference.
TLSHandler::TLSHandler(
  uv_loop_t &loop,
  std::string addr,
  std::string port,
  std::string agent_key,
  std::string agent_crt,
  std::string server_hostname,
  std::optional<config::HttpProxyConfig> proxy
):
  creds_(std::move(agent_key), std::move(agent_crt)),
  tls_channel_(
    loop,
    std::move(addr),
    std::move(port),
    creds_,
    std::move(server_hostname),
    std::move(proxy)
  )
{}

// Begin connecting; TLS negotiation is handled inside tls_channel_.
void TLSHandler::connect(Callbacks &callbacks) {
  tls_channel_.connect(callbacks);
}

// Send plaintext through the TLS layer.
std::error_code TLSHandler::send(const u8 *data, int data_len) {
  return tls_channel_.send(data, data_len);
}

// Close the TLS-over-TCP channel (TLS first, then TCP — see
// TlsOverTcpChannel::close).
void TLSHandler::close() {
  tls_channel_.close();
}

// Flush any buffered data down through the TLS and TCP layers.
std::error_code TLSHandler::flush() {
  return tls_channel_.flush();
}

// Address of the connected peer, taken from the underlying TCP channel.
in_addr_t const *TLSHandler::connected_address() const {
  return tls_channel_.get_tcp_channel().connected_address();
}

} // namespace channel
|
||||
|
|
@ -0,0 +1,68 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <channel/network_channel.h>
|
||||
#include <channel/tls_channel.h>
|
||||
#include <channel/tls_over_tcp_channel.h>
|
||||
#include <map>
|
||||
|
||||
namespace channel {

/**
 * NetworkChannel implementation that speaks TLS over TCP.
 *
 * Owns the client credentials and a TlsOverTcpChannel; channel operations
 * forward to the latter.
 */
class TLSHandler: public NetworkChannel {
public:
  /**
   * c'tor
   * Throws if:
   *   1. connection error on tcp_channel_
   *   2. call to connect_tls throws
   *
   */
  TLSHandler(
    uv_loop_t &loop,
    std::string addr,
    std::string port,
    std::string agent_key = "",
    std::string agent_crt = "",
    std::string server_hostname = "",
    std::optional<config::HttpProxyConfig> proxy = {}
  );

  /**
   * Connects to an endpoint and starts negotiating
   * @param callbacks: the callbacks to use during this connection
   */
  void connect(Callbacks &callbacks) override;

  // Send plaintext; forwarded to the TLS channel.
  std::error_code send(const u8 *data, int data_len) override;

  /**
   * disconnects the channel
   */
  void close() override;
  std::error_code flush() override;

  // Address of the connected peer, or nullptr when not connected.
  in_addr_t const *connected_address() const override;

  bool is_open() const override { return tls_channel_.is_open(); }

private:
  // Declared before tls_channel_: the channel holds a reference to creds_,
  // so credentials must be constructed first and destroyed last.
  TLSChannel::Credentials creds_;
  TlsOverTcpChannel tls_channel_;
};

} // namespace channel
|
||||
|
|
@ -0,0 +1,102 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#include <channel/tls_over_tcp_channel.h>
|
||||
|
||||
#include <util/log.h>
|
||||
#include <util/log_formatters.h>
|
||||
|
||||
#include <uv.h>
|
||||
|
||||
#include <iostream>
|
||||
|
||||
namespace channel {

// Composes a TCPChannel (ciphertext transport) with a TLSChannel layered on
// top of it. TCP events are intercepted by tcp_callbacks_ and routed into
// the TLS layer; tcp_ must be constructed before tls_, which references it.
TlsOverTcpChannel::TlsOverTcpChannel(
  uv_loop_t &loop,
  std::string addr,
  std::string port,
  TLSChannel::Credentials &creds,
  std::string server_hostname,
  std::optional<config::HttpProxyConfig> proxy
):
  tcp_callbacks_(*this),
  tcp_(loop, std::move(addr), std::move(port), std::move(proxy)),
  tls_(tcp_, creds, std::move(server_hostname))
{
}

// Out-of-line d'tor; members tear down in reverse declaration order
// (tls_ before tcp_).
TlsOverTcpChannel::~TlsOverTcpChannel() {}

// Start the TCP connection; TLS negotiation is kicked off from
// TcpCallbacks::on_connect once the transport is up.
void TlsOverTcpChannel::connect(Callbacks &callbacks) {
  LOG::debug("TlsOverTcpChannel::connect()");
  callbacks_ = &callbacks;
  tcp_.connect(tcp_callbacks_);
}

// Send plaintext through the TLS layer (which writes ciphertext to tcp_).
std::error_code TlsOverTcpChannel::send(const u8 *data, int data_len)
{
  return tls_.send(data, data_len);
}

// Shut down the TLS session first, then the TCP transport.
void TlsOverTcpChannel::close() {
  tls_.close();
  tcp_.close();
}

// Flush the TLS layer to the transport, then the transport itself.
// Returns the first error encountered.
std::error_code TlsOverTcpChannel::flush()
{
  if (auto error = tls_.flush()) {
    return error;
  }
  return tcp_.flush();
}

/**************************************
 * TCP CALLBACKS
 **************************************/

TlsOverTcpChannel::TcpCallbacks::TcpCallbacks(TlsOverTcpChannel &parent_channel)
  : parent_channel_(parent_channel)
{
}

// Ciphertext arrived on TCP: hand it to the TLS layer for decryption.
u32 TlsOverTcpChannel::TcpCallbacks::received_data(const u8 *data, int data_len)
{
  return parent_channel_.tls_.received_data(data, data_len);
}

// Propagate transport errors to the user's callbacks.
void TlsOverTcpChannel::TcpCallbacks::on_error(int err)
{
  LOG::error("TCP error {}", static_cast<std::errc>(-err));

  parent_channel_.callbacks_->on_error(err);
}

// Propagate transport close to the user's callbacks.
void TlsOverTcpChannel::TcpCallbacks::on_closed()
{
  parent_channel_.callbacks_->on_closed();
}

// TCP is up: begin TLS negotiation, registering the user's callbacks with
// the TLS channel so they are notified when TLS itself connects.
void TlsOverTcpChannel::TcpCallbacks::on_connect()
{
  LOG::info("TCP connected");

  /* ok TCP is connected. connect TLS */
  parent_channel_.tls_.connect(*parent_channel_.callbacks_);
}

} /* namespace channel */
|
||||
|
|
@ -0,0 +1,98 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <channel/channel.h>
|
||||
#include <channel/tcp_channel.h>
|
||||
#include <channel/tls_channel.h>
|
||||
#include <string>
|
||||
#include <uv.h>
|
||||
|
||||
namespace channel {

class Callbacks;

/**
 * A TLS over TCP channel.
 *
 * Bundles a TCPChannel transport with a TLSChannel on top; the private
 * TcpCallbacks adapter feeds TCP events into the TLS layer.
 */
class TlsOverTcpChannel : public Channel {
public:
  /**
   * c'tor
   * @param loop: libuv event loop used by the TCP transport
   * @param addr: ip address or hostname to connect to
   * @param port: string holding port number
   * @param creds: the credentials to use to establish connection
   * @param server_hostname: hostname for server certificate authentication
   * @param proxy: optional HTTP proxy configuration
   */
  TlsOverTcpChannel(
    uv_loop_t &loop,
    std::string addr,
    std::string port,
    TLSChannel::Credentials &creds,
    std::string server_hostname,
    std::optional<config::HttpProxyConfig> proxy = {}
  );

  /**
   * d'tor
   */
  virtual ~TlsOverTcpChannel();

  /**
   * Connects to an endpoint and starts negotiating
   * @param callbacks: the callbacks to use during this connection
   */
  void connect(Callbacks &callbacks);

  /**
   * @see Channel::send
   */
  std::error_code send(const u8 *data, int data_len) override;

  // Closes TLS then TCP; flush drains TLS then TCP buffers.
  void close() override;
  std::error_code flush() override;

  // Accessors for the composed layers.
  TCPChannel const &get_tcp_channel() const { return tcp_; }
  TCPChannel &get_tcp_channel() { return tcp_; }

  TLSChannel const &get_tls_channel() const { return tls_; }
  TLSChannel &get_tls_channel() { return tls_; }

  // Open only when both the TLS session and the TCP transport are open.
  bool is_open() const override { return tls_.is_open() && tcp_.is_open(); }

private:
  // Adapter that receives TCP-level events and routes them into tls_ /
  // the user's callbacks.
  class TcpCallbacks : public channel::Callbacks {
  public:
    TcpCallbacks(TlsOverTcpChannel &parent_channel);
    virtual u32 received_data(const u8 *data, int data_len) override;
    virtual void on_error(int err) override;
    virtual void on_closed() override;
    virtual void on_connect() override;

  private:
    TlsOverTcpChannel &parent_channel_;
  };
  friend TcpCallbacks;

  TcpCallbacks tcp_callbacks_;
  TCPChannel tcp_;   // constructed before tls_, which references it
  TLSChannel tls_;
  Callbacks *callbacks_ = nullptr;  // user callbacks; set by connect()
};

} /* namespace channel */
|
||||
|
|
@ -0,0 +1,82 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#include <channel/upstream_connection.h>
|
||||
|
||||
#include <channel/component.h>
|
||||
#include <util/log.h>
|
||||
|
||||
namespace channel {

// Builds the outbound pipeline:
//   buffered_writer_ -> [double_write_channel_ ->] lz4_channel_ -> primary_channel_
// When a secondary channel is given, writes are mirrored to it via
// double_write_channel_; otherwise the writer feeds lz4_channel_ directly.
UpstreamConnection::UpstreamConnection(
  std::size_t buffer_size,
  bool allow_compression,
  NetworkChannel &primary_channel,
  Channel *secondary_channel
):
  primary_channel_(primary_channel),
  lz4_channel_(primary_channel_, buffer_size),
  allow_compression_(allow_compression),
  // NOTE(review): with a null secondary, the second argument degenerates to
  // lz4_channel_ and double_write_channel_ is left unused below.
  double_write_channel_(lz4_channel_, secondary_channel ? *secondary_channel : lz4_channel_),
  buffered_writer_(
    secondary_channel
      ? static_cast<Channel &>(double_write_channel_)
      : lz4_channel_,
    buffer_size
  )
{}

// Drop any buffered-but-unsent bytes, then connect the primary channel.
void UpstreamConnection::connect(Callbacks &callbacks) {
  buffered_writer_.reset();
  primary_channel_.connect(callbacks);
}

// Copy `data` into the buffered writer; actual transmission happens when
// the buffer fills or flush() is called.
std::error_code UpstreamConnection::send(const u8 *data, int data_len) {
  auto buffer = buffered_writer_.start_write(data_len);
  if (!buffer) { return buffer.error(); }
  memcpy(*buffer, data, data_len);
  buffered_writer_.finish_write();
  return {};
}

// Push all buffered bytes down the pipeline.
std::error_code UpstreamConnection::flush() {
  return buffered_writer_.flush();
}

// Discard buffered bytes and close the primary channel.
void UpstreamConnection::close() {
  buffered_writer_.reset();
  primary_channel_.close();
}

// Toggle LZ4 compression; flushes first so previously-buffered data goes
// out under its original compression setting.
void UpstreamConnection::set_compression(bool enabled) {
  // NOTE(review): the flush() error code is ignored here — presumably
  // best-effort; a failure would silently drop buffered data. Confirm.
  buffered_writer_.flush();

  LOG::trace_in(Component::upstream,
      "UpstreamConnection: {} ({}allowed) LZ4 compression",
      enabled ? "enabling" : "disabling",
      allow_compression_ ? "" : "not "
  );

  /* compression only actually turns on if construction allowed it */
  lz4_channel_.set_compression(enabled && allow_compression_);
}

// Expose the internal writer to callers.
BufferedWriter &UpstreamConnection::buffered_writer() { return buffered_writer_; }

// Address of the connected upstream peer, from the primary channel.
in_addr_t const *UpstreamConnection::connected_address() const {
  return primary_channel_.connected_address();
}

} // namespace channel
|
||||
|
|
@ -0,0 +1,76 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <channel/buffered_writer.h>
|
||||
#include <channel/callbacks.h>
|
||||
#include <channel/double_write_channel.h>
|
||||
#include <channel/lz4_channel.h>
|
||||
#include <channel/network_channel.h>
|
||||
#include <platform/platform.h>
|
||||
|
||||
namespace channel {

/**
 * Buffered, optionally-compressed, optionally-mirrored connection to an
 * upstream server. Writes pass through a BufferedWriter, an optional
 * double-write mirror, and an LZ4 channel before reaching the primary
 * network channel.
 */
class UpstreamConnection: public NetworkChannel {
public:
  UpstreamConnection(
    std::size_t buffer_size,
    bool allow_compression,
    NetworkChannel &primary_channel,
    Channel *secondary_channel = nullptr
  );

  /**
   * Connects to an endpoint and starts negotiating
   * @param callbacks: the callbacks to use during this connection
   */
  void connect(Callbacks &callbacks) override;

  // Buffers `data` for transmission; see flush().
  std::error_code send(const u8 *data, int data_len) override;

  /**
   * Flushes the internal buffers.
   */
  std::error_code flush() override;

  /**
   * disconnects the channel
   */
  void close() override;

  /**
   * Enables/disables compression (only effective when compression was
   * allowed at construction).
   */
  void set_compression(bool enabled);

  // Access the internal buffered writer.
  BufferedWriter &buffered_writer();

  in_addr_t const *connected_address() const override;

  bool is_open() const override { return primary_channel_.is_open(); }

private:
  NetworkChannel &primary_channel_;        // final destination (not owned)
  Lz4Channel lz4_channel_;                 // compression stage
  bool allow_compression_;                 // construction-time override
  DoubleWriteChannel double_write_channel_; // optional mirroring stage
  BufferedWriter buffered_writer_;         // entry point of the pipeline
};

} // namespace channel
|
||||
|
|
@ -0,0 +1 @@
|
|||
# Abseil is a hard requirement; fail configuration immediately if absent.
find_package(absl REQUIRED)
|
||||
|
|
@ -0,0 +1,24 @@
|
|||
# AWS SDK for C++: exposes an `aws-sdk-cpp` INTERFACE target carrying the
# libraries (ec2, s3, core) plus their transitive link requirements.
find_package(AWSSDK REQUIRED)
set(AWS_SERVICES ec2 s3 core)
AWSSDK_DETERMINE_LIBS_TO_LINK(AWS_SERVICES AWSSDK_LIBS)
add_library(aws-sdk-cpp INTERFACE)
target_link_libraries(
  aws-sdk-cpp
  INTERFACE
  -L${AWSSDK_LIB_DIR}
  ${AWSSDK_LIBS}
  # NOTE(review): ${AWSSDK_LIBS} is listed twice — possibly deliberate to
  # resolve circular references between the static AWS libraries; confirm
  # before deduplicating.
  ${AWSSDK_LIBS}
  curl-static
  z
)
target_include_directories(
  aws-sdk-cpp
  INTERFACE
  ${AWSSDK_INCLUDE_DIR}
)
# Crypto and dl are needed by the SDK's TLS/runtime loading paths.
target_link_libraries(
  aws-sdk-cpp
  INTERFACE
  OpenSSL::Crypto
  -ldl
)
|
||||
|
|
@ -0,0 +1,91 @@
|
|||
# BCC (BPF Compiler Collection) integration: locates the clang/LLVM/bcc
# static libraries and exposes `bcc-static` / `bcc-interface` targets.
list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/ext/bcc/cmake")

# required libs, See bcc/cmake/clang_libs.cmake
set(BCC_CLANG_LIBS
  clangFrontend
  clangSerialization
  clangDriver
  clangASTMatchers
  clangParse
  clangSema
  clangCodeGen
  clangAnalysis
  clangRewrite
  clangEdit
  clangAST
  clangLex
  clangBasic
)

# Locate each required clang library, aborting configuration if any is missing.
foreach(LIB IN LISTS BCC_CLANG_LIBS)
  find_library(lib${LIB} NAMES ${LIB} HINTS ${LLVM_LIBRARY_DIRS})
  if(lib${LIB} STREQUAL "lib${LIB}-NOTFOUND")
    message(FATAL_ERROR "Unable to find clang library ${LIB}. Build container should already have that set up")
  endif()
endforeach()

find_path(BCC_INCLUDE_DIRS bcc/libbpf.h)

# BCC libs, see the line that starts with "target_link_libraries(bcc-static"
# in bcc/src/cc/MakeLists.txt
find_library(BCC_LIBS NAMES "libbcc-combined.a")

set(CMAKE_REQUIRED_INCLUDES ${BCC_INCLUDE_DIRS})
include(CheckIncludeFile)
check_include_file("bcc/bcc_syms.h" FOUND_BCC_SYMS_H)
if(NOT FOUND_BCC_SYMS_H)
  message(FATAL_ERROR "Could not find bcc_syms.h while searching for bcc. Build container should already have that set up")
endif()

foreach(LIB IN LISTS BCC_LIBS)
  if(NOT EXISTS ${LIB})
    message(FATAL_ERROR "Could not find ${LIB}. Build container should already have that set up")
  endif()
endforeach()

message(STATUS "bcc libraries: ${BCC_LIBS}")
message(STATUS "bcc include dirs: ${BCC_INCLUDE_DIRS}")

add_library(bcc-static INTERFACE)
add_library(bcc-interface INTERFACE)
target_include_directories(bcc-interface
  INTERFACE
    ${BCC_INCLUDE_DIRS}
    ${LLVM_INCLUDE_DIRS}
)

# BCC LLVM libs, see bcc/src/cc/CMakeLists.txt
set(BCC_LLVM_LIBNAMES
  bitwriter
  bpfcodegen
  debuginfodwarf
  irreader
  linker
  mcjit
  objcarcopts
  option
  passes
  lto
  nativecodegen
  coverage
  coroutines
  bpfasmparser
  bpfdisassembler
)
llvm_map_components_to_libnames(BCC_LLVM_LIBS ${BCC_LLVM_LIBNAMES})
llvm_expand_dependencies(BCC_LLVM_LIBS_EXPANDED ${BCC_LLVM_LIBS})

target_compile_definitions(bcc-interface INTERFACE ${LLVM_DEFINITIONS})
target_link_libraries(bcc-static
  INTERFACE
    bcc-interface
    ${BCC_LIBS}
    ${BCC_LLVM_LIBS_EXPANDED}
    ${BCC_CLANG_LIBS}
    libelf.a
)

# LLVM is built with -ffunctions-sections -fdata-sections so we can remove unused functions
#target_compile_options(bcc-interface INTERFACE -ffunction-sections -fdata-sections)
#target_link_libraries(bcc-static INTERFACE -Wl,--gc-sections)
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
# Google Breakpad crash-reporting client, discovered via pkg-config and
# exposed as the `breakpad_client` INTERFACE target.
pkg_check_modules(GOOGLE_BREAKPAD REQUIRED breakpad-client)
add_library(breakpad_client INTERFACE)
target_compile_options(breakpad_client INTERFACE "${GOOGLE_BREAKPAD_CFLAGS}")
target_link_libraries(breakpad_client INTERFACE "${GOOGLE_BREAKPAD_LDFLAGS}")
|
||||
|
|
@ -0,0 +1,12 @@
|
|||
# c-ares headers (async DNS resolution). Only headers are exposed here, via
# the `c-ares-headers` INTERFACE target.
find_path(
  CARES_INCLUDE_DIR
  ares.h
  PATHS
  /usr/local/include
  /usr/include
)
if(NOT CARES_INCLUDE_DIR)
  message(FATAL_ERROR "Could not find c-ares, required for DNS. Build container should already have that set up")
endif()
add_library(c-ares-headers INTERFACE)
target_include_directories(c-ares-headers INTERFACE "${CARES_INCLUDE_DIR}")
|
||||
|
|
@ -0,0 +1,6 @@
|
|||
# use ccache if available. thanks to http://stackoverflow.com/a/24305849
# Both compile and link launch rules are wrapped so rebuilds hit the cache.
find_program(CCACHE_FOUND ccache)
if(CCACHE_FOUND)
  set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache)
  set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ccache)
endif(CCACHE_FOUND)
|
||||
|
|
@ -0,0 +1,3 @@
|
|||
# Locates an installed Clang via its exported CMake package configuration.
find_package(CLANG REQUIRED CONFIG NAMES Clang)
message(STATUS "Found Clang ${CLANG_VERSION}")
message(STATUS "Using ClangConfig.cmake in: ${CLANG_CONFIG}")
|
||||
|
|
@ -0,0 +1,49 @@
|
|||
# resolves the command line option for limiting error output
# this is extremely useful for debugging compilation errors
if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
  set(CXX_ERROR_LIMIT_FLAG "-ferror-limit")
elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
  set(CXX_ERROR_LIMIT_FLAG "-fmax-errors")
endif()

message(STATUS "C++ compiler: ${CMAKE_CXX_COMPILER}")
message(STATUS "C++ compiler version: ${CMAKE_CXX_COMPILER_VERSION}")

# Capture the compiler's own --version banner for build diagnostics.
execute_process(
  COMMAND ${CMAKE_CXX_COMPILER} --version
  WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
  OUTPUT_VARIABLE CXX_COMPILER_NATIVE_VERSION
  OUTPUT_STRIP_TRAILING_WHITESPACE
)
message(STATUS "C++ compiler native version string: ${CXX_COMPILER_NATIVE_VERSION}")

# most expressive debugging, and some optimization
# also, disabling -Wno-stringop-truncation where necessary given that it warns about
# behavior we intend to get out of strncpy
set(FLOWMILL_COMMON_COMPILE_FLAGS "-ggdb3 -Wall -Werror -fno-omit-frame-pointer -Wno-stringop-truncation ${CXX_ERROR_LIMIT_FLAG}=1 -pthread")
set(FLOWMILL_COMMON_C_FLAGS "${FLOWMILL_COMMON_COMPILE_FLAGS}")
set(FLOWMILL_COMMON_CXX_FLAGS "${FLOWMILL_COMMON_COMPILE_FLAGS}")

set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${FLOWMILL_COMMON_C_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${FLOWMILL_COMMON_CXX_FLAGS}")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${FLOWMILL_COMMON_LINKER_FLAGS} -static-libgcc -static-libstdc++ -pthread")

if(OPTIMIZE)
  set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -O2")
  set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O2")
endif()

set(CMAKE_CXX_STANDARD 20)
set(CMAKE_EXPORT_COMPILE_COMMANDS "ON")

# Applies standard hardening (stack protector, PIE, full RELRO) to an
# executable target.
function (harden_executable TARGET)
  # Compile-time hardening flags.
  target_compile_options(
    ${TARGET}
    PUBLIC
    -fstack-protector
    -fpie
    -fPIE
  )
  # Fix: -Wl,... and -static-pie are linker flags and have no effect when
  # passed only to the compile step, so apply them at link time as well.
  # target_link_libraries is used instead of target_link_options to stay
  # compatible with the project's cmake_minimum_required(3.12).
  target_link_libraries(
    ${TARGET}
    PUBLIC
    -Wl,-z,relro,-z,now
    -static-pie
  )
endfunction()
|
||||
|
|
@ -0,0 +1,11 @@
|
|||
# Static libcurl plus its TLS/zlib link dependencies, exposed as the
# `curl-static` INTERFACE target.
find_library(CURL_STATIC_LIBRARY NAMES libcurl.a)
message(STATUS "curl static library: ${CURL_STATIC_LIBRARY}")
add_library(curl-static INTERFACE)
target_link_libraries(
  curl-static
  INTERFACE
  ${CURL_STATIC_LIBRARY}
  OpenSSL::SSL
  OpenSSL::Crypto
  z
)
|
||||
|
|
@ -0,0 +1,15 @@
|
|||
# curlpp (C++ wrapper over libcurl), statically linked on top of curl-static.
find_path(CURLPP_INCLUDE_DIR NAMES curlpp/cURLpp.hpp)
find_library(CURLPP_STATIC_LIBRARY NAMES libcurlpp.a)
message(STATUS "curlpp static library: ${CURLPP_STATIC_LIBRARY}")
add_library(curl-cpp INTERFACE)
target_include_directories(
  curl-cpp
  INTERFACE
  "${CURLPP_INCLUDE_DIR}"
)
target_link_libraries(
  curl-cpp
  INTERFACE
  "${CURLPP_STATIC_LIBRARY}"
  curl-static
)
|
||||
|
|
@ -0,0 +1,26 @@
|
|||
option(UPLOAD_DEBUG_SYMBOLS "When set, debug symbols will be uploaded to our symbol server" OFF)
option(EXPORT_DEBUG_SYMBOLS "When set, debug symbols will be exported even if not being uploaded to our symbol server" OFF)

# Creates a `<TARGET>-stripped` custom target that runs dev/strip-symbols.sh
# on TARGET's output binary, optionally exporting and/or uploading the debug
# symbols first.
#
# strip_binary(<TARGET> [DEPENDS <deps>...])
function(strip_binary TARGET)
  cmake_parse_arguments(ARG "" "" "DEPENDS" ${ARGN})

  if (UPLOAD_DEBUG_SYMBOLS)
    # Uploading implies exporting first.
    list(APPEND STRIP_ARGS "--export")
    list(APPEND STRIP_ARGS "--upload")
    list(APPEND STRIP_ARGS "flowmill-debug-symbols")
  elseif (EXPORT_DEBUG_SYMBOLS)
    # Fix: EXPORT_DEBUG_SYMBOLS was declared above but previously never
    # honored here, making the option a silent no-op.
    list(APPEND STRIP_ARGS "--export")
  endif()

  add_custom_target(
    ${TARGET}-stripped
    DEPENDS ${ARG_DEPENDS}
    COMMAND
      ${CMAKE_SOURCE_DIR}/dev/strip-symbols.sh ${STRIP_ARGS} $<TARGET_FILE:${TARGET}>
  )

  # Record the stripped binary's path so other targets (e.g. docker images)
  # can consume it via the OUTPUT property.
  set_property(
    TARGET
    ${TARGET}-stripped
    PROPERTY
    "OUTPUT" $<TARGET_FILE:${TARGET}>-stripped
  )
endfunction()
|
||||
|
|
@ -0,0 +1,204 @@
|
|||
option(RUN_DOCKER_COMMANDS "when disabled, prepares docker images to be built but stop short of running `docker` commands" ON)

# Umbrella targets: `docker` builds every registered image; `docker-registry`
# builds and pushes every image. Individual images attach themselves via
# add_dependencies in build_custom_docker_image below.
add_custom_target(docker)
add_custom_target(docker-registry)

################
# DOCKER IMAGE #
################
|
||||
|
||||
# Declares targets to assemble a docker build context under
# ${CMAKE_BINARY_DIR}/docker.out/<IMAGE_NAME>, build the image, and push it
# to a registry.
#
# Creates: `<IMAGE_NAME>-docker` (build) and `<IMAGE_NAME>-docker-registry`
# (tag + push), hooked into the umbrella `docker` / `docker-registry` targets.
#
# Options:
#   DOCKERFILE_PATH  directory containing the Dockerfile (default: current src dir)
#   OUT_DIR          subdirectory of the context to copy files into
#   ARGS             docker --build-arg values
#   IMAGE_TAGS       tags to push (default: $ENV{IMAGE_TAGS} or "latest")
#   FILES            files to copy into the context (absolute or relative to src dir)
#   BINARIES         files relative to the current binary dir
#   DIRECTORIES      directories copied recursively into the context
#   OUTPUT_OF        targets whose OUTPUT property names a file to copy
#   ARTIFACTS_OF     targets whose $<TARGET_FILE:...> is copied
#   DOCKER_REGISTRY  registry (default: $ENV{DOCKER_REGISTRY} or localhost:5000)
#   DEPENDS / DEPENDENCY_OF  extra dependency wiring
function(build_custom_docker_image IMAGE_NAME)
  cmake_parse_arguments(ARG "" "DOCKERFILE_PATH;OUT_DIR" "ARGS;IMAGE_TAGS;FILES;BINARIES;DIRECTORIES;DEPENDS;OUTPUT_OF;ARTIFACTS_OF;DOCKER_REGISTRY;DEPENDENCY_OF" ${ARGN})

  # Dockerfile's directory defaults to the one containing CMakeLists.txt
  if (NOT DEFINED ARG_DOCKERFILE_PATH)
    set(ARG_DOCKERFILE_PATH "${CMAKE_CURRENT_SOURCE_DIR}")
  endif()

  if (NOT DEFINED ARG_IMAGE_TAGS)
    if (DEFINED ENV{IMAGE_TAGS})
      set(ARG_IMAGE_TAGS "$ENV{IMAGE_TAGS}")
    else()
      set(ARG_IMAGE_TAGS "latest")
    endif()
  endif()

  if (NOT DEFINED ARG_DOCKER_REGISTRY)
    if (DEFINED ENV{DOCKER_REGISTRY})
      set(ARG_DOCKER_REGISTRY "$ENV{DOCKER_REGISTRY}")
    else()
      set(ARG_DOCKER_REGISTRY "localhost:5000")
    endif()
  endif()

  # Absolute paths (or ~) are taken as-is; everything else is relative to
  # the current source directory.
  foreach (FILE ${ARG_FILES})
    if (FILE MATCHES "^[/~]")
      list(APPEND FILES_LIST "${FILE}")
    else()
      list(APPEND FILES_LIST "${CMAKE_CURRENT_SOURCE_DIR}/${FILE}")
    endif()
  endforeach()

  foreach (OUTPUT_OF ${ARG_OUTPUT_OF})
    get_target_property(OUTPUT ${OUTPUT_OF} OUTPUT)
    list(APPEND FILES_LIST "${OUTPUT}")
  endforeach()

  # everything in BINARIES is relative to current binary dir
  foreach (BINARY ${ARG_BINARIES})
    list(APPEND BINARIES_LIST "${CMAKE_CURRENT_BINARY_DIR}/${BINARY}")
  endforeach()

  foreach (ARTIFACTS_OF ${ARG_ARTIFACTS_OF})
    list(APPEND FILES_LIST $<TARGET_FILE:${ARTIFACTS_OF}>)
  endforeach()

  set(DOCKER_ARGS "")
  foreach (ARG ${ARG_ARGS})
    list(APPEND DOCKER_ARGS "--build-arg" "${ARG}")
  endforeach()

  # Per-image docker build context directory.
  set(out_path "${CMAKE_BINARY_DIR}/docker.out/${IMAGE_NAME}")

  if (NOT DEFINED ARG_OUT_DIR)
    set(files_path "${out_path}")
  else()
    set(files_path "${out_path}/${ARG_OUT_DIR}")
  endif()

  ################
  # docker build #
  ################

  add_custom_target(
    "${IMAGE_NAME}-docker"
    DEPENDS
    ${ARG_DEPENDS}
    ${ARG_OUTPUT_OF}
    ${ARG_ARTIFACTS_OF}
  )

  add_dependencies(
    docker
    "${IMAGE_NAME}-docker"
  )

  foreach (DEPENDENT_TARGET ${ARG_DEPENDENCY_OF})
    add_dependencies("${DEPENDENT_TARGET}-docker" "${IMAGE_NAME}-docker")
  endforeach()

  # Commands below attach to the target in declaration order: create the
  # context dirs, copy the Dockerfile and inputs, then (optionally) build.
  add_custom_command(
    TARGET
    "${IMAGE_NAME}-docker"
    COMMAND
    ${CMAKE_COMMAND} -E make_directory "${out_path}"
    COMMAND
    ${CMAKE_COMMAND} -E make_directory "${files_path}"
  )

  add_custom_command(
    TARGET
    "${IMAGE_NAME}-docker"
    WORKING_DIRECTORY
    "${out_path}"
    COMMAND
    ${CMAKE_COMMAND} -E copy_if_different ${ARG_DOCKERFILE_PATH}/Dockerfile ${out_path}
  )

  if (DEFINED FILES_LIST)
    add_custom_command(
      TARGET
      "${IMAGE_NAME}-docker"
      WORKING_DIRECTORY
      "${out_path}"
      COMMAND
      ${CMAKE_COMMAND} -E copy_if_different ${FILES_LIST} ${files_path}
    )
  endif()

  if (DEFINED BINARIES_LIST)
    add_custom_command(
      TARGET
      "${IMAGE_NAME}-docker"
      WORKING_DIRECTORY
      "${out_path}"
      COMMAND
      ${CMAKE_COMMAND} -E copy_if_different ${BINARIES_LIST} ${files_path}
    )
  endif()

  foreach (DIRECTORY ${ARG_DIRECTORIES})
    get_filename_component(DIR_NAME ${DIRECTORY} NAME)

    add_custom_command(
      TARGET
      "${IMAGE_NAME}-docker"
      WORKING_DIRECTORY
      "${out_path}"
      COMMAND
      ${CMAKE_COMMAND} -E copy_directory
      "${CMAKE_CURRENT_SOURCE_DIR}/${DIRECTORY}"
      "${files_path}/${DIR_NAME}"
    )
  endforeach()

  if (RUN_DOCKER_COMMANDS)
    add_custom_command(
      TARGET
      "${IMAGE_NAME}-docker"
      WORKING_DIRECTORY
      "${out_path}"
      COMMAND
      docker build -t "${IMAGE_NAME}" ${DOCKER_ARGS} .
    )
  endif()

  ###########################
  # push to docker registry #
  ###########################

  add_custom_target(
    "${IMAGE_NAME}-docker-registry"
    DEPENDS
    docker-registry-login
    "${IMAGE_NAME}-docker"
    ${ARG_DEPENDS}
  )

  add_dependencies(
    docker-registry
    "${IMAGE_NAME}-docker-registry"
  )

  foreach (DEPENDENT_TARGET ${ARG_DEPENDENCY_OF})
    add_dependencies(
      "${DEPENDENT_TARGET}-docker-registry"
      "${IMAGE_NAME}-docker-registry"
    )
  endforeach()

  if (RUN_DOCKER_COMMANDS)
    # TODO: merge docker-registry-push.sh and push_docker_image.sh
    foreach (IMAGE_TAG ${ARG_IMAGE_TAGS})
      add_custom_command(
        TARGET
        "${IMAGE_NAME}-docker-registry"
        COMMAND
        docker tag "${IMAGE_NAME}" "${IMAGE_NAME}:${IMAGE_TAG}"
        COMMAND
        "${CMAKE_SOURCE_DIR}/dev/docker-registry-push.sh"
        "${IMAGE_NAME}" "${IMAGE_TAG}" --no-login "${ARG_DOCKER_REGISTRY}"
      )
    endforeach()
  endif()
endfunction()
|
||||
|
||||
###################
# DOCKER REGISTRY #
###################

# Logs into the docker registry; a dependency of every per-image
# `<IMAGE_NAME>-docker-registry` push target.
add_custom_target(
  docker-registry-login
  COMMAND
  "${CMAKE_SOURCE_DIR}/dev/docker-registry-login.sh" --no-vault env
)
|
||||
|
|
@ -0,0 +1,18 @@
|
|||
###########################
# static compilation target
# Link against `static-executable` to get a statically linked libstdc++ and
# the (optionally enabled) static address sanitizer configuration.
add_library(static-executable INTERFACE)
target_link_libraries(
  static-executable
  INTERFACE
  address_sanitizer-static
  -static-libstdc++
)

###########################
# shared compilation target
add_library(shared-executable INTERFACE)
target_link_libraries(
  shared-executable
  INTERFACE
  address_sanitizer-shared
)
|
||||
|
|
@ -0,0 +1,138 @@
|
|||
option(GO_STATIC_LINK "statically link go binaries" OFF)

# Build-tree GOPATH shared by all go module / package targets below.
set(GO_PATH "${CMAKE_BINARY_DIR}/go-path")
set(GO_PATH_SRC "${GO_PATH}/src")
||||
|
||||
# Sets up a go module target named `<NAME>-go-module` for package
# `<DOMAIN>/<NAME>` under the build-tree GOPATH: copies go.mod into place and
# pre-downloads module dependencies. Records MOD_BUILD_DIR and GO_MOD_FILE as
# target properties for build_go_package to consume.
#
# setup_go_module(<NAME> <DOMAIN> [DEPENDS <deps>...] [DEPENDENCY_OF <targets>...])
function(setup_go_module NAME DOMAIN)
  # Fix: ARG_DEPENDS / ARG_DEPENDENCY_OF were referenced below but the
  # arguments were never parsed, so DEPENDS / DEPENDENCY_OF were silently
  # ignored by callers.
  cmake_parse_arguments(ARG "" "" "DEPENDS;DEPENDENCY_OF" ${ARGN})

  set(PACKAGE "${DOMAIN}/${NAME}")
  set(TARGET "${NAME}-go-module")

  set(MOD_BUILD_DIR "${GO_PATH}/src/${PACKAGE}")
  set(GO_MOD_FILE "${MOD_BUILD_DIR}/go.mod")

  add_custom_target(
    "${TARGET}"
    DEPENDS
    ${ARG_DEPENDS}
  )

  # Stage go.mod into the module's build directory under GOPATH.
  add_custom_command(
    TARGET
    "${TARGET}"
    COMMAND
    ${CMAKE_COMMAND} -E make_directory
    "${MOD_BUILD_DIR}"
    COMMAND
    ${CMAKE_COMMAND} -E copy_if_different
    "${CMAKE_CURRENT_SOURCE_DIR}/go.mod"
    "${GO_MOD_FILE}"
  )

  # Pre-fetch module dependencies so package builds are deterministic.
  add_custom_command(
    TARGET
    "${TARGET}"
    WORKING_DIRECTORY
    "${MOD_BUILD_DIR}"
    COMMAND
    env GOPATH="${GO_PATH}"
    go mod download -x
  )

  set_property(TARGET "${TARGET}" PROPERTY "MOD_BUILD_DIR" "${MOD_BUILD_DIR}")
  set_property(TARGET "${TARGET}" PROPERTY "GO_MOD_FILE" "${GO_MOD_FILE}")

  foreach(DEPENDENCY ${ARG_DEPENDENCY_OF})
    add_dependencies(${DEPENDENCY} ${TARGET})
  endforeach()
endfunction()
|
||||
|
||||
# TODO: ADD STATIC CHECK
# Builds the given go package under a previously set up (with `setup_go_module`)
# go module.
# Sets the `GO_TARGET` variable in the parent scope with this target's name.
#
# build_go_package(<NAME> <MODULE>
#   [BINARY]      # also `go build` an output binary into the binary dir
#   [GENERATED]   # sources are generated in place; skip the source copy
#   [ALL]         # attach the target to the default build
#   [DEPENDS <deps>...] [DEPENDENCY_OF <targets>...])
function(build_go_package NAME MODULE)
  cmake_parse_arguments(ARG "BINARY;GENERATED;ALL" "" "DEPENDS;DEPENDENCY_OF" ${ARGN})

  # Slashes in the package path become dashes in the target name.
  string(REPLACE "/" "-" IDENTIFIER "${NAME}")
  set(TARGET "${IDENTIFIER}-go")
  set(GO_TARGET "${TARGET}" PARENT_SCOPE)

  # Locations recorded on the module target by setup_go_module.
  get_target_property(MOD_BUILD_DIR "${MODULE}" MOD_BUILD_DIR)
  get_target_property(GO_MOD_FILE "${MODULE}" GO_MOD_FILE)
  set(BUILD_DIR "${MOD_BUILD_DIR}/${NAME}")
  set(OUT_BINARY "${CMAKE_CURRENT_BINARY_DIR}/${NAME}")

  set(TARGET_OPTIONS)
  if(ARG_ALL)
    list(APPEND TARGET_OPTIONS ALL)
  endif()

  add_custom_target(
    "${TARGET}"
    ${TARGET_OPTIONS}
    DEPENDS
    ${MODULE}
    ${ARG_DEPENDS}
    COMMAND
    ${CMAKE_COMMAND} -E make_directory
    "${BUILD_DIR}"
  )

  # GENERATED packages already have sources in the build tree; otherwise copy
  # them from the source dir into the GOPATH build dir.
  if (NOT ARG_GENERATED)
    add_custom_command(
      TARGET
      "${TARGET}"
      COMMAND
      ${CMAKE_COMMAND} -E copy_directory
      "${CMAKE_CURRENT_SOURCE_DIR}"
      "${BUILD_DIR}"
      #COMMAND
      # ${CMAKE_COMMAND} -E create_symlink
      # "${CMAKE_CURRENT_SOURCE_DIR}"
      # "${BUILD_DIR}"
    )
  endif()

  # Fetch package-level dependencies.
  add_custom_command(
    TARGET
    "${TARGET}"
    WORKING_DIRECTORY
    "${BUILD_DIR}"
    COMMAND
    env GOPATH="${GO_PATH}"
    go get ./...
  )

  if (ARG_BINARY)
    set(GO_BUILD_ARGS)
    if (GO_STATIC_LINK)
      # Static linking: disable cgo and pin the platform.
      list(APPEND GO_BUILD_ARGS CGO_ENABLED=0 GOOS=linux GOARCH=amd64)
    endif()

    add_custom_command(
      TARGET
      "${TARGET}"
      WORKING_DIRECTORY
      "${BUILD_DIR}"
      BYPRODUCTS
      "${OUT_BINARY}"
      COMMAND
      env GOPATH="${GO_PATH}" ${GO_BUILD_ARGS}
      go build -o "${OUT_BINARY}" -modfile "${GO_MOD_FILE}" .
    )

    # Expose the built binary path for consumers (e.g. docker image targets).
    set_property(
      TARGET
      "${TARGET}"
      PROPERTY
      "OUTPUT" "${OUT_BINARY}"
    )
  endif()

  foreach(DEPENDENCY ${ARG_DEPENDENCY_OF})
    add_dependencies(${DEPENDENCY} ${TARGET})
  endforeach()
endfunction()
|
||||
|
||||
# NOTE(review): stub — arguments are parsed but no targets are created yet.
function(build_standalone_go_binary NAME MODULE)
  cmake_parse_arguments(ARG "BINARY;GENERATED;ALL" "" "DEPENDS;DEPENDENCY_OF" ${ARGN})
endfunction()
|
||||
|
|
@ -0,0 +1,6 @@
|
|||
# libelf (ELF object manipulation, needed by bcc), discovered via pkg-config
# and exposed as the `libelf` INTERFACE target.
pkg_check_modules(LIBELF REQUIRED libelf)
add_library(libelf INTERFACE)
target_compile_options(libelf INTERFACE "${LIBELF_CFLAGS}")
target_link_libraries(libelf INTERFACE "${LIBELF_LINK_LIBRARIES}")
message(STATUS "libelf CFLAGS: ${LIBELF_CFLAGS}")
message(STATUS "libelf LIBRARIES: ${LIBELF_LINK_LIBRARIES}")
|
||||
|
|
@ -0,0 +1,23 @@
|
|||
# LLVM discovery: exposes `llvm` (static component libs), `llvm-shared`
# (single -lLLVM shared lib), and the common `llvm-interface` target.
find_package(LLVM REQUIRED CONFIG)
message(STATUS "Found LLVM ${LLVM_PACKAGE_VERSION}")
message(STATUS "Using LLVMConfig.cmake in: ${LLVM_DIR}")

add_library(llvm INTERFACE)
add_library(llvm-shared INTERFACE)
add_library(llvm-interface INTERFACE)
target_include_directories(
  llvm-interface
  INTERFACE
  ${LLVM_INCLUDE_DIRS}
)
llvm_map_components_to_libnames(
  LLVM_LIBS
  core
  mcjit
  native
  executionengine
  scalaropts
)
target_compile_definitions(llvm-interface INTERFACE ${LLVM_DEFINITIONS})
target_link_libraries(llvm INTERFACE llvm-interface ${LLVM_LIBS})
target_link_libraries(llvm-shared INTERFACE llvm-interface -L${LLVM_LIBRARY_DIRS} -lLLVM)
|
||||
|
|
@ -0,0 +1,11 @@
|
|||
# Static lz4 compression library, exposed as the `lz4` INTERFACE target.
# Fix: include FindPackageHandleStandardArgs explicitly instead of relying
# on it being pulled in transitively by another find module.
include(FindPackageHandleStandardArgs)

find_path(LZ4_INCLUDE_DIR lz4.h)
find_library(LZ4_LIBRARY NAMES "liblz4.a")
find_package_handle_standard_args(LZ4 DEFAULT_MSG LZ4_LIBRARY LZ4_INCLUDE_DIR)
if(NOT LZ4_FOUND)
  message(FATAL_ERROR "Could not find lz4. Build container should already have that set up")
endif()
message(STATUS "lz4 INCLUDE_DIR: ${LZ4_INCLUDE_DIR}")
message(STATUS "lz4 LIBRARY: ${LZ4_LIBRARY}")
add_library(lz4 INTERFACE)
target_include_directories(lz4 INTERFACE "${LZ4_INCLUDE_DIR}")
target_link_libraries(lz4 INTERFACE "${LZ4_LIBRARY}")
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
# Static OpenSSL, pinned to the exact version the build container provides so
# all artifacts link against a known TLS implementation.
set(OPENSSL_USE_STATIC_LIBS TRUE)
find_package(OpenSSL REQUIRED)
if (NOT OPENSSL_VERSION STREQUAL "1.1.1b")
  message(FATAL_ERROR "OpenSSL must be a specific version (1.1.1b). Build container should already have that set up")
endif()
|
||||
|
|
@ -0,0 +1,143 @@
|
|||
# grpc-gateway proto include locations baked into the build container.
set(GO_PROTOBUF_ANNOTATIONS_DIR /usr/local/go/src/github.com/grpc-ecosystem/grpc-gateway)
set(GO_PROTOBUF_GOOGLEAPIS_DIR /usr/local/go/src/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis)

# Generates protobuf (and optionally gRPC / grpc-gateway) sources for
# <NAME>.proto in the current source dir.
#
# GO should be given the go-module name associated with the built packages.
# Creates `<NAME>-protobuf` (codegen), plus `<NAME>-cpp-protobuf` (static lib
# of generated C++) when CPP is set and `<NAME>-go-protobuf` when GO is set.
function (build_protobuf NAME)
  cmake_parse_arguments(ARG "CPP;GRPC;GRPC_GATEWAY" "GO" "DEPENDS;DEPENDENCY_OF" ${ARGN})

  set(TARGET "${NAME}-protobuf")

  add_custom_target(
    "${TARGET}"
    DEPENDS
    ${ARG_DEPENDS}
  )

  # protoc arguments are accumulated below based on the requested outputs.
  set(
    PROTOBUF_ARGS
    -I"${CMAKE_CURRENT_SOURCE_DIR}"
  )

  if (ARG_CPP)
    list(
      APPEND
      PROTOBUF_ARGS
      -I/usr/local/include
      --cpp_out="${CMAKE_CURRENT_BINARY_DIR}/generated"
    )

    add_custom_command(
      TARGET
      "${TARGET}"
      COMMAND
      ${CMAKE_COMMAND} -E make_directory
      "${CMAKE_CURRENT_BINARY_DIR}/generated"
    )

    # Files protoc will emit for the C++ message code.
    set(
      GEN_FILES_CPP
      "${CMAKE_CURRENT_BINARY_DIR}/generated/${NAME}.pb.h"
      "${CMAKE_CURRENT_BINARY_DIR}/generated/${NAME}.pb.cc"
    )

    if (ARG_GRPC)
      list(
        APPEND
        PROTOBUF_ARGS
        --plugin=protoc-gen-grpc="/usr/local/bin/grpc_cpp_plugin"
        --grpc_out="${CMAKE_CURRENT_BINARY_DIR}/generated"
      )

      list(
        APPEND
        GEN_FILES_CPP
        "${CMAKE_CURRENT_BINARY_DIR}/generated/${NAME}.grpc.pb.h"
        "${CMAKE_CURRENT_BINARY_DIR}/generated/${NAME}.grpc.pb.cc"
      )
    endif()
  endif()

  if (DEFINED ARG_GO)
    list(
      APPEND
      PROTOBUF_ARGS
      --go_out="plugins=grpc:${GO_PATH_SRC}"
    )

    # Go output lands inside the module's GOPATH tree, so the module must be
    # set up first.
    add_dependencies(
      "${TARGET}"
      "${ARG_GO}-go-module"
    )

    if (ARG_GRPC)
      list(
        APPEND
        PROTOBUF_ARGS
        -I"${GO_PROTOBUF_ANNOTATIONS_DIR}"
        -I"${GO_PROTOBUF_GOOGLEAPIS_DIR}"
        --grpc-gateway_out="logtostderr=true:${GO_PATH_SRC}"
      )
    endif()
  endif()

  list(
    APPEND
    PROTOBUF_ARGS
    "${CMAKE_CURRENT_SOURCE_DIR}/${NAME}.proto"
  )

  add_custom_command(
    TARGET
    "${TARGET}"
    COMMAND
    protoc
    ${PROTOBUF_ARGS}
  )

  if (ARG_CPP)
    set(CPP_TARGET "${NAME}-cpp-protobuf")

    set_source_files_properties(
      ${GEN_FILES_CPP}
      PROPERTIES
      GENERATED TRUE
    )

    add_library(
      "${CPP_TARGET}"
      STATIC
      ${GEN_FILES_CPP}
    )

    target_link_libraries(
      "${CPP_TARGET}"
      protobuf
      grpc++
    )

    target_include_directories(
      "${CPP_TARGET}"
      PUBLIC
      "${CMAKE_CURRENT_BINARY_DIR}"
    )

    add_dependencies(
      "${CPP_TARGET}"
      "${TARGET}"
    )
  endif()

  if (DEFINED ARG_GO)
    set(GO_TARGET "${NAME}-go-protobuf")

    add_custom_target(
      "${GO_TARGET}"
      DEPENDS
      "${TARGET}"
    )
  endif()

  foreach(DEPENDENCY ${ARG_DEPENDENCY_OF})
    add_dependencies(${DEPENDENCY} ${TARGET})
  endforeach()
endfunction()
|
||||
|
|
@ -0,0 +1,219 @@
|
|||
# Runs the render compiler (a Java jar) over all `*.render` files in
# INPUT_DIR and wires up libraries for the generated sources.
#
# render_compile(<INPUT_DIR>
#   [OUTPUT_DIR <dir>]        # default: ${CMAKE_BINARY_DIR}/generated
#   [PACKAGE <pkg>]           # default: flowmill
#   [COMPILER <jar>]          # default: the `render_compiler` target's LOCATION
#   APPS <app>... [DEPENDS <deps>...])
#
# Creates per-package targets `render_compile_<PKG>` and
# `render_<PKG>_artifacts`, and per-app static libraries
# `render_<PKG>_<APP>{,_hash,_descriptor,_writer}`.
function(render_compile INPUT_DIR)
  cmake_parse_arguments(ARG "" "OUTPUT_DIR;PACKAGE;COMPILER" "APPS;DEPENDS" ${ARGN})

  if(DEFINED ARG_OUTPUT_DIR)
    set(OUTPUT_DIR ${ARG_OUTPUT_DIR})
  else()
    set(OUTPUT_DIR "${CMAKE_BINARY_DIR}/generated")
  endif()

  if(DEFINED ARG_PACKAGE)
    set(PACKAGE ${ARG_PACKAGE})
  else()
    set(PACKAGE "flowmill")
  endif()

  if(DEFINED ARG_COMPILER)
    set(RENDER_COMPILER ${ARG_COMPILER})
  else()
    get_target_property(RENDER_COMPILER render_compiler LOCATION)
  endif()

  set(RENDER_${PACKAGE}_OUTPUTS "")

  # Enumerate every file the render compiler will generate per app; these
  # lists drive both the custom command OUTPUT and the library sources.
  foreach(APP ${ARG_APPS})
    set(
      RENDER_${PACKAGE}_${APP}_DESCRIPTOR
      "${OUTPUT_DIR}/${PACKAGE}/${APP}.descriptor.cc"
    )
    set(
      RENDER_${PACKAGE}_${APP}_HASH
      "${OUTPUT_DIR}/${PACKAGE}/${APP}/hash.c"
    )
    set(
      RENDER_${PACKAGE}_${APP}_WRITER
      "${OUTPUT_DIR}/${PACKAGE}/${APP}/writer.h"
      "${OUTPUT_DIR}/${PACKAGE}/${APP}/writer.cc"
      "${OUTPUT_DIR}/${PACKAGE}/${APP}/encoder.h"
      "${OUTPUT_DIR}/${PACKAGE}/${APP}/encoder.cc"
      "${OUTPUT_DIR}/${PACKAGE}/${APP}/otlp_log_encoder.h"
      "${OUTPUT_DIR}/${PACKAGE}/${APP}/otlp_log_encoder.cc"
    )
    set(
      RENDER_${PACKAGE}_${APP}_OUTPUTS
      ${RENDER_${PACKAGE}_${APP}_HASH}
      ${RENDER_${PACKAGE}_${APP}_DESCRIPTOR}
      ${RENDER_${PACKAGE}_${APP}_WRITER}
      "${OUTPUT_DIR}/${PACKAGE}/${APP}/index.h"
      "${OUTPUT_DIR}/${PACKAGE}/${APP}/index.cc"
      "${OUTPUT_DIR}/${PACKAGE}/${APP}/containers.h"
      "${OUTPUT_DIR}/${PACKAGE}/${APP}/containers.inl"
      "${OUTPUT_DIR}/${PACKAGE}/${APP}/containers.cc"
      "${OUTPUT_DIR}/${PACKAGE}/${APP}/keys.h"
      "${OUTPUT_DIR}/${PACKAGE}/${APP}/handles.h"
      "${OUTPUT_DIR}/${PACKAGE}/${APP}/handles.cc"
      "${OUTPUT_DIR}/${PACKAGE}/${APP}/auto_handles.h"
      "${OUTPUT_DIR}/${PACKAGE}/${APP}/auto_handles.cc"
      "${OUTPUT_DIR}/${PACKAGE}/${APP}/weak_refs.h"
      "${OUTPUT_DIR}/${PACKAGE}/${APP}/weak_refs.inl"
      "${OUTPUT_DIR}/${PACKAGE}/${APP}/weak_refs.cc"
      "${OUTPUT_DIR}/${PACKAGE}/${APP}/modifiers.h"
      "${OUTPUT_DIR}/${PACKAGE}/${APP}/modifiers.cc"
      "${OUTPUT_DIR}/${PACKAGE}/${APP}/spans.h"
      "${OUTPUT_DIR}/${PACKAGE}/${APP}/spans.cc"
      "${OUTPUT_DIR}/${PACKAGE}/${APP}/span_base.h"
      "${OUTPUT_DIR}/${PACKAGE}/${APP}/connection.h"
      "${OUTPUT_DIR}/${PACKAGE}/${APP}/connection.cc"
      "${OUTPUT_DIR}/${PACKAGE}/${APP}/protocol.h"
      "${OUTPUT_DIR}/${PACKAGE}/${APP}/protocol.cc"
      "${OUTPUT_DIR}/${PACKAGE}/${APP}/transform_builder.h"
      "${OUTPUT_DIR}/${PACKAGE}/${APP}/transform_builder.cc"
      "${OUTPUT_DIR}/${PACKAGE}/${APP}/auto_handle_converters.h"
      "${OUTPUT_DIR}/${PACKAGE}/${APP}/auto_handle_converters.cc"
      "${OUTPUT_DIR}/${PACKAGE}/${APP}/meta.h"
      "${OUTPUT_DIR}/${PACKAGE}/${APP}/bpf.h"
    )
    list(
      APPEND
      RENDER_${PACKAGE}_OUTPUTS
      ${RENDER_${PACKAGE}_${APP}_OUTPUTS}
    )
  endforeach()

  list(
    APPEND
    RENDER_${PACKAGE}_OUTPUTS
    "${OUTPUT_DIR}/${PACKAGE}/metrics.h"
  )

  set_source_files_properties(
    ${RENDER_${PACKAGE}_OUTPUTS}
    PROPERTIES
    GENERATED TRUE
  )

  file(
    GLOB
    RENDER_INPUTS
    "${INPUT_DIR}/*.render"
  )

  # Generate sources
  #
  add_custom_command(
    OUTPUT
    ${RENDER_${PACKAGE}_OUTPUTS}
    WORKING_DIRECTORY
    ${INPUT_DIR}
    COMMAND
    ${CMAKE_COMMAND} -E make_directory "${OUTPUT_DIR}"
    COMMAND
    java -jar ${RENDER_COMPILER} -i . -o "${OUTPUT_DIR}"
    DEPENDS
    ${RENDER_INPUTS}
    ${RENDER_COMPILER}
    ${ARG_DEPENDS}
  )
  add_custom_target(
    render_compile_${PACKAGE}
    DEPENDS
    ${RENDER_${PACKAGE}_OUTPUTS}
  )

  # Generated sources interface library
  #
  add_library(
    render_${PACKAGE}_artifacts
    INTERFACE
  )
  target_include_directories(
    render_${PACKAGE}_artifacts
    INTERFACE
    "${OUTPUT_DIR}"
  )
  target_link_libraries(
    render_${PACKAGE}_artifacts
    INTERFACE
    logging
  )
  add_dependencies(
    render_${PACKAGE}_artifacts
    render_compile_${PACKAGE}
  )

  # Generated sources app libraries
  #
  foreach(APP ${ARG_APPS})
    add_library(
      render_${PACKAGE}_${APP}
      STATIC
      EXCLUDE_FROM_ALL
      ${RENDER_${PACKAGE}_${APP}_OUTPUTS}
    )
    target_include_directories(
      render_${PACKAGE}_${APP}
      PUBLIC
      ${OUTPUT_DIR}
    )
    target_link_libraries(
      render_${PACKAGE}_${APP}
      PUBLIC
      logging
      fixed_hash
      jitbuf_llvm
      render_${PACKAGE}_artifacts
    )

    # Smaller split libraries so consumers can link only what they need.
    add_library(
      render_${PACKAGE}_${APP}_hash
      STATIC
      EXCLUDE_FROM_ALL
      ${RENDER_${PACKAGE}_${APP}_HASH}
    )
    target_include_directories(
      render_${PACKAGE}_${APP}_hash
      PUBLIC
      ${OUTPUT_DIR}
    )
    target_link_libraries(
      render_${PACKAGE}_${APP}_hash
      PUBLIC
      render_${PACKAGE}_artifacts
    )

    add_library(
      render_${PACKAGE}_${APP}_descriptor
      STATIC
      EXCLUDE_FROM_ALL
      ${RENDER_${PACKAGE}_${APP}_DESCRIPTOR}
    )
    target_include_directories(
      render_${PACKAGE}_${APP}_descriptor
      PUBLIC
      ${OUTPUT_DIR}
    )
    target_link_libraries(
      render_${PACKAGE}_${APP}_descriptor
      PUBLIC
      render_${PACKAGE}_artifacts
    )

    add_library(
      render_${PACKAGE}_${APP}_writer
      STATIC
      EXCLUDE_FROM_ALL
      ${RENDER_${PACKAGE}_${APP}_WRITER}
    )
    target_include_directories(
      render_${PACKAGE}_${APP}_writer
      PUBLIC
      ${OUTPUT_DIR}
    )
    target_link_libraries(
      render_${PACKAGE}_${APP}_writer
      PUBLIC
      render_${PACKAGE}_artifacts
    )
  endforeach()

endfunction()
|
||||
|
|
@ -0,0 +1,93 @@
|
|||
######
# ASAN
######

option(USE_ADDRESS_SANITIZER "Use Address Sanitizer in compilation" OFF)

# Interface targets that carry the ASAN flags; they stay empty no-ops when the
# sanitizer is disabled, so consumers can link them unconditionally.
add_library(address_sanitizer-static INTERFACE)
add_library(address_sanitizer-shared INTERFACE)

message(STATUS "Address Sanitizer is ${USE_ADDRESS_SANITIZER}")

if(USE_ADDRESS_SANITIZER)
  # Same flags for both linkage variants.
  foreach(LINKAGE static shared)
    target_compile_options(
      address_sanitizer-${LINKAGE}
      INTERFACE
        -fsanitize=address
        -U_FORTIFY_SOURCE
        -fno-stack-protector
        -fno-omit-frame-pointer
    )
    target_link_libraries(
      address_sanitizer-${LINKAGE}
      INTERFACE
        -static-libasan
        -static-libstdc++
        -fsanitize=address
    )
  endforeach()
endif()

#######
# UBSAN
#######

option(USE_UNDEFINED_BEHAVIOR_SANITIZER "Use Undefined Behavior Sanitizer in compilation" OFF)

# Interface targets that carry the UBSAN flags; empty no-ops when disabled.
add_library(undefined_behavior_sanitizer-static INTERFACE)
add_library(undefined_behavior_sanitizer-shared INTERFACE)

message(STATUS "Undefined Behavior Sanitizer is ${USE_UNDEFINED_BEHAVIOR_SANITIZER}")

if(USE_UNDEFINED_BEHAVIOR_SANITIZER)
  foreach(LINKAGE static shared)
    target_compile_options(
      undefined_behavior_sanitizer-${LINKAGE}
      INTERFACE
        -fsanitize=undefined
        -U_FORTIFY_SOURCE
        -fno-stack-protector
        -fno-omit-frame-pointer
    )
    target_link_libraries(
      undefined_behavior_sanitizer-${LINKAGE}
      INTERFACE
        -static-libubsan
        -static-libstdc++
        -fsanitize=undefined
    )
  endforeach()
endif()
|
||||
|
|
@ -0,0 +1,53 @@
|
|||
# Runs linters on the given set of shell scripts.
|
||||
#
|
||||
# The scripts are bundled as a single cmake target.
|
||||
#
|
||||
# Positional Arguments:
|
||||
# 1. TARGET:
|
||||
# The name of the target to create for the bundle.
|
||||
#
|
||||
# Named Arguments:
|
||||
# SOURCES:
|
||||
# The list of scripts to add to the bundle, relative to the current source directory.
|
||||
#
|
||||
# DEPENDS:
|
||||
# The list of targets this bundle explicitly depends on.
|
||||
#
|
||||
# Output:
|
||||
# The list of files linted (`SOURCES`) is exposed through the property `OUTPUT` and can be
|
||||
# accessed with the expression `$<TARGET_PROPERTY:TARGET,OUTPUT>`, where `TARGET` is the
|
||||
# target name given to the bundle. E.g.: `SET(script_list $<TARGET_PROPERTY:my_bundle,OUTPUT>)`.
|
||||
#
|
||||
# Usage:
|
||||
# lint_shell_script_bundle(
|
||||
# my_target
|
||||
# SOURCES
|
||||
# my_script_1.sh
|
||||
# my_script_2.sh
|
||||
# DEPENDS
|
||||
# some_dependency_1
|
||||
# some_dependency_2
|
||||
# )
|
||||
function(lint_shell_script_bundle TARGET)
  cmake_parse_arguments(LINT "" "" "SOURCES;DEPENDS" ${ARGN})

  # The bundle target itself; each script adds a shellcheck command to it.
  add_custom_target(${TARGET} ALL DEPENDS ${LINT_DEPENDS})

  set(LINTED_FILES)
  foreach(SCRIPT IN LISTS LINT_SOURCES)
    add_custom_command(
      TARGET ${TARGET}
      WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
      COMMAND shellcheck -x "${SCRIPT}"
    )
    list(APPEND LINTED_FILES "${CMAKE_CURRENT_SOURCE_DIR}/${SCRIPT}")
  endforeach()

  # Expose the absolute paths of linted scripts via the OUTPUT property, so
  # other targets can consume them with $<TARGET_PROPERTY:...,OUTPUT>.
  set_property(TARGET ${TARGET} PROPERTY "OUTPUT" ${LINTED_FILES})
endfunction()
|
||||
|
|
@ -0,0 +1,3 @@
|
|||
# Header-only spdlog from the bundled ext/ submodule, exposed as an INTERFACE
# target so consumers only inherit the include path.
add_library(spdlog INTERFACE)
target_include_directories(spdlog INTERFACE "${CMAKE_SOURCE_DIR}/ext/spdlog/include")
target_link_libraries(spdlog INTERFACE)
|
||||
|
|
@ -0,0 +1,175 @@
|
|||
# Fetches and builds googletest/googlemock as an external project and records
# the resulting archive/include paths for the add_*_gtest helpers below.
include(ExternalProject)

set(gtest_URL https://github.com/google/googletest.git)
set(gtest_TAG "release-1.10.0")

# Directories
set(gtest_LIB_DIR ${CMAKE_BINARY_DIR}/googletest/src/googletest/lib)
set(gmock_LIB_DIR ${CMAKE_BINARY_DIR}/googletest/src/googletest/lib)
set(gtest_INC_DIR ${CMAKE_BINARY_DIR}/googletest/src/googletest/googletest/include)
set(gmock_INC_DIR ${CMAKE_BINARY_DIR}/googletest/src/googletest/googlemock/include)

# Outputs
# Debug builds are expected to produce 'd'-suffixed archives.
if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
  set(gtest_LIB ${gtest_LIB_DIR}/libgtestd.a)
  set(gtest_MAIN_LIB ${gtest_LIB_DIR}/libgtest_maind.a)
  set(gmock_LIB ${gmock_LIB_DIR}/libgmockd.a)
  set(gmock_MAIN_LIB ${gmock_LIB_DIR}/libgmock_maind.a)
else()
  set(gtest_LIB ${gtest_LIB_DIR}/libgtest.a)
  set(gtest_MAIN_LIB ${gtest_LIB_DIR}/libgtest_main.a)
  set(gmock_LIB ${gmock_LIB_DIR}/libgmock.a)
  set(gmock_MAIN_LIB ${gmock_LIB_DIR}/libgmock_main.a)
endif()

# Built in-source, never installed; BUILD_BYPRODUCTS lets Ninja track the
# archives produced by the external build.
ExternalProject_Add(
  googletest
  PREFIX "${CMAKE_BINARY_DIR}/googletest"
  GIT_REPOSITORY ${gtest_URL}
  GIT_TAG ${gtest_TAG}
  DOWNLOAD_DIR "${DOWNLOAD_LOCATION}"
  BUILD_IN_SOURCE 1
  BUILD_BYPRODUCTS ${gtest_LIB} ${gtest_MAIN_LIB} ${gmock_LIB} ${gmock_MAIN_LIB}
  INSTALL_COMMAND ""
  CMAKE_CACHE_ARGS
    -DCMAKE_BUILD_TYPE:STRING=${CMAKE_BUILD_TYPE}
    -DBUILD_GMOCK:BOOL=ON
    -DINSTALL_GTEST:BOOL=OFF
)

enable_testing()
include_directories(${gtest_INC_DIR})
|
||||
|
||||
# Builds a gtest executable that is NOT registered with `make test`. Useful
# for manual or component tests that should not run in the unit test suite.
#
# Named arguments:
#   SRCS - source files for the test executable
#   DEPS - extra libraries/targets to link against
function(add_standalone_gtest testName)
  cmake_parse_arguments(ARG "" "" "SRCS;DEPS" ${ARGN})

  add_executable(${testName} ${ARG_SRCS})
  target_include_directories(${testName} PRIVATE ${gtest_INC_DIR} ${gmock_INC_DIR})
  target_link_libraries(
    ${testName}
    ${gmock_MAIN_LIB}
    ${gtest_LIB}
    ${gmock_LIB}
    "-pthread"
    ${ARG_DEPS}
  )
  add_dependencies(${testName} googletest)
endfunction()
|
||||
|
||||
# Declares a gtest-based unit test executable and registers it with CTest,
# so it runs under `make test`.
#
# Named arguments:
#   SRCS - source files for the test executable
#   DEPS - extra libraries/targets to link against
function(add_gtest testName)
  cmake_parse_arguments(ARG "" "" "SRCS;DEPS" ${ARGN})

  add_executable(${testName} ${ARG_SRCS})
  target_include_directories(${testName} PRIVATE ${gtest_INC_DIR} ${gmock_INC_DIR})
  target_link_libraries(
    ${testName}
    ${gmock_MAIN_LIB}
    ${gtest_LIB}
    ${gmock_LIB}
    "-pthread"
    ${ARG_DEPS}
    shared-executable
  )
  add_dependencies(${testName} googletest)
  add_test(${testName} ${testName})
endfunction()
|
||||
|
||||
# Declares a gtest/gmock-dependent static library for reuse by other unit
# tests; it links the test frameworks but not a `main` symbol.
#
# Named arguments:
#   SRCS - source files for the library
#   DEPS - extra libraries/targets to link against
function(add_gtest_lib testName)
  cmake_parse_arguments(ARG "" "" "SRCS;DEPS" ${ARGN})

  add_library(${testName} ${ARG_SRCS})
  target_include_directories(${testName} PRIVATE ${gtest_INC_DIR} ${gmock_INC_DIR})
  target_link_libraries(
    ${testName}
    ${gtest_LIB}
    ${gmock_LIB}
    "-pthread"
    ${ARG_DEPS}
  )
  add_dependencies(${testName} googletest)
endfunction()
|
||||
|
||||
# Adds a CTest-registered unit test named `${NAME}_test`.
#
# `${NAME}_test.cc` is implicitly added as a source; more sources can be
# declared via `SRCS`. The executable is implicitly linked against gtest and
# gmock (with gmock's `main`); extra libraries go in `LIBS` and extra target
# dependencies in `DEPS`.
function(add_cpp_test NAME)
  cmake_parse_arguments(ARG "" "" "SRCS;LIBS;DEPS" ${ARGN})
  set(TEST_NAME "${NAME}_test")

  add_executable(${TEST_NAME} "${TEST_NAME}.cc" ${ARG_SRCS})
  add_test(${TEST_NAME} ${TEST_NAME})

  target_include_directories(${TEST_NAME} PRIVATE ${gtest_INC_DIR} ${gmock_INC_DIR})
  target_link_libraries(
    ${TEST_NAME}
    ${ARG_LIBS}
    ${gmock_MAIN_LIB}
    ${gtest_LIB}
    ${gmock_LIB}
    shared-executable
  )

  add_dependencies(${TEST_NAME} ${ARG_DEPS} googletest)
endfunction()
|
||||
|
||||
# Adds a unit test named `${NAME}_test`, which is part of the `unit_tests` target.
#
# The file `${NAME}_test.cc` is implicitly added as a source.
# Additional source files can be declared with the `SRCS` parameter.
#
# Test executable is implicitly linked with `gtest` and `gmock`.
# Additional libraries can be linked with the `LIBS` parameter.
#
# Additional dependencies can be declared with the `DEPS` parameter.
# Umbrella target: `make unit_tests` builds every test added through this helper.
add_custom_target(unit_tests)
function(add_unit_test NAME)
  cmake_parse_arguments(ARG "" "" "SRCS;LIBS;DEPS" ${ARGN})
  # Delegate all the heavy lifting to add_cpp_test, then attach the result
  # to the unit_tests umbrella target.
  add_cpp_test(${NAME} SRCS ${ARG_SRCS} LIBS ${ARG_LIBS} DEPS ${ARG_DEPS})
  add_dependencies(unit_tests "${NAME}_test")
endfunction(add_unit_test)
|
||||
|
||||
# Adds a component test named `${NAME}_test`, which is part of the `component_tests` target.
#
# The file `${NAME}_test.cc` is implicitly added as a source.
# Additional source files can be declared with the `SRCS` parameter.
#
# Test executable is implicitly linked with `gtest` and `gmock`.
# Additional libraries can be linked with the `LIBS` parameter.
#
# Additional dependencies can be declared with the `DEPS` parameter.
# Umbrella target: `make component_tests` builds every test added through this helper.
add_custom_target(component_tests)
function(add_component_test NAME)
  cmake_parse_arguments(ARG "" "" "SRCS;LIBS;DEPS" ${ARGN})
  # Same construction as a unit test, but attached to the component_tests
  # umbrella target instead.
  add_cpp_test(${NAME} SRCS ${ARG_SRCS} LIBS ${ARG_LIBS} DEPS ${ARG_DEPS})
  add_dependencies(component_tests "${NAME}_test")
endfunction(add_component_test)
|
||||
|
|
@ -0,0 +1,42 @@
|
|||
# Locates libuv in the build container and exposes it as INTERFACE targets:
#   libuv-interface - include directories only
#   libuv-shared    - links the shared library
#   libuv-static    - links the static archive
find_path(LIBUV_INCLUDE_DIR uv.h)
find_library(LIBUV_LIBS NAMES uv libuv)
find_library(LIBUV_STATIC_LIBRARY NAMES libuv.a)
find_package_handle_standard_args(LIBUV DEFAULT_MSG
  LIBUV_LIBS
  LIBUV_INCLUDE_DIR
  LIBUV_STATIC_LIBRARY)
if((NOT LIBUV_INCLUDE_DIR) OR (NOT LIBUV_LIBS) OR (NOT LIBUV_STATIC_LIBRARY))
  message(FATAL_ERROR "Could not find libuv. Build container should already have that set up")
endif()

message(STATUS "libuv INCLUDE_DIR: ${LIBUV_INCLUDE_DIR}")
message(STATUS "libuv LIBS: ${LIBUV_LIBS}")
message(STATUS "libuv STATIC_LIBRARY: ${LIBUV_STATIC_LIBRARY}")

add_library(libuv-interface INTERFACE)
target_include_directories(
  libuv-interface
  INTERFACE
    "${LIBUV_INCLUDE_DIR}"
)

add_library(libuv-shared INTERFACE)
target_link_libraries(
  libuv-shared
  INTERFACE
    ${LIBUV_LIBS}
    libuv-interface
)

add_library(libuv-static INTERFACE)
# `libuv-interface` belongs here (link interface) — it was previously listed
# under target_include_directories, where a target name is treated as a
# literal include path and its usage requirements never propagate.
target_link_libraries(
  libuv-static
  INTERFACE
    ${LIBUV_STATIC_LIBRARY}
    libuv-interface
)
target_include_directories(
  libuv-static
  INTERFACE
    "${LIBUV_INCLUDE_DIR}"
)
|
||||
|
|
@ -0,0 +1,26 @@
|
|||
# Embeds file FIL as a C array via `xxd -i`, writing the result under
# ${CMAKE_BINARY_DIR}/generated. The generated file's path is returned to the
# caller through the variable named by GENERATED_NAME.
#
# Named arguments:
#   OUTPUT  - override the output file name (default: <basename>.xxd)
#   DEPENDS - extra dependencies for the generation command
function (add_xxd FIL GENERATED_NAME)
  cmake_parse_arguments(MOD "" "OUTPUT" "DEPENDS" ${ARGN})

  get_filename_component(ABS_FIL ${FIL} ABSOLUTE BASENAME ${CMAKE_CURRENT_SOURCE_DIR})
  get_filename_component(FIL_N ${FIL} NAME)
  if (MOD_OUTPUT)
    set(DST_FILENAME "${CMAKE_BINARY_DIR}/generated/${MOD_OUTPUT}")
  else ()
    set(DST_FILENAME "${CMAKE_BINARY_DIR}/generated/${FIL_N}.xxd")
  endif()
  get_filename_component(FIL_D ${ABS_FIL} DIRECTORY)

  # xxd runs from the input's directory so the generated symbol name is based
  # on the bare file name. The output is written to a temp ".generated" file
  # and copied with copy_if_different to avoid triggering rebuilds when the
  # content is unchanged.
  add_custom_command(
    OUTPUT "${DST_FILENAME}"
    COMMAND ${CMAKE_COMMAND} -E make_directory "${CMAKE_BINARY_DIR}/generated"
    COMMAND xxd
    ARGS -i ${FIL_N} > "${DST_FILENAME}.generated"
    COMMAND ${CMAKE_COMMAND} -E copy_if_different "${DST_FILENAME}.generated" "${DST_FILENAME}"
    DEPENDS ${FIL} "${MOD_DEPENDS}"
    WORKING_DIRECTORY ${FIL_D}
    COMMENT "Generating XXD from ${FIL} -> ${DST_FILENAME}"
    VERBATIM
  )
  set_source_files_properties("${DST_FILENAME}" PROPERTIES GENERATED TRUE)
  set(${GENERATED_NAME} "${DST_FILENAME}" PARENT_SCOPE)
endfunction()
|
||||
|
|
@ -0,0 +1,11 @@
|
|||
# Locates a static yaml-cpp installed in the build container and exposes it
# as the INTERFACE target `yamlcpp`.
find_path(YAMLCPP_INCLUDE_DIR yaml-cpp/yaml.h)
find_library(YAMLCPP_LIBRARY NAMES "libyaml-cpp.a")
find_package_handle_standard_args(YAMLCPP DEFAULT_MSG YAMLCPP_LIBRARY YAMLCPP_INCLUDE_DIR)
if(NOT YAMLCPP_FOUND)
  message(FATAL_ERROR "Could not find yaml-cpp. Build container should already have that set up")
endif()
message(STATUS "yaml-cpp INCLUDE_DIR: ${YAMLCPP_INCLUDE_DIR}")
message(STATUS "yaml-cpp LIBRARY: ${YAMLCPP_LIBRARY}")
add_library(yamlcpp INTERFACE)
target_include_directories(yamlcpp INTERFACE "${YAMLCPP_INCLUDE_DIR}")
target_link_libraries(yamlcpp INTERFACE "${YAMLCPP_LIBRARY}")
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
# Umbrella targets: building `collectors` builds every collector binary; the
# -docker variants bundle the corresponding container image builds/pushes.
add_custom_target(collectors)
add_custom_target(collectors-docker)
add_custom_target(collectors-docker-registry)

add_subdirectory(aws)
add_subdirectory(k8s)
add_subdirectory(kernel)
|
||||
|
|
@ -0,0 +1,40 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#pragma once

#include <util/enum.h>

// Log categories for the agent, expanded by the X-macro machinery in
// <util/enum_operators.inl> into `enum class AgentLogKind : uint32_t` plus
// the associated helper operators.
//
// NOTE(review): value 5 is unused (HTTP=4 jumps to NAT=6) — presumably a
// removed category; confirm before reusing the slot.
#define ENUM_NAME AgentLogKind
#define ENUM_TYPE uint32_t
#define ENUM_ELEMENTS(X) \
  X(BPF,0) \
  X(UDP,1) \
  X(DNS,2) \
  X(TCP,3) \
  X(HTTP,4) \
  X(NAT,6) \
  X(DOCKER,7) \
  X(FLOW,8) \
  X(CGROUPS,9) \
  X(PERF,10) \
  X(PID,11) \
  X(PROTOCOL,12) \
  X(CPU_MEM_IO,13) \
  X(NOMAD,14) \
  X(SOCKET, 15)
#define ENUM_DEFAULT BPF
#include <util/enum_operators.inl>
|
||||
|
|
@ -0,0 +1,84 @@
|
|||
# AWS collector executable, its lint/strip helpers, and its docker image.
add_executable(
  aws-collector
  main.cc
  collector.cc
  enumerator.cc
  ingest_connection.cc
)
harden_executable(aws-collector)

add_dependencies(collectors aws-collector)

# Stop after the first compile error to keep build logs readable.
target_compile_options(
  aws-collector
  PRIVATE
    ${CXX_ERROR_LIMIT_FLAG}=1
)

target_link_libraries(
  aws-collector
  PUBLIC
    render_flowmill_aws_collector
    render_flowmill_ingest_writer
    signal_handler
    aws-sdk-cpp
    reconnecting_channel
    connection_caretaker
    resource_usage_reporter
    config_file
    ip_address
    scheduling
    libuv-static
    args_parser
    system_ops
    aws_instance_metadata
    spdlog
    static-executable
)

set_target_properties(
  aws-collector
  PROPERTIES LINK_FLAGS "-pthread"
)

strip_binary(aws-collector)

# shellcheck the docker entrypoint as part of the build.
lint_shell_script_bundle(
  aws-collector-scripts
  SOURCES
    entrypoint.sh
)

# Debug images additionally ship the unstripped binary (ARTIFACTS_OF) for
# easier debugging; release images only contain the stripped binary.
if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
  build_custom_docker_image(
    aws-collector
    OUT_DIR srv
    ARTIFACTS_OF
      aws-collector
    OUTPUT_OF
      aws-collector-scripts
      aws-collector-stripped
    BINARIES
      debug-info.conf
    FILES
      ../../NOTICE.txt
      ../../LICENSE.txt
    DEPENDENCY_OF
      collectors
  )
else()
  build_custom_docker_image(
    aws-collector
    OUT_DIR srv
    OUTPUT_OF
      aws-collector-scripts
      aws-collector-stripped
    BINARIES
      debug-info.conf
    FILES
      ../../NOTICE.txt
      ../../LICENSE.txt
    DEPENDENCY_OF
      collectors
  )
endif()
|
||||
|
|
@ -0,0 +1,20 @@
|
|||
# Pull Segment's chamber binary so the entrypoint can fetch secrets.
FROM segment/chamber:2 AS chamber

FROM bitnami/minideb:buster

LABEL org.label-schema.name="flowmill/aws-collector" \
      org.label-schema.description="Flowmill AWS Metadata Collector" \
      org.label-schema.schema-version="1.0"

# ca-certificates are required by libcurl
RUN apt-get update && apt-get install -y ca-certificates
ENV SSL_CERT_DIR=/etc/ssl/certs

ENTRYPOINT [ "/srv/entrypoint.sh" ]

COPY --from=chamber /chamber /bin/chamber
COPY srv /srv
WORKDIR /srv
# Release images only ship the stripped binary; hard-link it to the name the
# entrypoint expects when the unstripped binary is absent.
RUN if [ ! -e /srv/aws-collector ]; then \
      ln /srv/aws-collector-stripped /srv/aws-collector; \
    fi
|
||||
|
|
@ -0,0 +1,39 @@
|
|||
# AWS Collector
|
||||
|
||||
The `aws-collector` collects AWS information that cannot be otherwise collected by our flowmill
|
||||
agent, and feeds that information to our pipeline server.
|
||||
|
||||
The collector consists of two components:
|
||||
- the agent that periodically queries AWS and pushes the information to the pipeline server;
|
||||
- a span within the pipeline server that consumes messages sent by `aws-collector` and enriches
|
||||
the flow.
|
||||
|
||||
# Running:
|
||||
The `aws-collector` requires two pieces of configuration to run: an auth config and AWS credentials.
|
||||
|
||||
The TL;DR to build and run in a test environment within `benv` is this:
|
||||
```bash
|
||||
cd ~/out/
|
||||
make -j aws-collector
|
||||
export AWS_ACCESS_KEY_ID="your_access_key"
|
||||
export AWS_SECRET_ACCESS_KEY="your_secret_access_key"
|
||||
# make sure the server is running, as well as stunnel
|
||||
src/collector/aws/aws-collector --auth-config=$HOME/src/misc/localhost_auth_config.yaml
|
||||
```
|
||||
|
||||
## Auth Config
|
||||
NOTE: this is being deprecated in favor of API keys.
|
||||
|
||||
Those are the same auth config required by the flowmill agent.
|
||||
|
||||
For a dev environment there is a [YAML file](https://github.com/Flowmill/flowmill/blob/master/misc/localhost_auth_config.yaml)
|
||||
that can be used for testing purposes.
|
||||
|
||||
## AWS Credentials
|
||||
Check the AWS SDK Developer Guide for [recommended ways to provide credentials](https://docs.aws.amazon.com/sdk-for-cpp/v1/developer-guide/credentials.html).
|
||||
|
||||
For testing, you can supply two environment variables:
|
||||
```bash
|
||||
export AWS_ACCESS_KEY_ID=your_access_key_id
|
||||
export AWS_SECRET_ACCESS_KEY=your_secret_access_key
|
||||
```
|
||||
|
|
@ -0,0 +1,97 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#include <collector/aws/collector.h>
|
||||
|
||||
#include <scheduling/interval_scheduler.h>
|
||||
|
||||
#include <util/jitter.h>
|
||||
#include <util/log.h>
|
||||
#include <util/log_formatters.h>
|
||||
|
||||
#include <functional>
|
||||
#include <stdexcept>
|
||||
#include <thread>
|
||||
|
||||
namespace collector::aws {
|
||||
|
||||
namespace {

// Base delay between a connection error and the next reconnection attempt.
constexpr auto RECONNECT_DELAY = 5s;
// explicitly using milliseconds for a finer grained jitter
constexpr std::chrono::milliseconds RECONNECT_JITTER = 1s;

} // namespace
|
||||
|
||||
// Wires the collector together: the ingest connection, a logger that writes
// through that connection, the network-interface enumerator, and the interval
// scheduler driving periodic polls. Polling does not start here — it begins
// when the connection reports authentication (on_authenticated()).
AwsCollector::AwsCollector(
    ::uv_loop_t &loop,
    std::string_view hostname,
    AuthzFetcher &authz_fetcher,
    std::chrono::milliseconds aws_metadata_timeout,
    std::chrono::milliseconds heartbeat_interval,
    std::size_t buffer_size,
    config::IntakeConfig intake_config,
    std::chrono::milliseconds poll_interval
):
  loop_(loop),
  connection_(
    hostname,
    loop_,
    aws_metadata_timeout,
    heartbeat_interval,
    std::move(intake_config),
    authz_fetcher,
    buffer_size,
    *this, // this collector receives the channel callbacks (e.g. on_error)
    std::bind(&AwsCollector::on_authenticated, this)
  ),
  log_(connection_.writer()),
  enumerator_(log_, connection_.index(), connection_.writer()),
  scheduler_(loop_, std::bind(&AwsCollector::callback, this)),
  poll_interval_(poll_interval)
{}
|
||||
|
||||
// Closes the libuv loop. NOTE(review): the loop is owned by the caller (taken
// by reference in the constructor) but closed here — confirm this ownership
// split is intended.
AwsCollector::~AwsCollector() {
  ::uv_loop_close(&loop_);
}
|
||||
|
||||
// Initiates the intake connection, then spins the libuv loop until it has no
// more active handles; stops the polling scheduler afterwards. Blocks the
// calling thread for the collector's lifetime.
void AwsCollector::run_loop() {
  connection_.connect();

  // uv_run returns non-zero while active handles remain; loop until drained.
  while (::uv_run(&loop_, UV_RUN_DEFAULT));

  scheduler_.stop();
}
|
||||
|
||||
// Periodic job body: enumerates AWS network interfaces and flushes any queued
// messages on the connection. Returns the enumerator's follow-up verdict
// (e.g. whether the scheduler should back off).
scheduling::JobFollowUp AwsCollector::callback() {
  auto result = enumerator_.enumerate();
  connection_.flush();
  return result;
}
|
||||
|
||||
// Channel error callback: stops polling, returns the enumerator's span
// handles, then sleeps a jittered delay before the channel machinery
// reconnects. `err` is currently unused here.
void AwsCollector::on_error(int err) {
  scheduler_.stop();

  enumerator_.free_handles();

  // Jitter avoids synchronized reconnect storms across collectors.
  // NOTE(review): this sleep blocks the loop thread for up to ~6s.
  std::this_thread::sleep_for(add_jitter(RECONNECT_DELAY, -RECONNECT_JITTER, RECONNECT_JITTER));
}
|
||||
|
||||
// Called once the intake connection authenticates: starts periodic polling,
// with the first run scheduled one full interval out.
void AwsCollector::on_authenticated() {
  scheduler_.start(poll_interval_, poll_interval_);
}
|
||||
|
||||
} // namespace collector::aws
|
||||
|
|
@ -0,0 +1,64 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <collector/aws/enumerator.h>
|
||||
#include <collector/aws/ingest_connection.h>
|
||||
|
||||
#include <channel/callbacks.h>
|
||||
#include <scheduling/interval_scheduler.h>
|
||||
#include <scheduling/job.h>
|
||||
#include <util/curl_engine.h>
|
||||
|
||||
#include <chrono>
|
||||
#include <string>
|
||||
|
||||
namespace collector::aws {
|
||||
|
||||
// AWS metadata collector: periodically enumerates EC2 network interfaces and
// streams the results to the intake pipeline through `connection_`.
// Inherits channel::Callbacks so connection events can be routed back here
// (see on_error / on_authenticated).
struct AwsCollector: channel::Callbacks {
  AwsCollector(
    ::uv_loop_t &loop,
    std::string_view hostname,
    AuthzFetcher &authz_fetcher,
    std::chrono::milliseconds aws_metadata_timeout,
    std::chrono::milliseconds heartbeat_interval,
    std::size_t buffer_size,
    config::IntakeConfig intake_config,
    std::chrono::milliseconds poll_interval
  );

  ~AwsCollector();

  // Connects and runs the libuv loop until it drains; blocks the caller.
  void run_loop();

  ::uv_loop_t &get_loop() { return loop_; }

private:
  // Periodic job: enumerate interfaces, then flush the connection.
  scheduling::JobFollowUp callback();

  // Stops polling and backs off before the channel reconnects.
  void on_error(int err);
  // Starts the polling scheduler once the connection authenticates.
  void on_authenticated();

  ::uv_loop_t &loop_;
  IngestConnection connection_;
  logging::Logger log_; // logs through the ingest connection's writer
  NetworkInterfacesEnumerator enumerator_;
  scheduling::IntervalScheduler scheduler_;
  std::chrono::milliseconds const poll_interval_;
};
|
||||
|
||||
} // namespace collector::aws
|
||||
|
|
@ -0,0 +1,49 @@
|
|||
#!/bin/bash -e

# Optional build-time settings (e.g. debug flags) baked into the image.
# shellcheck disable=SC1091
[[ ! -e ./debug-info.conf ]] || source ./debug-info.conf

# For customers using Segment.IO's Chamber, you should store agent keys in
# a secret in chamber. We will look up that key under $CHAMBER_SERVICE_NAME
if [ -n "${CHAMBER_SERVICE_NAME}" ]; then
  eval "$(/bin/chamber export --format dotenv "${CHAMBER_SERVICE_NAME}")"
fi

# Route HTTP(S) traffic through an optional proxy (default port 1080); both
# lower- and upper-case variants are exported for tool compatibility.
if [[ -n "${FLOWMILL_PROXY_HOST}" ]]; then
  export http_proxy="http://${FLOWMILL_PROXY_HOST}:${FLOWMILL_PROXY_PORT:-1080}"
  export HTTP_PROXY="${http_proxy}"
  export https_proxy="${http_proxy}"
  export HTTPS_PROXY="${http_proxy}"
fi

# to run the collector under gdb, set `FLOWMILL_RUN_UNDER_GDB` to the flavor of gdb
# you want (e.g.: `cgdb` or `gdb`) - this is intended for development purposes
if [[ -n "${FLOWMILL_RUN_UNDER_GDB}" ]]; then
  apt-get update -y
  apt-get install -y --no-install-recommends "${FLOWMILL_RUN_UNDER_GDB}"

  if [[ "${#FLOWMILL_GDB_COMMANDS[@]}" -lt 1 ]]; then
    # default behavior is to run the agent, print a stack trace after it exits
    # and exit gdb without confirmation
    FLOWMILL_GDB_COMMANDS=( \
      'set pagination off'
      'handle SIGPIPE nostop pass'
      'handle SIGUSR1 nostop pass'
      'handle SIGUSR2 nostop pass'
      run
      bt
      'server q'
    )
  fi

  # Turn each gdb command into a `-ex` argument.
  GDB_ARGS=()
  for gdb_cmd in "${FLOWMILL_GDB_COMMANDS[@]}"; do
    GDB_ARGS+=(-ex "${gdb_cmd}")
  done

  (set -x; exec "${FLOWMILL_RUN_UNDER_GDB}" -q "${GDB_ARGS[@]}" \
    --args /srv/aws-collector "$@" \
  )
else
  # Normal path: replace the shell with the collector, echoing the command.
  (set -x; exec /srv/aws-collector "$@")
fi
|
||||
|
|
@ -0,0 +1,230 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#include <collector/aws/enumerator.h>
|
||||
|
||||
#include <aws/ec2/model/DescribeNetworkInterfacesRequest.h>
|
||||
#include <aws/ec2/model/DescribeNetworkInterfacesResponse.h>
|
||||
#include <aws/ec2/model/DescribeRegionsRequest.h>
|
||||
#include <aws/ec2/model/DescribeRegionsResponse.h>
|
||||
|
||||
#include <generated/flowmill/aws_collector/index.h>
|
||||
#include <generated/flowmill/ingest.wire_message.h>
|
||||
|
||||
#include <util/ip_address.h>
|
||||
#include <util/log.h>
|
||||
#include <util/log_formatters.h>
|
||||
#include <util/resource_usage_reporter.h>
|
||||
#include <util/stop_watch.h>
|
||||
|
||||
#include <array>
|
||||
#include <type_traits>
|
||||
#include <utility>
|
||||
|
||||
#include <cstdint>
|
||||
|
||||
namespace collector::aws {
|
||||
|
||||
// Binds the enumerator to the render index/writer used to publish
// aws_network_interface spans, and to the logger used for error reporting.
NetworkInterfacesEnumerator::NetworkInterfacesEnumerator(
  logging::Logger &log,
  flowmill::aws_collector::Index &index,
  flowmill::ingest::Writer &writer
):
  index_(index),
  writer_(writer),
  log_(log)
{}
|
||||
|
||||
// Returns any outstanding span handles to the index before destruction.
NetworkInterfacesEnumerator::~NetworkInterfacesEnumerator() {
  free_handles();
}
|
||||
|
||||
// Replaces the tracked span handles with `handles`, returning the previously
// held handles to the index first.
void NetworkInterfacesEnumerator::set_handles(
  std::vector<flowmill::aws_collector::handles::aws_network_interface> handles
) {
  free_handles();
  handles_ = std::move(handles);
}
|
||||
|
||||
// Returns every tracked span handle to the index and clears the list.
void NetworkInterfacesEnumerator::free_handles() {
  for (auto &handle: handles_) {
    handle.put(index_);
  }

  handles_.clear();
}
|
||||
|
||||
// Converts each EC2 network interface into aws_network_interface span
// updates: one span per public IPv4, private IPv4 and IPv6 address of the
// interface, keyed by the address (IPv4 addresses mapped into IPv6 space).
// Each created span handle is appended to `handles` so the caller can
// release them back to `index` later.
void translate_interfaces_to_spans(
  flowmill::aws_collector::Index &index,
  Aws::Vector<Aws::EC2::Model::NetworkInterface> const &interfaces,
  std::vector<flowmill::aws_collector::handles::aws_network_interface> &handles
) {
  for (auto const &interface: interfaces) {
    // Metadata shared by every address of this interface.
    auto const &attachment = interface.GetAttachment();
    auto const &association = interface.GetAssociation();
    auto const &ip_owner_id = association.GetIpOwnerId();
    auto const &vpc_id = interface.GetVpcId();
    auto const &az = interface.GetAvailabilityZone();

    auto const &interface_id = interface.GetNetworkInterfaceId();
    auto const raw_interface_type = static_cast<std::uint16_t>(interface.GetInterfaceType());
    auto const &instance_id = attachment.GetInstanceId();
    auto const &instance_owner_id = attachment.GetInstanceOwnerId();
    auto const &public_dns_name = association.GetPublicDnsName();
    auto const &private_dns_name = interface.GetPrivateDnsName();
    auto const &description = interface.GetDescription();

    // Publishes one span keyed by `ipv6` carrying the interface metadata and
    // records its handle for later release.
    auto const add_entry = [&](IPv6Address const &ipv6) {
      auto handle = index.aws_network_interface.by_key({.ip = ipv6.as_int()});

      LOG::trace(
        "network_interface_info:"
        " ip={}" " ip_owner_id={}" " vpc_id={}" " az={}"
        " interface_id={} interface_type={} instance_id={} instance_owner_id={}"
        " public_dns_name={} private_dns_name={} description={}",
        ipv6, ip_owner_id, vpc_id, az,
        interface_id, raw_interface_type, instance_id, instance_owner_id,
        public_dns_name, private_dns_name, description
      );

      handle.network_interface_info(
        jb_blob{ip_owner_id},
        jb_blob{vpc_id},
        jb_blob{az},
        jb_blob{interface_id},
        raw_interface_type,
        jb_blob{instance_id},
        jb_blob{instance_owner_id},
        jb_blob{public_dns_name},
        jb_blob{private_dns_name},
        jb_blob{description}
      );

      handles.emplace_back(handle.to_handle());
    };

    // Public IPv4 address, if the interface has one.
    if (auto const public_ip = IPv4Address::parse(association.GetPublicIp().c_str())) {
      add_entry(public_ip->to_ipv6());
    }

    // All private IPv4 addresses; unparsable entries are silently skipped.
    for (auto const &ipv4: interface.GetPrivateIpAddresses()) {
      auto const private_ip = IPv4Address::parse(ipv4.GetPrivateIpAddress().c_str());

      if (private_ip) {
        add_entry(private_ip->to_ipv6());
      }
    }

    // Native IPv6 addresses.
    for (auto const &address: interface.GetIpv6Addresses()) {
      if (auto const ipv6 = IPv6Address::parse(address.GetIpv6Address().c_str())) {
        add_entry(*ipv6);
      }
    }
  }
}
|
||||
|
||||
void NetworkInterfacesEnumerator::handle_ec2_error(
|
||||
CollectorStatus status,
|
||||
Aws::Client::AWSError<Aws::EC2::EC2Errors> const &error
|
||||
) {
|
||||
if (error.GetErrorType() == Aws::EC2::EC2Errors::THROTTLING) {
|
||||
log_.error("{} - AWS API call throttled: {}", status, error.GetMessage());
|
||||
return;
|
||||
}
|
||||
|
||||
auto const http_status = static_cast<std::underlying_type_t<Aws::Http::HttpResponseCode>>(
|
||||
error.GetResponseCode()
|
||||
);
|
||||
|
||||
LOG::trace("reporting aws collector as unhealthy (status={} detail={})", status, http_status);
|
||||
writer_.collector_health(integer_value(status), http_status);
|
||||
|
||||
if (http_status >= 400 && http_status < 500) {
|
||||
log_.error(
|
||||
"{} - API call failed with http status {}. Double check that AWS credentials are"
|
||||
" properly set up for this pod. Check Flowmill setup instructions for more"
|
||||
" information. Error message from AWS: {}",
|
||||
status, http_status, error.GetMessage()
|
||||
);
|
||||
} else {
|
||||
log_.error(
|
||||
"{} - API call failed with http status {}. Error message from AWS: {}",
|
||||
status, http_status, error.GetMessage()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Polls EC2 across all available regions and publishes every discovered
// network interface as an `aws_network_interface` span.
//
// Returns `ok` when every region enumerated successfully; `backoff` when any
// EC2 API call failed (results from successful regions are still published).
scheduling::JobFollowUp NetworkInterfacesEnumerator::enumerate() {
  ResourceUsageReporter::report(writer_);

  auto const regions_response = ec2_.DescribeRegions({});
  if (!regions_response.IsSuccess()) {
    handle_ec2_error(CollectorStatus::aws_describe_regions_error, regions_response.GetError());
    return scheduling::JobFollowUp::backoff;
  }

  std::vector<flowmill::aws_collector::handles::aws_network_interface> collected;
  auto follow_up = scheduling::JobFollowUp::ok;

  LOG::trace("starting AWS network interfaces enumeration");
  StopWatch<> timer;
  for (auto const &region : regions_response.GetResult().GetRegions()) {
    auto const &region_name = region.GetRegionName();
    LOG::trace("enumerating network interfaces in region '{}'", region_name);

    // Build a per-region client so each request is routed to that region.
    Aws::Client::ClientConfiguration client_config;
    client_config.region = region_name;
    Aws::EC2::EC2Client regional_client(client_config);

    auto const interfaces_response = regional_client.DescribeNetworkInterfaces({});
    if (!interfaces_response.IsSuccess()) {
      // A failure in one region shouldn't block enumeration of the others.
      handle_ec2_error(
          CollectorStatus::aws_describe_network_interfaces_error,
          interfaces_response.GetError());
      follow_up = scheduling::JobFollowUp::backoff;
      continue;
    }

    auto const &interfaces = interfaces_response.GetResult().GetNetworkInterfaces();
    LOG::trace(
        "found {} network interfaces in region '{}'",
        interfaces.size(),
        region_name);

    translate_interfaces_to_spans(index_, interfaces, collected);
  }
  LOG::trace(
      "finished AWS network interfaces enumeration after {}",
      timer.elapsed<std::chrono::milliseconds>());

  // Swap in the freshly collected batch, releasing the previous one.
  set_handles(std::move(collected));

  LOG::trace("network interface live span count: {}", handles_.size());

  if (follow_up == scheduling::JobFollowUp::ok) {
    LOG::trace("reporting aws collector as healthy");
    writer_.collector_health(integer_value(CollectorStatus::healthy), 0);
  }

  return follow_up;
}
|
||||
|
||||
} // namespace collector::aws {
|
||||
|
|
@ -0,0 +1,59 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <common/collector_status.h>
|
||||
#include <generated/flowmill/aws_collector/handles.h>
|
||||
#include <scheduling/job.h>
|
||||
#include <util/logger.h>
|
||||
|
||||
#include <aws/ec2/EC2Client.h>
|
||||
|
||||
#include <functional>
|
||||
#include <vector>
|
||||
|
||||
namespace collector::aws {
|
||||
|
||||
// Periodically enumerates EC2 network interfaces in every AWS region and
// publishes them as `aws_network_interface` spans through the ingest writer.
struct NetworkInterfacesEnumerator {

  NetworkInterfacesEnumerator(
    logging::Logger &log,
    flowmill::aws_collector::Index &index,
    flowmill::ingest::Writer &writer
  );
  ~NetworkInterfacesEnumerator();

  // Runs one enumeration pass; returns `ok` on success, `backoff` when any
  // EC2 API call failed so the scheduler can retry later.
  scheduling::JobFollowUp enumerate();

  // Releases all live span handles.
  void free_handles();

private:
  // Replaces the currently-held span handles with a freshly collected batch.
  void set_handles(std::vector<flowmill::aws_collector::handles::aws_network_interface> handles);

  // Logs an EC2 API error and reports collector health upstream (except for
  // transient throttling errors, which are only logged).
  void handle_ec2_error(
    CollectorStatus status,
    Aws::Client::AWSError<Aws::EC2::EC2Errors> const &error
  );

  Aws::EC2::EC2Client ec2_;                    // default-region client (DescribeRegions)
  flowmill::aws_collector::Index &index_;      // span index owned by the connection
  flowmill::ingest::Writer &writer_;           // ingest pipeline writer
  logging::Logger &log_;
  // Handles for currently-live network interface spans.
  std::vector<flowmill::aws_collector::handles::aws_network_interface> handles_;
};
|
||||
|
||||
} // namespace collector::aws {
|
||||
|
|
@ -0,0 +1,85 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#include <collector/aws/ingest_connection.h>
|
||||
|
||||
#include <util/boot_time.h>
|
||||
|
||||
namespace collector::aws {
|
||||
|
||||
// Wires up the ingest pipeline for the AWS collector: a reconnecting channel
// to the intake, a render-generated writer on top of it, and a caretaker that
// handles authz and heartbeats over the connection's lifetime.
IngestConnection::IngestConnection(
    std::string_view hostname, ::uv_loop_t &loop,
    std::chrono::milliseconds aws_metadata_timeout,
    std::chrono::milliseconds heartbeat_interval,
    config::IntakeConfig intake_config,
    AuthzFetcher &authz_fetcher,
    std::size_t buffer_size,
    channel::Callbacks &connection_callback,
    std::function<void()> on_authenticated_cb)
  : curl_(CurlEngine::create(&loop)),
    channel_(std::move(intake_config), loop, buffer_size),
    connection_callback_(connection_callback),
    encoder_(channel_.intake_config().make_encoder()),
    // writer timestamps are relative to boot time -- presumably so the intake
    // can align clocks; TODO confirm against get_boot_time() semantics
    writer_(channel_.buffered_writer(), monotonic, get_boot_time(), encoder_.get()),
    caretaker_(hostname, ClientType::aws, authz_fetcher,
      {},
      &loop, writer_,
      aws_metadata_timeout, heartbeat_interval,
      // flush / compression hooks delegate to the reconnecting channel
      std::bind(&channel::ReconnectingChannel::flush, &channel_),
      std::bind(&channel::ReconnectingChannel::set_compression,
        &channel_, std::placeholders::_1),
      std::move(on_authenticated_cb)),
    index_({writer_})
{
  // Receive connect/error/close notifications from the channel via the
  // channel::Callbacks overrides below.
  channel_.register_pipeline_observer(this);
}
|
||||
|
||||
// Refreshes the authz token and begins connecting the channel.
void IngestConnection::connect()
{
  caretaker_.refresh_authz_token();
  channel_.start_connect();
}

// Flushes any buffered ingest data out on the channel.
void IngestConnection::flush()
{
  channel_.flush();
}

// channel::Callbacks override: forwards inbound data to the external callback.
u32 IngestConnection::received_data(const u8 *data, int data_len)
{
  return connection_callback_.received_data(data, data_len);
}

// channel::Callbacks override: marks the caretaker disconnected before
// notifying the external callback.
void IngestConnection::on_error(int err)
{
  caretaker_.set_disconnected();

  connection_callback_.on_error(err);
}

// channel::Callbacks override: forwards the close notification.
void IngestConnection::on_closed()
{
  connection_callback_.on_closed();
}

// channel::Callbacks override: marks the caretaker connected before notifying
// the external callback.
void IngestConnection::on_connect()
{
  caretaker_.set_connected();

  connection_callback_.on_connect();
}
|
||||
|
||||
} // namespace collector::aws
|
||||
|
|
@ -0,0 +1,70 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <channel/callbacks.h>
|
||||
#include <channel/connection_caretaker.h>
|
||||
#include <channel/reconnecting_channel.h>
|
||||
#include <channel/tls_channel.h>
|
||||
#include <generated/flowmill/aws_collector/index.h>
|
||||
#include <generated/flowmill/ingest/writer.h>
|
||||
|
||||
#include <uv.h>
|
||||
|
||||
#include <chrono>
|
||||
#include <functional>
|
||||
|
||||
namespace collector::aws {
|
||||
|
||||
// Manages the AWS collector's connection to the Flowmill ingest pipeline.
//
// Privately inherits channel::Callbacks so channel events first update the
// connection caretaker and are then forwarded to the external callback.
class IngestConnection: channel::Callbacks {
public:
  IngestConnection(
    std::string_view hostname,
    ::uv_loop_t &loop,
    std::chrono::milliseconds aws_metadata_timeout,
    std::chrono::milliseconds heartbeat_interval,
    config::IntakeConfig intake_config,
    AuthzFetcher &authz_fetcher,
    std::size_t buffer_size,
    channel::Callbacks &connection_callback,
    std::function<void()> on_authenticated_cb
  );

  // Refreshes the authz token and starts connecting the channel.
  void connect();
  // Flushes buffered ingest data.
  void flush();

  flowmill::ingest::Writer &writer() { return writer_; }

  flowmill::aws_collector::Index &index() { return index_; }

private:
  // channel::Callbacks overrides (see ingest_connection.cc).
  u32 received_data(const u8 *data, int data_len);
  void on_error(int err);
  void on_closed();
  void on_connect();

  // Declaration order matters: members below are initialized top-to-bottom
  // and several depend on earlier ones (e.g. writer_ on channel_/encoder_).
  channel::TLSChannel::Initializer tls_guard_;  // RAII TLS library setup
  std::unique_ptr<CurlEngine> curl_;
  channel::ReconnectingChannel channel_;
  channel::Callbacks &connection_callback_;
  std::unique_ptr<::flowmill::ingest::Encoder> encoder_;
  flowmill::ingest::Writer writer_;
  channel::ConnectionCaretaker caretaker_;
  flowmill::aws_collector::Index index_;
};
|
||||
|
||||
} // namespace collector::aws {
|
||||
|
|
@ -0,0 +1,150 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#include <collector/aws/collector.h>
|
||||
|
||||
#include <channel/component.h>
|
||||
#include <collector/component.h>
|
||||
#include <collector/constants.h>
|
||||
#include <common/cloud_platform.h>
|
||||
#include <util/agent_id.h>
|
||||
#include <util/args_parser.h>
|
||||
#include <util/log.h>
|
||||
#include <util/log_whitelist.h>
|
||||
#include <util/signal_handler.h>
|
||||
#include <util/system_ops.h>
|
||||
#include <util/utility.h>
|
||||
|
||||
#include <aws/core/Aws.h>
|
||||
|
||||
#include <chrono>
|
||||
#include <string>
|
||||
|
||||
#include <csignal>
|
||||
|
||||
/**
|
||||
* AWS Collector Agent
|
||||
*
|
||||
* Requires AWS Access Key ID and Secret Access Key to be set up in the
|
||||
* environment:
|
||||
* https://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html
|
||||
*
|
||||
* In production, in an EC2 instance, this should work automagically.
|
||||
*
|
||||
* The easiest way to achieve that in a development environment is by setting up
|
||||
* environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`.
|
||||
*/
|
||||
|
||||
int main(int argc, char *argv[]) {
  ::uv_loop_t loop;
  if (auto const error = ::uv_loop_init(&loop)) {
    throw std::runtime_error(::uv_strerror(error));
  }

  // read config from environment

  // Agent key is mandatory: abort the process if it can't be read.
  auto const agent_key = AuthzFetcher::read_agent_key()
    .on_error([](auto &error) {
      LOG::critical("Authentication key error: {}", error);
      exit(-1);
    })
    .value();

  // args parsing

  cli::ArgsParser parser("Flowmill AWS collector agent");

  args::HelpFlag help(*parser, "help", "Display this help menu", {'h', "help"});

  args::ValueFlag<std::chrono::milliseconds::rep> ec2_poll_interval_ms(
    *parser, "ec2_poll_interval_ms",
    "How often, in milliseconds, to enumerate interfaces in EC2.",
    {"ec2-poll-interval-ms"}, std::chrono::milliseconds(1s).count());

  auto &authz_server = AuthzFetcher::register_args_parser(parser);

  args::ValueFlag<u64> aws_metadata_timeout_ms(
    *parser, "milliseconds", "Milliseconds to wait for AWS instance metadata",
    {"aws-timeout"}, 1 * 1000);

  // Per-subsystem log whitelists, toggled via command-line flags.
  parser.new_handler<LogWhitelistHandler<channel::Component>>("channel");
  parser.new_handler<LogWhitelistHandler<collector::Component>>("component");
  parser.new_handler<LogWhitelistHandler<CloudPlatform>>("cloud-platform");
  parser.new_handler<LogWhitelistHandler<Utility>>("utility");

  auto &intake_config_handler = parser.new_handler<config::IntakeConfig::ArgsHandler>();

  SignalManager &signal_manager = parser.new_handler<SignalManager>(loop, "aws-collector")
    .add_auth(agent_key.key_id, agent_key.secret);

  if (auto result = parser.process(argc, argv); !result.has_value()) {
    return result.error();
  }

  // A zero interval would make the enumerator spin; reject it up front.
  if (ec2_poll_interval_ms.Get() == 0) {
    LOG::error("--ec2-poll-interval-ms cannot be 0");
    return EXIT_FAILURE;
  }

  // resolve hostname (best-effort; "(unknown)" on failure)
  std::string const hostname = get_host_name(MAX_HOSTNAME_LENGTH).recover([](auto &error) {
    LOG::error("Unable to retrieve host information from uname: {}", error);
    return "(unknown)";
  });

  auto curl_engine = CurlEngine::create(&loop);

  auto agent_id = gen_agent_id();

  // Fetch initial authz token
  auto maybe_proxy_config = config::HttpProxyConfig::read_from_env();
  auto proxy_config = maybe_proxy_config.has_value() ? &maybe_proxy_config.value() : nullptr;
  AuthzFetcher authz_fetcher{*curl_engine, *authz_server, agent_key, agent_id, proxy_config};

  auto intake_config = intake_config_handler.read_config(authz_fetcher.token()->intake());

  LOG::info(
    "AWS Collector version {} ({}) started on host {}",
    versions::release, release_mode_string, hostname
  );
  LOG::info("AWS Collector agent ID is {}", agent_id);

  // aws sdk init

  Aws::InitAPI({});

  // main

  collector::aws::AwsCollector collector{
    loop, hostname, authz_fetcher,
    std::chrono::milliseconds(aws_metadata_timeout_ms.Get()),
    HEARTBEAT_INTERVAL, WRITE_BUFFER_SIZE,
    std::move(intake_config),
    std::chrono::milliseconds(ec2_poll_interval_ms.Get())
  };

  signal_manager.handle_signals(
    {SIGINT, SIGTERM}
    // TODO: close gracefully
  );

  // Blocks running the libuv event loop until the collector stops.
  collector.run_loop();

  // shutdown
  Aws::ShutdownAPI({});

  return EXIT_SUCCESS;
}
|
||||
|
|
@ -0,0 +1,30 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <util/enum.h>
|
||||
|
||||
// components common to all collectors
|
||||
|
||||
// X-macro enum definition: expands to `collector::Component`, a
// std::uint16_t-backed enum whose operators/formatters are generated by
// util/enum_operators.inl. Used as a log-whitelist key.
#define ENUM_NAMESPACE collector
#define ENUM_NAME Component
#define ENUM_TYPE std::uint16_t
#define ENUM_ELEMENTS(X) \
  X(none, 0) \
  X(auth, 1)
#define ENUM_DEFAULT none
#include <util/enum_operators.inl>
|
||||
|
|
@ -0,0 +1,23 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <common/constants.h>
|
||||
#include <platform/types.h>
|
||||
|
||||
// Interval between heartbeat messages sent to the intake pipeline.
constexpr auto HEARTBEAT_INTERVAL = 2s;
// Size, in bytes, of the channel's outbound write buffer (16 KiB).
constexpr auto WRITE_BUFFER_SIZE = 16 * 1024;
|
||||
|
|
@ -0,0 +1,147 @@
|
|||
setup_go_module(collector "flowmill.net")

# Generated protobuf bindings shared by k8s-relay (C++) and k8s-watcher (Go).
build_protobuf(collector GO collector CPP GRPC)
build_protobuf(kubernetes_info GO collector CPP)

#############
# k8s-relay #
#############

add_library(
  kubernetes_owner_kind
  STATIC
  kubernetes_owner_kind.cc
)
target_link_libraries(
  kubernetes_owner_kind
  render_flowmill_artifacts
)

add_library(
  resync_channel
  STATIC
  resync_channel.cc
)
target_link_libraries(
  resync_channel
  render_flowmill_artifacts
)

add_library(
  kubernetes_rpc_server
  STATIC
  kubernetes_rpc_server.cc
)
target_link_libraries(
  kubernetes_rpc_server
  collector-cpp-protobuf
  kubernetes_info-cpp-protobuf
  yamlcpp
  fastpass_util
  render_flowmill_artifacts
)

add_library(
  resync_queue
  STATIC
  resync_queue.cc
)
target_link_libraries(
  resync_queue
  render_flowmill_artifacts
)

add_library(
  resync_processor
  STATIC
  resync_processor.cc
)
target_link_libraries(
  resync_processor
  reconnecting_channel
  libuv-interface
  spdlog
  fastpass_util
  render_flowmill_artifacts
)

add_executable(
  k8s-relay
  main.cc
)
harden_executable(k8s-relay)

add_dependencies(collectors k8s-relay)

target_link_libraries(
  k8s-relay
  PUBLIC
    signal_handler
    resync_processor
    resync_queue
    resync_channel
    kubernetes_rpc_server
    kubernetes_owner_kind
    connection_caretaker
    resource_usage_reporter
    render_flowmill_artifacts
    render_flowmill_ingest_writer
    config_file
    libuv-static
    args_parser
    system_ops
    spdlog
    static-executable
    # gRPC and c-ares must be linked statically into the relay binary.
    -Wl,-Bstatic
    libgrpc++_unsecure.a
    libgrpc_unsecure.a
    libcares.a
    -Wl,-Bdynamic
)

set_target_properties(k8s-relay PROPERTIES LINK_SEARCH_START_STATIC 1)
set_target_properties(k8s-relay PROPERTIES LINK_SEARCH_END_STATIC 1)

strip_binary(k8s-relay)

lint_shell_script_bundle(
  k8s-relay-scripts
  SOURCES
    entrypoint.sh
)

# The Debug image additionally bundles the unstripped build artifacts; all the
# other arguments are identical, so compute the delta instead of duplicating
# the whole call.
if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
  set(K8S_RELAY_IMAGE_EXTRA_ARGS ARTIFACTS_OF k8s-relay)
else()
  set(K8S_RELAY_IMAGE_EXTRA_ARGS)
endif()

build_custom_docker_image(
  k8s-relay
  OUT_DIR srv
  ${K8S_RELAY_IMAGE_EXTRA_ARGS}
  OUTPUT_OF
    k8s-relay-scripts
    k8s-relay-stripped
  BINARIES
    debug-info.conf
  FILES
    ../../NOTICE.txt
    ../../LICENSE.txt
  DEPENDENCY_OF
    collectors
)

add_subdirectory(k8s-watcher)
|
||||
|
|
@ -0,0 +1,23 @@
|
|||
# chamber binary is copied out of this stage for runtime secret lookup
# (see entrypoint.sh / CHAMBER_SERVICE_NAME).
FROM segment/chamber:2 AS chamber

FROM bitnami/minideb:buster

LABEL org.label-schema.name="flowmill/k8s-relay" \
      org.label-schema.description="Relays Kubernetes Metadata from k8s-collector to Flowmill" \
      org.label-schema.schema-version="1.0"

# ca-certificates are required by libcurl
RUN apt-get update && apt-get install -y ca-certificates
ENV SSL_CERT_DIR=/etc/ssl/certs

# legacy stuff, we should get rid of references to `/etc/flowtune` at some point
RUN ln -s /etc/flowtune /etc/flowmill

ENTRYPOINT [ "/srv/entrypoint.sh" ]

COPY --from=chamber /chamber /bin/chamber
COPY srv /srv
WORKDIR /srv
# Non-debug images only ship the stripped binary; hard-link it so the
# entrypoint's /srv/k8s-relay path works either way.
RUN if [ ! -e /srv/k8s-relay ]; then \
      ln /srv/k8s-relay-stripped /srv/k8s-relay; \
    fi
|
||||
|
|
@ -0,0 +1,34 @@
|
|||
syntax = "proto3";

option go_package = "flowmill.net/collector";

import "kubernetes_info.proto";

package collector;

// A single Kubernetes metadata update streamed from k8s-watcher to k8s-relay.
message Info {
  // Which kind of Kubernetes object this update describes.
  enum Type {
    K8S_REPLICASET = 0;
    K8S_POD = 1;
  }

  Type type = 1;

  // Mirrors the Kubernetes watch event types (k8s.io/apimachinery watch).
  enum Event {
    ADDED = 0;
    MODIFIED = 1;
    DELETED = 2;
    ERROR = 3;
  }
  Event event = 2;

  // Populated according to `type`: pod_info for K8S_POD, rs_info for
  // K8S_REPLICASET.
  PodInfo pod_info = 3;
  ReplicaSetInfo rs_info = 4;
}

// Acknowledgment message; currently carries no data.
message Response {
}

// Bidirectional stream: the watcher sends Info updates, the relay responds.
service Collector {
  rpc Collect(stream Info) returns (stream Response) {}
}
|
||||
|
|
@ -0,0 +1,49 @@
|
|||
#!/bin/bash -e

# Source optional build-time metadata if the file is present.
# shellcheck disable=SC1091
[[ ! -e ./debug-info.conf ]] || source ./debug-info.conf

# For customers using Segment.IO's Chamber, you should store agent keys in
# a secret in chamber. We will look up that key under $CHAMBER_SERVICE_NAME
if [ -n "${CHAMBER_SERVICE_NAME}" ]; then
  eval "$(/bin/chamber export --format dotenv "${CHAMBER_SERVICE_NAME}")"
fi

# Route outbound traffic through an HTTP(S) proxy when configured.
if [[ -n "${FLOWMILL_PROXY_HOST}" ]]; then
  export http_proxy="http://${FLOWMILL_PROXY_HOST}:${FLOWMILL_PROXY_PORT:-1080}"
  export HTTP_PROXY="${http_proxy}"
  export https_proxy="${http_proxy}"
  export HTTPS_PROXY="${http_proxy}"
fi

# to run the collector under gdb, set `FLOWMILL_RUN_UNDER_GDB` to the flavor of gdb
# you want (e.g.: `cgdb` or `gdb`) - this is intended for development purposes
if [[ -n "${FLOWMILL_RUN_UNDER_GDB}" ]]; then
  apt-get update -y
  apt-get install -y --no-install-recommends "${FLOWMILL_RUN_UNDER_GDB}"

  # NOTE(review): FLOWMILL_GDB_COMMANDS is used as a bash array here, but
  # arrays can't be exported through the environment - confirm how callers
  # populate it (debug-info.conf sourced above is the likely path).
  if [[ "${#FLOWMILL_GDB_COMMANDS[@]}" -lt 1 ]]; then
    # default behavior is to run the agent, print a stack trace after it exits
    # and exit gdb without confirmation
    FLOWMILL_GDB_COMMANDS=( \
      'set pagination off'
      'handle SIGPIPE nostop pass'
      'handle SIGUSR1 nostop pass'
      'handle SIGUSR2 nostop pass'
      run
      bt
      'server q'
    )
  fi

  # Turn each command into a `-ex` argument for gdb.
  GDB_ARGS=()
  for gdb_cmd in "${FLOWMILL_GDB_COMMANDS[@]}"; do
    GDB_ARGS+=(-ex "${gdb_cmd}")
  done

  (set -x; exec "${FLOWMILL_RUN_UNDER_GDB}" -q "${GDB_ARGS[@]}" \
    --args /srv/k8s-relay "$@" \
  )
else
  (set -x; exec /srv/k8s-relay "$@")
fi
|
||||
|
|
@ -0,0 +1,9 @@
|
|||
module flowmill.net/collector

go 1.14

// `HEAD` is not valid go.mod require-directive syntax (each module needs a
// semantic version or pseudo-version); pin matching k8s.io releases so builds
// are reproducible. v0.18.x is the client-go line contemporary with Go 1.14 --
// bump all three together when upgrading.
require (
	k8s.io/api v0.18.4
	k8s.io/apimachinery v0.18.4
	k8s.io/client-go v0.18.4
)
|
||||
|
|
@ -0,0 +1,22 @@
|
|||
# Aggregate target for everything k8s-watcher related.
add_custom_target(k8s-watcher)

# Build the watcher Go binary; the generated protobuf bindings must be built
# first.
build_go_package(
  k8s-watcher
  collector-go-module
  BINARY
  DEPENDS
    collector-go-protobuf
    kubernetes_info-go-protobuf
  DEPENDENCY_OF
    k8s-watcher
    collectors
)

# Package the binary into the flowmill/k8s-watcher docker image.
build_custom_docker_image(
  k8s-watcher
  OUT_DIR srv
  OUTPUT_OF
    k8s-watcher-go
  DEPENDENCY_OF
    collectors
)
|
||||
|
|
@ -0,0 +1,10 @@
|
|||
# NOTE(review): `unstable` is a moving tag - consider pinning a release (the
# k8s-relay image uses bitnami/minideb:buster).
FROM bitnami/minideb:unstable

COPY srv /srv

WORKDIR /srv
ENTRYPOINT ["/srv/k8s-watcher"]

LABEL org.label-schema.name="flowmill/k8s-watcher" \
      org.label-schema.description="Flowmill Kubernetes Metadata Collector" \
      org.label-schema.schema-version="1.0"
|
||||
|
|
@ -0,0 +1,408 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"flowmill.net/collector"
|
||||
"google.golang.org/grpc"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
// Command-line flags.
var (
	server_address = flag.String("server-address", "localhost:8712",
		"Where traffic is sent to. It should be in hostname:port format.")
	local_test = flag.Bool("local-test", false,
		"If true, do not connect to K8S; instead, generate synthetic traffic for test.")
	log_to_stderr = flag.Bool("log-to-stderr", false,
		"If true, perform additional logging to stderr.")
)
|
||||
|
||||
// logmsg logs msg via the standard logger, but only when the -log-to-stderr
// flag is set.
func logmsg(msg string) {
	if *log_to_stderr {
		log.Println(msg)
	}
}
|
||||
|
||||
func info_event_type(watch_event_type watch.EventType) (collector.Info_Event, error) {
|
||||
switch watch_event_type {
|
||||
case watch.Added:
|
||||
return collector.Info_ADDED, nil
|
||||
case watch.Modified:
|
||||
return collector.Info_MODIFIED, nil
|
||||
case watch.Deleted:
|
||||
return collector.Info_DELETED, nil
|
||||
case watch.Error:
|
||||
return collector.Info_ERROR, nil
|
||||
}
|
||||
|
||||
return collector.Info_ERROR, errors.New("unexpected watch event")
|
||||
}
|
||||
|
||||
func extract_owner(objm metav1.ObjectMeta) *collector.OwnerInfo {
|
||||
for _, owner_ref := range objm.OwnerReferences {
|
||||
if owner_ref.Controller != nil && *owner_ref.Controller {
|
||||
owner_info := &collector.OwnerInfo{
|
||||
Uid: string(owner_ref.UID),
|
||||
Name: owner_ref.Name,
|
||||
Kind: owner_ref.Kind,
|
||||
}
|
||||
|
||||
return owner_info
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func extract_version(pod *corev1.Pod) string {
|
||||
var tags []string
|
||||
for _, cs := range pod.Status.ContainerStatuses {
|
||||
tags = append(tags, fmt.Sprintf("'%s'", cs.Image))
|
||||
}
|
||||
sort.Strings(tags)
|
||||
return strings.Join(tags, ",")
|
||||
}
|
||||
|
||||
func extract_containers(pod *corev1.Pod) []*collector.ContainerInfo {
|
||||
containers := make([]*collector.ContainerInfo, 0, len(pod.Status.ContainerStatuses))
|
||||
for _, cs := range pod.Status.ContainerStatuses {
|
||||
container := &collector.ContainerInfo{}
|
||||
container.Id = cs.ContainerID
|
||||
container.Name = cs.Name
|
||||
container.Image = cs.Image
|
||||
containers = append(containers, container)
|
||||
}
|
||||
return containers
|
||||
}
|
||||
|
||||
// make_pod_info converts a Kubernetes Pod object into the wire-format PodInfo
// sent to k8s-relay.
func make_pod_info(pod *corev1.Pod) *collector.PodInfo {
	return &collector.PodInfo{
		Uid:            string(pod.ObjectMeta.UID),
		Ip:             pod.Status.PodIP,
		Name:           pod.ObjectMeta.Name,
		Owner:          extract_owner(pod.ObjectMeta),
		Ns:             pod.ObjectMeta.Namespace,
		Version:        extract_version(pod),
		IsHostNetwork:  pod.Spec.HostNetwork,
		ContainerInfos: extract_containers(pod),
	}
}
|
||||
|
||||
// make_rs_info converts a Kubernetes ReplicaSet into the wire-format
// ReplicaSetInfo (UID plus its controlling owner, e.g. a Deployment).
func make_rs_info(rs *appsv1.ReplicaSet) *collector.ReplicaSetInfo {
	return &collector.ReplicaSetInfo{
		Uid:   string(rs.ObjectMeta.UID),
		Owner: extract_owner(rs.ObjectMeta),
	}
}
|
||||
|
||||
// make_collector_info_from_pod wraps a PodInfo in a collector.Info envelope
// tagged with the given event type.
func make_collector_info_from_pod(event_type collector.Info_Event, pod_info *collector.PodInfo) *collector.Info {
	return &collector.Info{
		Type:    collector.Info_K8S_POD,
		Event:   event_type,
		PodInfo: pod_info,
	}
}
|
||||
|
||||
// make_collector_info_from_replicaset wraps a ReplicaSetInfo in a
// collector.Info envelope tagged with the given event type.
func make_collector_info_from_replicaset(event_type collector.Info_Event, rs_info *collector.ReplicaSetInfo) *collector.Info {
	return &collector.Info{
		Type:   collector.Info_K8S_REPLICASET,
		Event:  event_type,
		RsInfo: rs_info,
	}
}
|
||||
|
||||
// send_info logs and sends one Info message on the gRPC stream.
func send_info(info *collector.Info, stream collector.Collector_CollectClient) error {
	logmsg("Sending: " + info.String())
	return stream.Send(info)
}
|
||||
|
||||
// Handles Pod event from watcher.
|
||||
//
|
||||
// Returns the version of the Pod object, and the error object, if any.
|
||||
func handle_pod_event(event watch.Event, stream collector.Collector_CollectClient) (*string, error) {
|
||||
event_type, err := info_event_type(event.Type)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pod, ok := event.Object.(*corev1.Pod)
|
||||
if !ok || pod == nil {
|
||||
return nil, errors.New("errorenous Pod watch event")
|
||||
}
|
||||
|
||||
err = send_info(make_collector_info_from_pod(event_type, make_pod_info(pod)), stream)
|
||||
return &pod.ObjectMeta.ResourceVersion, err
|
||||
}
|
||||
|
||||
// Handles ReplicaSet event from watcher.
|
||||
//
|
||||
// Returns the version of the Pod object, and the error object, if any.
|
||||
func handle_rs_event(event watch.Event, stream collector.Collector_CollectClient) (*string, error) {
|
||||
event_type, err := info_event_type(event.Type)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rs, ok := event.Object.(*appsv1.ReplicaSet)
|
||||
if !ok || rs == nil {
|
||||
return nil, errors.New("errorenous ReplicaSet watch event")
|
||||
}
|
||||
|
||||
err = send_info(make_collector_info_from_replicaset(event_type, make_rs_info(rs)), stream)
|
||||
return &rs.ObjectMeta.ResourceVersion, err
|
||||
}
|
||||
|
||||
// Exercises the Collector connection without talking to a k8s master: sends
// an endless loop of synthetic ReplicaSet/Pod ADDED and DELETED events,
// returning only when a stream Send fails.
func run_local_test(stream collector.Collector_CollectClient) error {
	for i := 0; ; i++ {
		s := strconv.Itoa(i)
		// Synthetic ReplicaSet owned by a Deployment.
		rs_info := &collector.ReplicaSetInfo{
			Uid: "RS-UID-" + s,
			Owner: &collector.OwnerInfo{
				Uid:  "RS-OWNER-UID-" + s,
				Name: "RS-OWNER-Name-" + s,
				Kind: "Deployment",
			},
		}

		info := &collector.Info{
			Type:   collector.Info_K8S_REPLICASET,
			Event:  collector.Info_ADDED,
			RsInfo: rs_info,
		}

		logmsg("Sending: " + info.String())
		err := stream.Send(info)
		if err != nil {
			return err
		}

		// Synthetic Pod whose owner UID points at the ReplicaSet above.
		// NOTE(review): the owner Name uses prefix "RSS-" while the
		// ReplicaSet itself uses "RS-" — presumably deliberate test data;
		// confirm whether the names are meant to match.
		pod_info := &collector.PodInfo{
			Uid:  "POD-UID-" + s,
			Ip:   "192.168.1.1",
			Ns:   "POD-NS-" + s,
			Name: "POD-N-" + s,
			Owner: &collector.OwnerInfo{
				Uid:  "RS-UID-" + s,
				Name: "RSS-OWNER-Name-" + s,
				Kind: "ReplicaSet",
			},
		}

		info2 := &collector.Info{
			Type:    collector.Info_K8S_POD,
			Event:   collector.Info_ADDED,
			PodInfo: pod_info,
		}

		logmsg("Sending: " + info2.String())
		err = stream.Send(info2)
		if err != nil {
			return err
		}

		// Delete the same Pod shortly after adding it.
		info3 := &collector.Info{
			Type:    collector.Info_K8S_POD,
			Event:   collector.Info_DELETED,
			PodInfo: pod_info,
		}
		time.Sleep(100 * time.Millisecond)

		logmsg("Sending: " + info3.String())
		err = stream.Send(info3)
		if err != nil {
			return err
		}

		// A Pod with no controlling owner (e.g. a bare Pod).
		pod_info_no_owner := &collector.PodInfo{
			Uid:  "POD-UID-NO-OWNER" + s,
			Ip:   "192.168.1.1",
			Name: "POD-NO-NAME-" + s,
			Ns:   "POD-NS-NO-" + s,
		}

		info4 := &collector.Info{
			Type:    collector.Info_K8S_POD,
			Event:   collector.Info_ADDED,
			PodInfo: pod_info_no_owner,
		}

		time.Sleep(100 * time.Millisecond)
		logmsg("Sending: " + info4.String())
		err = stream.Send(info4)
		if err != nil {
			return err
		}

		// Pace the loop so the receiving side is not flooded.
		time.Sleep(500 * time.Millisecond)
	}
}
|
||||
|
||||
// run establishes the gRPC stream to the Collector service, replays the
// current cluster state (all ReplicaSets then all Pods as ADDED events),
// and then relays watch events until the stream breaks, the context is
// cancelled, or the periodic re-watch timer fires a stream error/reset.
func run() error {
	logmsg("Connect to Collector service.")

	conn, err := grpc.Dial(*server_address, grpc.WithInsecure())
	if err != nil {
		return err
	}
	defer conn.Close()

	client := collector.NewCollectorClient(conn)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	stream, err := client.Collect(ctx)
	if err != nil {
		return err
	}

	// Testing in local machine: synthetic events only, no k8s access.
	if *local_test {
		return run_local_test(stream)
	}

	////////////////////////////////////////////////
	logmsg("Connect to k8s.")
	config, err := rest.InClusterConfig()
	if err != nil {
		return err
	}

	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		return err
	}

	// Initial sync: every existing ReplicaSet is reported as ADDED.
	logmsg("Fetch k8s ReplicaSet info.")
	apps_api := clientset.AppsV1()
	rs_list, err := apps_api.ReplicaSets(metav1.NamespaceAll).List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}

	for _, rs := range rs_list.Items {
		// &rs aliases the loop variable, but it is consumed synchronously by
		// send_info before the next iteration overwrites it.
		err := send_info(make_collector_info_from_replicaset(collector.Info_ADDED, make_rs_info(&rs)), stream)
		if err != nil {
			return err
		}
	}

	// Initial sync: every existing Pod is reported as ADDED.
	logmsg("Fetch k8s Pod info.")
	core_api := clientset.CoreV1()
	pod_list, err := core_api.Pods(metav1.NamespaceAll).List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}

	for _, pod := range pod_list.Items {
		err := send_info(make_collector_info_from_pod(collector.Info_ADDED, make_pod_info(&pod)), stream)
		if err != nil {
			return err
		}
	}

	logmsg("Start watch.")

	// Any Recv result — message or error — signals that the server wants
	// this relay torn down, so it is funneled into cancel_ch as an error.
	cancel_ch := make(chan error)
	go func() {
		_, err := stream.Recv()
		if err != nil {
			cancel_ch <- err
		} else {
			cancel_ch <- errors.New("Relay signals reset")
		}
		close(cancel_ch)
	}()

	// Resume the watches from the versions returned by the initial lists.
	pod_version := pod_list.ListMeta.ResourceVersion
	rs_version := rs_list.ListMeta.ResourceVersion

	// Watches are re-established every 5 minutes.
	// NOTE(review): the Ticker itself is never stopped, so it leaks when
	// run() returns; harmless here since main() re-runs run() forever, but
	// worth confirming.
	tick_ch := time.NewTicker(5 * time.Minute).C
	for {
		// Watch Pod
		pod_watcher, err := core_api.Pods(metav1.NamespaceAll).Watch(ctx,
			metav1.ListOptions{
				ResourceVersion: pod_version,
			})
		if err != nil {
			return err
		}
		pod_ch := pod_watcher.ResultChan()

		// Watch ReplicaSet
		rs_watcher, err := apps_api.ReplicaSets(metav1.NamespaceAll).Watch(ctx,
			metav1.ListOptions{
				ResourceVersion: rs_version,
			})
		if err != nil {
			pod_watcher.Stop()
			return err
		}
		rs_ch := rs_watcher.ResultChan()

		// Relay events until error/cancel/reset, or the periodic tick ends
		// this iteration and the outer loop re-watches from the latest
		// observed resource versions.
		keep_watching := true
		for keep_watching {
			select {
			case event := <-rs_ch:
				version, err := handle_rs_event(event, stream)
				if err != nil {
					pod_watcher.Stop()
					rs_watcher.Stop()
					return err
				}
				rs_version = *version

			case event := <-pod_ch:
				version, err := handle_pod_event(event, stream)
				if err != nil {
					pod_watcher.Stop()
					rs_watcher.Stop()
					return err
				}
				pod_version = *version

			case <-ctx.Done():
				logmsg("server signals canceld")
				pod_watcher.Stop()
				rs_watcher.Stop()
				return ctx.Err()

			case err := <-cancel_ch:
				// Collector requested a reset or the stream broke.
				pod_watcher.Stop()
				rs_watcher.Stop()
				return err

			case <-tick_ch:
				keep_watching = false
				logmsg("end of one iteration of watch loop.")
				pod_watcher.Stop()
				rs_watcher.Stop()
			}
		}
	}
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
for {
|
||||
err := run()
|
||||
if err != nil {
|
||||
logmsg(fmt.Sprintf("Error: %v", err))
|
||||
}
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,35 @@
|
|||
syntax = "proto3";
|
||||
|
||||
option go_package = "flowmill.net/collector";
|
||||
|
||||
package collector;
|
||||
|
||||
// Identity of a Kubernetes object's controlling owner — e.g. the Deployment
// that owns a ReplicaSet, or the ReplicaSet that owns a Pod.
message OwnerInfo {
  string uid = 1;  // Kubernetes UID of the owner object.
  string name = 2; // Owner object's name.
  string kind = 3; // Owner kind string, e.g. "Deployment" or "ReplicaSet".
}
|
||||
|
||||
// A single container belonging to a Pod.
message ContainerInfo {
  string id = 1;    // Container runtime id.
  string name = 2;  // Container name.
  string image = 3; // Image the container runs.
}
|
||||
|
||||
// Metadata describing one Pod. Field numbers are out of order because
// fields were appended over time; do not renumber.
message PodInfo {
  string uid = 1; // Kubernetes UID of the Pod.
  string ip = 2;  // Pod IP (IPv4 dotted-quad string).
  string name = 6; // Used when we don't have OWNER.
  OwnerInfo owner = 3; // Controlling owner, absent for bare Pods.
  string ns = 4; // namespace
  string version = 8; // Resource version the info was observed at.
  bool is_host_network = 5; // True when the Pod shares the host network.
  repeated string containers_ids = 7; // deprecated
  repeated ContainerInfo container_infos = 9; // Replaces containers_ids.
}
|
||||
|
||||
// Metadata describing one ReplicaSet; owner links it to its Deployment,
// when it has one.
message ReplicaSetInfo {
  string uid = 1; // Kubernetes UID of the ReplicaSet.

  OwnerInfo owner = 2; // Controlling owner (typically a Deployment).
}
|
||||
|
|
@ -0,0 +1,75 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#include "kubernetes_owner_kind.h"
|
||||
|
||||
namespace collector {
|
||||
|
||||
namespace {
// Canonical Kubernetes owner "kind" strings, matched and emitted verbatim.
static constexpr char REPLICA_SET_KIND[] = "ReplicaSet";
static constexpr char DEPLOYMENT_KIND[] = "Deployment";
// Synthetic sentinels used by this collector — not real Kubernetes kinds.
static constexpr char NO_OWNER_KIND[] = "NoOwner";
static constexpr char OTHER_KIND[] = "Other";
} // namespace
|
||||
|
||||
// Maps a Kubernetes owner-kind string onto the KubernetesOwnerKind enum.
// Anything not explicitly recognized collapses to Other.
KubernetesOwnerKind KubernetesOwnerKindFromString(const std::string &str)
{
  // At most one branch can match, so the order of checks is irrelevant.
  if (str == DEPLOYMENT_KIND) {
    return KubernetesOwnerKind::Deployment;
  } else if (str == REPLICA_SET_KIND) {
    return KubernetesOwnerKind::ReplicaSet;
  } else if (str == NO_OWNER_KIND) {
    return KubernetesOwnerKind::NoOwner;
  }
  return KubernetesOwnerKind::Other;
}
|
||||
|
||||
const char *KubernetesOwnerKindToString(const KubernetesOwnerKind kind)
|
||||
{
|
||||
switch (kind) {
|
||||
case KubernetesOwnerKind::ReplicaSet:
|
||||
return REPLICA_SET_KIND;
|
||||
case KubernetesOwnerKind::Deployment:
|
||||
return DEPLOYMENT_KIND;
|
||||
case KubernetesOwnerKind::NoOwner:
|
||||
return NO_OWNER_KIND;
|
||||
|
||||
default:
|
||||
return OTHER_KIND;
|
||||
}
|
||||
}
|
||||
|
||||
// True iff |str| is exactly the Kubernetes owner kind "Deployment".
bool KubernetesOwnerIsDeployment(const std::string &str)
{
  return str.compare("Deployment") == 0;
}
|
||||
|
||||
// True iff |str| is exactly the Kubernetes owner kind "ReplicaSet".
bool KubernetesOwnerIsReplicaSet(const std::string &str)
{
  return str.compare("ReplicaSet") == 0;
}
|
||||
|
||||
// True iff |str| is the synthetic "NoOwner" kind used for bare objects.
bool KubernetesOwnerIsNoOwner(const std::string &str)
{
  return str.compare("NoOwner") == 0;
}
|
||||
|
||||
} // namespace collector
|
||||
|
|
@ -0,0 +1,39 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "platform/types.h"
|
||||
#include <string>
|
||||
|
||||
namespace collector {
// Owner "kind" of a Kubernetes object as used by the collector wire
// protocol; stored in a single byte.
enum class KubernetesOwnerKind : u8 {
  ReplicaSet = 0,
  Deployment = 1,
  // TODO: fill in more as we go.
  //
  NoOwner = 254, // Object has no controlling owner.
  Other = 255    // Any kind not explicitly modeled above.
};

// Parses a Kubernetes kind string; unrecognized strings map to Other.
KubernetesOwnerKind KubernetesOwnerKindFromString(const std::string &str);
// Renders a kind as its canonical string; unknown values render as "Other".
const char *KubernetesOwnerKindToString(const KubernetesOwnerKind kind);

// Convenience predicates comparing a kind string to the canonical names.
bool KubernetesOwnerIsDeployment(const std::string &str);
bool KubernetesOwnerIsReplicaSet(const std::string &str);
bool KubernetesOwnerIsNoOwner(const std::string &str);

} // namespace collector
|
||||
|
|
@ -0,0 +1,449 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#include "kubernetes_rpc_server.h"
|
||||
|
||||
#include <arpa/inet.h>
|
||||
#include <deque>
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <unordered_map>
|
||||
#include <unordered_set>
|
||||
#include <vector>
|
||||
|
||||
#include "channel/buffered_writer.h"
|
||||
#include "platform/types.h"
|
||||
#include "util/boot_time.h"
|
||||
#include "util/log.h"
|
||||
#include "util/lookup3_hasher.h"
|
||||
#include "generated/kubernetes_info.pb.h"
|
||||
#include "generated/flowmill/ingest/writer.h"
|
||||
#include "kubernetes_owner_kind.h"
|
||||
#include "resync_channel.h"
|
||||
#include <util/protobuf_log.h>
|
||||
|
||||
namespace collector {
|
||||
using ::grpc::ServerContext;
|
||||
using ::grpc::ServerReaderWriter;
|
||||
using ::grpc::Status;
|
||||
using ::grpc::WriteOptions;
|
||||
|
||||
namespace {
|
||||
// K8sHandler keeps track of the state of Pod & ReplicaSet objects.
// It consumes the Pod & ReplicaSet events sent back by k8s-watcher, and
// decides whether & what messages to be sent back to Flowmill server.
class K8sHandler {
public:
  // Does not take ownership of |writer|; it must outlive this handler.
  explicit K8sHandler(flowmill::ingest::Writer *writer) : writer_(writer) {}

  ~K8sHandler() {}

  // Returns true if gRpc stream needs to be restarted.
  bool need_restart() const;

  // Event entry points, one per (object type, event kind) pair.
  void replica_set_new_or_modified(const ReplicaSetInfo &rs_info);
  void replica_set_deleted(const ReplicaSetInfo &rs_info);
  void pod_new_or_modified(const PodInfo &pod_info);
  void pod_deleted(const PodInfo &pod_info);

private:
  // Max number of Pods allowed to wait for the ReplicaSet infos.
  static constexpr u64 max_waiting_pods_ = 10000;

  // Max number of deleted ReplicaSets before they are purged.
  static constexpr u64 max_deleted_replicat_sets_ = 10000;

  // Interns |uid| into a compact sequential id (see uid_to_id_).
  u64 get_id(const std::string &uid);
  // Emitters toward the pipeline server.
  void send_pod_new(const PodInfo &pod_info, const OwnerInfo &owner);
  void send_pod_new_no_owner(const PodInfo &pod_info);
  void send_pod_containers(const PodInfo &pod_info);

  struct ReplicaSetStore {
    // Existing ReplicaSet metadata that we know of.
    std::unordered_map<u64, ReplicaSetInfo, ::util::Lookup3Hasher<u64>> infos;

    // ReplicaSet metadata which has been deleted recently (FIFO of ids).
    std::deque<u64> deleted;

    // Maps from the id of the ReplicaSet which is not available yet,
    // to list of ids of the Pods relying on this ReplicaSet.
    std::unordered_map<u64, std::vector<u64>, ::util::Lookup3Hasher<u64>>
        waiting;
  };

  struct PodStore {
    // Existing Pod metadata that we know of.
    std::unordered_map<u64, PodInfo, ::util::Lookup3Hasher<u64>> infos;

    // Set of Pods whose info has been sent back to pipeline server.
    std::unordered_set<u64, ::util::Lookup3Hasher<u64>> live;

    // Set of Pods that rely on a yet-to-be-seen ReplicaSet.
    std::unordered_set<u64, ::util::Lookup3Hasher<u64>> waiting;
  };

  // Maps ReplicaSet's and Pod's UID to a sequential id.
  // This is to avoid storing the UID (a long string) in multiple places.
  u64 next_id_ = 0;
  std::unordered_map<std::string, u64> uid_to_id_;

  ReplicaSetStore replica_sets_;
  PodStore pods_;

  flowmill::ingest::Writer *writer_; // not owned
};
|
||||
|
||||
bool K8sHandler::need_restart() const
|
||||
{
|
||||
return pods_.waiting.size() >= max_waiting_pods_;
|
||||
}
|
||||
|
||||
// Reports a newly-live Pod, attributed to the resolved |owner|, to the
// pipeline server, then reports the Pod's containers.
void K8sHandler::send_pod_new(const PodInfo &pod_info, const OwnerInfo &owner)
{
  LOG::trace("Server: enqueue POD New: {}", pod_info.uid());

  jb_blob uid{pod_info.uid().data(), (u16)pod_info.uid().size()};

  // Arguments, in wire order: uid, ipv4 (network byte order via inet_addr),
  // owner name, pod name, owner kind, owner uid, host-network flag,
  // namespace, resource version.
  writer_->pod_new_with_name(uid,
      (u32)(inet_addr(pod_info.ip().c_str())),
      jb_blob{owner.name().data(), (u16)owner.name().size()},
      jb_blob{pod_info.name().c_str(), (u16)pod_info.name().size()},
      (uint8_t)KubernetesOwnerKindFromString(owner.kind()),
      jb_blob{owner.uid().data(), (u16)owner.uid().size()},
      (pod_info.is_host_network() ? 1 : 0),
      jb_blob{pod_info.ns().data(), (u16)pod_info.ns().size()},
      jb_blob{pod_info.version().data(), (u16)pod_info.version().size()});

  send_pod_containers(pod_info);
}
|
||||
|
||||
|
||||
// Reports a Pod that has no controlling owner. The Pod's own name fills
// both the owner-name and pod-name slots, the kind is NoOwner, and the
// owner uid is left empty.
void K8sHandler::send_pod_new_no_owner(const PodInfo &pod_info)
{
  LOG::trace("Server: enqueue POD New (No Owner): {}", pod_info.uid());

  jb_blob uid{pod_info.uid().data(), (u16)pod_info.uid().size()};

  writer_->pod_new_with_name(uid,
      (u32)(inet_addr(pod_info.ip().c_str())),
      jb_blob{pod_info.name().c_str(), (u16)pod_info.name().size()}, // owner name := pod name
      jb_blob{pod_info.name().c_str(), (u16)pod_info.name().size()},
      (uint8_t)(KubernetesOwnerKind::NoOwner),
      jb_blob{"", (u16)0}, // no owner uid
      (pod_info.is_host_network() ? 1 : 0),
      jb_blob{pod_info.ns().data(), (u16)pod_info.ns().size()},
      jb_blob{pod_info.version().data(), (u16)pod_info.version().size()});

  send_pod_containers(pod_info);
}
|
||||
|
||||
void K8sHandler::send_pod_containers(const PodInfo &pod_info)
|
||||
{
|
||||
jb_blob uid{pod_info.uid().data(), (u16)pod_info.uid().size()};
|
||||
|
||||
for (int i = 0; i < pod_info.container_infos_size(); ++i) {
|
||||
std::string const &cid = pod_info.container_infos(i).id();
|
||||
std::string const &name = pod_info.container_infos(i).name();
|
||||
std::string const &image = pod_info.container_infos(i).image();
|
||||
writer_->pod_container(uid,
|
||||
jb_blob{cid.data(), (u16)cid.size()},
|
||||
jb_blob{name.data(), (u16)name.size()},
|
||||
jb_blob{image.data(), (u16)image.size()});
|
||||
}
|
||||
}
|
||||
|
||||
// Interns |uid|, returning the compact sequential id assigned to it.
// A UID seen for the first time is assigned the next free id.
u64 K8sHandler::get_id(const std::string &uid)
{
  auto [iter, inserted] = uid_to_id_.try_emplace(uid, next_id_);
  if (inserted) {
    ++next_id_;
  }
  return iter->second;
}
|
||||
|
||||
void K8sHandler::replica_set_new_or_modified(const ReplicaSetInfo &rs_info)
|
||||
{
|
||||
if (rs_info.uid().empty()) {
|
||||
LOG::warn("ReplicaSet info without UID. {}", rs_info);
|
||||
return;
|
||||
}
|
||||
|
||||
u64 id = get_id(rs_info.uid());
|
||||
|
||||
auto iter = replica_sets_.infos.find(id);
|
||||
if (iter != replica_sets_.infos.end()) {
|
||||
// updated
|
||||
iter->second.MergeFrom(rs_info);
|
||||
}
|
||||
else {
|
||||
// insert
|
||||
replica_sets_.infos.emplace(id, std::move(rs_info));
|
||||
}
|
||||
|
||||
// See if any Pod is waiting for this ReplicaSet
|
||||
auto waiting_iter = replica_sets_.waiting.find(id);
|
||||
if (waiting_iter == replica_sets_.waiting.end()) {
|
||||
return;
|
||||
}
|
||||
|
||||
for (u64 pod_id : waiting_iter->second) {
|
||||
auto pod_iter = pods_.infos.find(pod_id);
|
||||
if (pod_iter == pods_.infos.end()) {
|
||||
// The POD since has been deleted.
|
||||
continue;
|
||||
}
|
||||
|
||||
u64 current_owner_id = get_id(pod_iter->second.owner().uid());
|
||||
if (current_owner_id != id) {
|
||||
// The POD since has been updated with new owner.
|
||||
continue;
|
||||
}
|
||||
|
||||
if (KubernetesOwnerIsDeployment(rs_info.owner().kind())) {
|
||||
send_pod_new(pod_iter->second, rs_info.owner());
|
||||
}
|
||||
else {
|
||||
send_pod_new(pod_iter->second, pod_iter->second.owner());
|
||||
}
|
||||
pods_.waiting.erase(pod_id);
|
||||
pods_.live.insert(pod_id);
|
||||
}
|
||||
replica_sets_.waiting.erase(waiting_iter);
|
||||
}
|
||||
|
||||
// Handles a ReplicaSet DELETED event. Deleted entries are retained in a
// bounded FIFO so late-arriving Pods can still resolve their owner; only
// once the FIFO exceeds its cap is the oldest entry actually purged.
void K8sHandler::replica_set_deleted(const ReplicaSetInfo &rs_info)
{
  if (rs_info.uid().empty()) {
    LOG::warn("ReplicaSet info without UID. {}", rs_info);
    return;
  }

  u64 id = get_id(rs_info.uid());
  auto iter = replica_sets_.infos.find(id);
  if (iter == replica_sets_.infos.end()) {
    // Never stored: just drop the interned UID mapping.
    uid_to_id_.erase(rs_info.uid());
    return;
  }

  replica_sets_.deleted.push_back(id);
  if (replica_sets_.deleted.size() <= max_deleted_replicat_sets_) {
    return;
  }

  // There are more than |max_deleted_replicat_sets_| entries in the set,
  // so remove the oldest one.
  u64 expired_id = replica_sets_.deleted.front();
  replica_sets_.deleted.pop_front();
  auto expired_iter = replica_sets_.infos.find(expired_id);
  if (expired_iter == replica_sets_.infos.end()) {
    LOG::info("ReplicaSet removed before it expires.");
    return;
  }

  uid_to_id_.erase(expired_iter->second.uid());
  replica_sets_.infos.erase(expired_id);
}
|
||||
|
||||
void K8sHandler::pod_new_or_modified(const PodInfo &pod_info)
|
||||
{
|
||||
if (pod_info.uid().empty()) {
|
||||
LOG::warn("Pod info without UID. {}", pod_info);
|
||||
return;
|
||||
}
|
||||
|
||||
u64 id = get_id(pod_info.uid());
|
||||
|
||||
auto iter = pods_.infos.find(id);
|
||||
if (iter != pods_.infos.end()) {
|
||||
iter->second.MergeFrom(pod_info);
|
||||
LOG::trace("Merged pod into internal state: {}", pod_info);
|
||||
}
|
||||
else {
|
||||
iter = pods_.infos.emplace(id, std::move(pod_info)).first;
|
||||
LOG::trace("Added pod into internal state: {}", pod_info);
|
||||
}
|
||||
|
||||
if (pods_.live.find(id) != pods_.live.end()) {
|
||||
// TODO: we might want to define and pod_modified message and send it back
|
||||
// to pipeline server
|
||||
LOG::trace("Pod has already been reported. Sending containers only. {}", pod_info);
|
||||
send_pod_containers(pod_info);
|
||||
return;
|
||||
}
|
||||
|
||||
const auto &pod = iter->second;
|
||||
if (pod.ip().empty()) {
|
||||
LOG::trace("Pod has not been reported, but its ip is empty. IP empty: {}", pod.ip().empty());
|
||||
return;
|
||||
}
|
||||
|
||||
if (!pod.has_owner()) {
|
||||
LOG::trace("Pod does not have owner. Sending. {}", pod_info);
|
||||
send_pod_new_no_owner(pod);
|
||||
pods_.live.insert(id);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!KubernetesOwnerIsReplicaSet(pod.owner().kind())) {
|
||||
// Not owned by a ReplicaSet, just send new_pod
|
||||
LOG::trace("Pod is not owned by ReplicaSet. Sending. {}", pod_info);
|
||||
send_pod_new(pod, pod.owner());
|
||||
pods_.live.insert(id);
|
||||
return;
|
||||
}
|
||||
|
||||
u64 owner_id = get_id(pod.owner().uid());
|
||||
auto rs_iter = replica_sets_.infos.find(owner_id);
|
||||
|
||||
if (rs_iter == replica_sets_.infos.end()) {
|
||||
// We have not seen the ReplicaSet yet, needs to wait.
|
||||
auto waiting_iter = replica_sets_.waiting.find(owner_id);
|
||||
if (waiting_iter == replica_sets_.waiting.end()) {
|
||||
replica_sets_.waiting[owner_id] = std::vector<u64>({id});
|
||||
LOG::trace("Pod's ReplicaSet and queue did not exist, added queue and enqueued. {}", pod_info);
|
||||
}
|
||||
else {
|
||||
waiting_iter->second.push_back(id);
|
||||
LOG::trace("Pod's ReplicaSet did not exist, enqueued to existing queue. {}", pod_info);
|
||||
}
|
||||
pods_.waiting.insert(id);
|
||||
return;
|
||||
}
|
||||
|
||||
const OwnerInfo &owner = rs_iter->second.owner();
|
||||
if (KubernetesOwnerIsDeployment(owner.kind())) {
|
||||
LOG::trace("Pod's owner ReplicaSet has Deployment owner. Sending pod with owner {}", owner);
|
||||
send_pod_new(pod, owner);
|
||||
}
|
||||
else {
|
||||
LOG::trace("Pod's owner ReplicaSet has non-Deployment owner. Sending pod with owner {}", pod.owner());
|
||||
send_pod_new(pod, pod.owner());
|
||||
}
|
||||
pods_.live.insert(id);
|
||||
}
|
||||
|
||||
// Handles a Pod DELETED event: notifies the pipeline server only if the Pod
// had previously been reported, then drops all local state for it.
void K8sHandler::pod_deleted(const PodInfo &pod_info)
{
  if (pod_info.uid().empty()) {
    LOG::error("Pod delete event without UID. ({})", pod_info);
    return;
  }

  u64 id = get_id(pod_info.uid());
  auto live_iter = pods_.live.find(id);
  if (live_iter != pods_.live.end()) {
    LOG::trace("Server: enqueue POD Delete: {}\n", pod_info.uid());

    writer_->pod_delete(
        jb_blob{pod_info.uid().data(), (u16)pod_info.uid().size()});
  }

  // Erase unconditionally from every store; erase() of an absent id is a no-op.
  pods_.live.erase(id);
  pods_.infos.erase(id);
  pods_.waiting.erase(id);
  uid_to_id_.erase(pod_info.uid());
}
|
||||
} // namespace
|
||||
|
||||
// Does not take ownership of |channel_factory|; it must outlive this server.
// |collect_buffer_size| is forwarded to the per-stream BufferedWriter.
KubernetesRpcServer::KubernetesRpcServer(ResyncChannelFactory *channel_factory,
                                         std::size_t collect_buffer_size)
    : channel_factory_(channel_factory),
      collect_buffer_size_(collect_buffer_size)
{
}
|
||||
|
||||
KubernetesRpcServer::~KubernetesRpcServer() {}
|
||||
|
||||
// Server side of the bidirectional Collect stream from the k8s-watcher.
//
// Reads Info messages (Pod / ReplicaSet events), feeds them through a
// K8sHandler, and forwards the resulting wire messages toward the Flowmill
// pipeline via a ResyncChannel. Intentionally ends with CANCELLED: the
// stream only terminates on a resync request or a forced restart.
Status
KubernetesRpcServer::Collect(ServerContext *context,
                             ServerReaderWriter<Response, Info> *reader_writer)
{
  // Invoked by the resync machinery: tell the watcher to stop by writing an
  // empty Response flushed as the last message, then cancel this RPC.
  std::function<void(void)> reset_callback = [&]() {
    Response response;
    WriteOptions options;
    options.set_write_through().set_last_message();
    LOG::info("Relay: notify watcher to stop.");

    reader_writer->Write(response, options);
    LOG::info("Relay: canceling watcher.");
    context->TryCancel();
  };

  std::unique_ptr<ResyncChannel> resync_channel =
      channel_factory_->new_channel(reset_callback);

  channel::BufferedWriter buffered_writer(*resync_channel, collect_buffer_size_);

  flowmill::ingest::Writer writer(buffered_writer, monotonic, get_boot_time());

  K8sHandler handler(&writer);
  Info info;
  while (reader_writer->Read(&info)) {
    if (info.type() == Info::K8S_REPLICASET) {
      const ReplicaSetInfo &rs_info = info.rs_info();
      switch (info.event()) {
      case Info_Event_ADDED:
      case Info_Event_MODIFIED:
        handler.replica_set_new_or_modified(rs_info);
        break;
      case Info_Event_DELETED:
        handler.replica_set_deleted(rs_info);
        break;
      case Info_Event_ERROR:
      default:
        // do nothing now.
        break;
      }
    }
    else {
      // K8S_POD
      const PodInfo &pod_info = info.pod_info();
      switch (info.event()) {
      case Info_Event_ADDED:
      case Info_Event_MODIFIED:
        handler.pod_new_or_modified(pod_info);
        break;
      case Info_Event_DELETED:
        handler.pod_deleted(pod_info);
        break;
      case Info_Event_ERROR:
        // do nothing now.
      default:
        break;
      }
    }
    // Too many Pods parked waiting on unseen ReplicaSets: break out so the
    // watcher reconnects and performs a full re-list.
    if (handler.need_restart()) {
      break;
    }
    // Always flush after every send.
    // The internal queue in ResyncQueue will buffer multiple messages
    // together instead.
    buffered_writer.flush();
  } // while()

  // Discard anything left.
  buffered_writer.reset();

  // Always returns CANCELLED, since the stream should not be broken unless
  // something bad happens.
  return Status::CANCELLED;
}
|
||||
|
||||
} // namespace collector
|
||||
|
|
@ -0,0 +1,46 @@
|
|||
//
|
||||
// Copyright 2021 Splunk Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#pragma once
|
||||
#include <string>
|
||||
#include <unordered_map>
|
||||
|
||||
#include "generated/collector.grpc.pb.h"
|
||||
#include "generated/collector.pb.h"
|
||||
#include "resync_queue_interface.h"
|
||||
|
||||
namespace collector {

// KubernetesRpcServer implements Collector::Service gRpc server.
//
// It receives the client-side streaming gRpc from Kubernetes Reader, and
// extracts related information and forwards to Flowmill pipeline server.
class KubernetesRpcServer : public Collector::Service {
public:
  // Does not take ownership of |channel_factory|
  explicit KubernetesRpcServer(ResyncChannelFactory *channel_factory,
                               std::size_t collect_buffer_size);
  ~KubernetesRpcServer() override;

  // Handles one bidirectional Collect stream; returns when the stream ends
  // (normally with CANCELLED after a resync request or forced restart).
  ::grpc::Status
  Collect(::grpc::ServerContext *context,
          ::grpc::ServerReaderWriter<Response, Info> *reader_writer) override;

private:
  ResyncChannelFactory *channel_factory_; // not owned
  std::size_t collect_buffer_size_;       // per-stream BufferedWriter size
};
} // namespace collector
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue