mirror of https://github.com/istio/proxy.git
Compare commits
67 Commits
| SHA1 |
|---|
| 06da92fd5c |
| 9b2eb4ea8c |
| 7160620e52 |
| 1b19a2a500 |
| 1eeb3a6180 |
| 1716984e9e |
| 71e077cbc1 |
| df03234fbb |
| 188e4b578d |
| 5f73db68b7 |
| 71cd58c6ac |
| ae80069bcb |
| 3cdaab0719 |
| 38b08e6452 |
| 802428003a |
| dd4d22e6eb |
| 97afdc41e0 |
| eb790e43d4 |
| 7fc1e3be58 |
| d475eda090 |
| 37bf2b4817 |
| 25a358d473 |
| 74d77fd3aa |
| d839f09467 |
| f9707e29aa |
| c72a000d15 |
| 6709b394a4 |
| fb5e034fbc |
| 2e47601ff2 |
| 6a145a6ace |
| 54f8fc4f97 |
| 517cd96513 |
| a804636e5c |
| 9314f9ad2d |
| 33e880887a |
| cfed93480a |
| f413882189 |
| 96f989cc74 |
| 11647e18d3 |
| bdb31c9666 |
| 4d518f8d51 |
| e9b7643ac1 |
| a1ff538a63 |
| 0f19b5a369 |
| 2891ae0979 |
| ec466124d7 |
| ed82e4b0fb |
| db2e94f010 |
| 103b0ac8fc |
| 0434620cc1 |
| 3b92829545 |
| 3ac8014570 |
| 17c173b8ad |
| 31cf02691e |
| e78f346b66 |
| c41b644613 |
| e6816f641a |
| b954956ba1 |
| beb639e84a |
| 01b89b4c43 |
| 76e9677fb1 |
| da2487b4c8 |
| 6a20765e1f |
| d2bde2ec1b |
| 467917862c |
| 16a9e0b7fb |
| c5b8bd373e |
@@ -1 +1 @@
-6.1.0
+6.3.2
@@ -1,6 +1,6 @@
{
    "name": "istio build-tools",
-    "image": "gcr.io/istio-testing/build-tools:master-936207ec8823f21aa330a82c20649c6f9e2f7a22",
+    "image": "gcr.io/istio-testing/build-tools:release-1.19-013f27a57a64f1d22f60c3dcb9b242cf4814065a",
    "privileged": true,
    "remoteEnv": {
        "USE_GKE_GCLOUD_AUTH_PLUGIN": "True",
@@ -1 +1 @@
-* @istio/wg-policies-and-telemetry-maintainers
+* @istio/release-managers-1-19
@@ -174,6 +174,7 @@ else
endif

# Used by build container to export the build output from the docker volume cache
exportcache: BAZEL_BIN_PATH ?= $(shell bazel info $(BAZEL_BUILD_ARGS) $(BAZEL_CONFIG_CURRENT) bazel-bin)
exportcache:
@mkdir -p /work/out/$(TARGET_OS)_$(TARGET_ARCH)
@cp -a $(BAZEL_BIN_PATH)/envoy /work/out/$(TARGET_OS)_$(TARGET_ARCH)
@@ -34,10 +34,10 @@ bind(
# 1. Determine SHA256 `wget https://github.com/envoyproxy/envoy/archive/$COMMIT.tar.gz && sha256sum $COMMIT.tar.gz`
# 2. Update .bazelversion, envoy.bazelrc and .bazelrc if needed.
#
-# Commit date: 2023-07-31
-ENVOY_SHA = "9b06b1b0c55bd61504ebae8b5897f89f6c898aed"
+# Commit date: 2024-04-04
+ENVOY_SHA = "9134d6a65e5c2c714d503807eb31a8490471fc5f"

-ENVOY_SHA256 = "968680005396de39358e57f2476bc9a4e027bdaf3d6a2acca6e794207b5c2cfa"
+ENVOY_SHA256 = "a60eaa0e6a2ffac6428777c4fe716444dc5fef5e885b5c50ab646112d35fc95d"

ENVOY_ORG = "envoyproxy"
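The comment in the hunk above documents the bump procedure. A minimal sketch of those two steps in shell, using only the commit already pinned above as an example value:

COMMIT=9134d6a65e5c2c714d503807eb31a8490471fc5f
# Download the archive for the pinned commit and compute the checksum that becomes ENVOY_SHA256.
wget https://github.com/envoyproxy/envoy/archive/$COMMIT.tar.gz && sha256sum $COMMIT.tar.gz
# Then update ENVOY_SHA / ENVOY_SHA256 in WORKSPACE and, if needed, .bazelversion, envoy.bazelrc and .bazelrc.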
@@ -35,6 +35,8 @@ ENVOY_EXTENSIONS = {
    "envoy.compression.gzip.decompressor": "//source/extensions/compression/gzip/decompressor:config",
    "envoy.compression.brotli.compressor": "//source/extensions/compression/brotli/compressor:config",
    "envoy.compression.brotli.decompressor": "//source/extensions/compression/brotli/decompressor:config",
+    "envoy.compression.zstd.compressor": "//source/extensions/compression/zstd/compressor:config",
+    "envoy.compression.zstd.decompressor": "//source/extensions/compression/zstd/decompressor:config",

    #
    # gRPC Credentials Plugins

@@ -54,6 +56,9 @@ ENVOY_EXTENSIONS = {
    #

    "envoy.health_checkers.redis": "//source/extensions/health_checkers/redis:config",
+    "envoy.health_checkers.tcp": "//source/extensions/health_checkers/tcp:health_checker_lib",
+    "envoy.health_checkers.http": "//source/extensions/health_checkers/http:health_checker_lib",
+    "envoy.health_checkers.grpc": "//source/extensions/health_checkers/grpc:health_checker_lib",

    #
    # Input Matchers

@@ -105,12 +110,15 @@ ENVOY_EXTENSIONS = {
    "envoy.filters.http.compressor": "//source/extensions/filters/http/compressor:config",
    "envoy.filters.http.cors": "//source/extensions/filters/http/cors:config",
+    "envoy.filters.http.composite": "//source/extensions/filters/http/composite:config",
+    "envoy.filters.http.connect_grpc_bridge": "//source/extensions/filters/http/connect_grpc_bridge:config",
    "envoy.filters.http.csrf": "//source/extensions/filters/http/csrf:config",
    "envoy.filters.http.decompressor": "//source/extensions/filters/http/decompressor:config",
    "envoy.filters.http.dynamic_forward_proxy": "//source/extensions/filters/http/dynamic_forward_proxy:config",
    "envoy.filters.http.ext_authz": "//source/extensions/filters/http/ext_authz:config",
    "envoy.filters.http.ext_proc": "//source/extensions/filters/http/ext_proc:config",
    "envoy.filters.http.fault": "//source/extensions/filters/http/fault:config",
    "envoy.filters.http.gcp_authn": "//source/extensions/filters/http/gcp_authn:config",
+    "envoy.filters.http.grpc_field_extraction": "//source/extensions/filters/http/grpc_field_extraction:config",
    "envoy.filters.http.grpc_http1_bridge": "//source/extensions/filters/http/grpc_http1_bridge:config",
    "envoy.filters.http.grpc_http1_reverse_bridge": "//source/extensions/filters/http/grpc_http1_reverse_bridge:config",
    "envoy.filters.http.grpc_json_transcoder": "//source/extensions/filters/http/grpc_json_transcoder:config",

@@ -247,7 +255,8 @@ ENVOY_EXTENSIONS = {
    #
    # CacheFilter plugins
    #
-    "envoy.extensions.http.cache.simple": "//source/extensions/http/cache/simple_http_cache:config",
+    "envoy.extensions.http.cache.file_system_http_cache": "//source/extensions/http/cache/file_system_http_cache:config",
+    "envoy.extensions.http.cache.simple": "//source/extensions/http/cache/simple_http_cache:config",

    #
    # Internal redirect predicates
@@ -1 +1 @@
-3e7e4a6330e5a70608669a7bce30c737d961b36e
+bdb38aa251ecadf811809709d381eb2d8f62d9cf
@@ -91,7 +91,7 @@ mirror-licenses: mod-download-go
@license-lint --mirror

TMP := $(shell mktemp -d -u)
-UPDATE_BRANCH ?= "master"
+UPDATE_BRANCH ?= "release-1.19"

BUILD_TOOLS_ORG ?= "istio"
@@ -75,7 +75,7 @@ fi
TOOLS_REGISTRY_PROVIDER=${TOOLS_REGISTRY_PROVIDER:-gcr.io}
PROJECT_ID=${PROJECT_ID:-istio-testing}
if [[ "${IMAGE_VERSION:-}" == "" ]]; then
-  IMAGE_VERSION=master-936207ec8823f21aa330a82c20649c6f9e2f7a22
+  IMAGE_VERSION=release-1.19-013f27a57a64f1d22f60c3dcb9b242cf4814065a
fi
if [[ "${IMAGE_NAME:-}" == "" ]]; then
  IMAGE_NAME=build-tools
envoy.bazelrc
@@ -10,9 +10,11 @@
# Startup options cannot be selected via config.
startup --host_jvm_args=-Xmx3g

fetch --color=yes
run --color=yes

build --color=yes
build --jobs=HOST_CPUS-1
build --workspace_status_command="bash bazel/get_workspace_status"
build --incompatible_strict_action_env
build --java_runtime_version=remotejdk_11

@@ -40,6 +42,8 @@ build --action_env=BAZEL_FAKE_SCM_REVISION --host_action_env=BAZEL_FAKE_SCM_REVI
build --enable_platform_specific_config
build --test_summary=terse

build:docs-ci --action_env=DOCS_RST_CHECK=1 --host_action_env=DOCS_RST_CHECK=1

# TODO(keith): Remove once these 2 are the default
build --incompatible_config_setting_private_default_visibility
build --incompatible_enforce_config_setting_visibility

@@ -69,8 +73,6 @@ build --@com_googlesource_googleurl//build_config:system_icu=0
# Common flags for sanitizers
build:sanitizer --define tcmalloc=disabled
build:sanitizer --linkopt -ldl
-build:sanitizer --build_tag_filters=-no_san
-build:sanitizer --test_tag_filters=-no_san

# Common flags for Clang
build:clang --action_env=BAZEL_COMPILER=clang

@@ -90,6 +92,8 @@ build:asan --config=sanitizer
# ASAN install its signal handler, disable ours so the stacktrace will be printed by ASAN
build:asan --define signal_trace=disabled
build:asan --define ENVOY_CONFIG_ASAN=1
+build:asan --build_tag_filters=-no_san
+build:asan --test_tag_filters=-no_san
build:asan --copt -fsanitize=address,undefined
build:asan --linkopt -fsanitize=address,undefined
# vptr and function sanitizer are enabled in clang-asan if it is set up via bazel/setup_clang.sh.

@@ -143,12 +147,15 @@ build:clang-tsan --copt -DEVENT__DISABLE_DEBUG_MODE
# https://github.com/abseil/abseil-cpp/issues/760
# https://github.com/google/sanitizers/issues/953
build:clang-tsan --test_env="TSAN_OPTIONS=report_atomic_races=0"
build:clang-tsan --test_timeout=120,600,1500,4800

# Clang MSAN - this is the base config for remote-msan and docker-msan. To run this config without
# our build image, follow https://github.com/google/sanitizers/wiki/MemorySanitizerLibcxxHowTo
# with libc++ instruction and provide corresponding `--copt` and `--linkopt` as well.
build:clang-msan --action_env=ENVOY_MSAN=1
build:clang-msan --config=sanitizer
build:clang-msan --build_tag_filters=-no_san
build:clang-msan --test_tag_filters=-no_san
build:clang-msan --define ENVOY_CONFIG_MSAN=1
build:clang-msan --copt -fsanitize=memory
build:clang-msan --linkopt -fsanitize=memory

@@ -182,6 +189,7 @@ build --test_env=HEAPCHECK=normal --test_env=PPROF_PATH
# Coverage options
coverage --config=coverage
coverage --build_tests_only

build:coverage --action_env=BAZEL_USE_LLVM_NATIVE_COVERAGE=1
build:coverage --action_env=GCOV=llvm-profdata
build:coverage --copt=-DNDEBUG

@@ -190,20 +198,31 @@ build:coverage --test_timeout=390,750,1500,5700
build:coverage --define=dynamic_link_tests=true
build:coverage --define=ENVOY_CONFIG_COVERAGE=1
build:coverage --cxxopt="-DENVOY_CONFIG_COVERAGE=1"
build:coverage --coverage_support=@envoy//bazel/coverage:coverage_support
build:coverage --test_env=CC_CODE_COVERAGE_SCRIPT=bazel/coverage/collect_cc_coverage.sh
build:coverage --test_env=HEAPCHECK=
build:coverage --combined_report=lcov
-build:coverage --strategy=TestRunner=sandboxed,local
+build:coverage --strategy=TestRunner=remote,sandboxed,local
build:coverage --strategy=CoverageReport=sandboxed,local
build:coverage --experimental_use_llvm_covmap
build:coverage --experimental_generate_llvm_lcov
build:coverage --experimental_split_coverage_postprocessing
build:coverage --experimental_fetch_all_coverage_outputs
build:coverage --collect_code_coverage
build:coverage --test_tag_filters=-nocoverage
-build:coverage --instrumentation_filter="//source(?!/common/quic/platform)[/:],//envoy[/:],//contrib(?!/.*/test)[/:]"
+build:coverage --instrumentation_filter="^//source(?!/common/quic/platform)[/:],^//envoy[/:],^//contrib(?!/.*/test)[/:]"
build:coverage --remote_download_minimal
build:coverage --define=tcmalloc=gperftools
build:coverage --define=no_debug_info=1
# `--no-relax` is required for coverage to not err with `relocation R_X86_64_REX_GOTPCRELX`
build:coverage --linkopt=-Wl,-s,--no-relax
build:coverage --test_env=ENVOY_IP_TEST_VERSIONS=v4only

build:test-coverage --test_arg="-l trace"
build:test-coverage --test_arg="--log-path /dev/null"
build:test-coverage --test_tag_filters=-nocoverage,-fuzz_target
build:fuzz-coverage --config=plain-fuzzer
build:fuzz-coverage --run_under=@envoy//bazel/coverage:fuzz_coverage_wrapper.sh
build:fuzz-coverage --test_tag_filters=-nocoverage

build:cache-local --remote_cache=grpc://localhost:9092

# Remote execution: https://docs.bazel.build/versions/master/remote-execution.html
build:rbe-toolchain --action_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1

@@ -263,10 +282,6 @@ build:remote --spawn_strategy=remote,sandboxed,local
build:remote --strategy=Javac=remote,sandboxed,local
build:remote --strategy=Closure=remote,sandboxed,local
build:remote --strategy=Genrule=remote,sandboxed,local
build:remote --remote_timeout=7200
build:remote --google_default_credentials=true
build:remote --remote_download_toplevel
build:remote --nobuild_runfile_links

# Windows bazel does not allow sandboxed as a spawn strategy
build:remote-windows --spawn_strategy=remote,local

@@ -306,9 +321,28 @@ build:remote-clang-cl --config=remote-windows
build:remote-clang-cl --config=clang-cl
build:remote-clang-cl --config=rbe-toolchain-clang-cl

## Compile-time-options testing
# Right now, none of the available compile-time options conflict with each other. If this
# changes, this build type may need to be broken up.
build:compile-time-options --define=admin_html=disabled
build:compile-time-options --define=signal_trace=disabled
build:compile-time-options --define=hot_restart=disabled
build:compile-time-options --define=google_grpc=disabled
build:compile-time-options --define=boringssl=fips
build:compile-time-options --define=log_debug_assert_in_release=enabled
build:compile-time-options --define=path_normalization_by_default=true
build:compile-time-options --define=deprecated_features=disabled
build:compile-time-options --define=tcmalloc=gperftools
build:compile-time-options --define=zlib=ng
build:compile-time-options --define=uhv=enabled
build:compile-time-options --config=libc++20
build:compile-time-options --test_env=ENVOY_HAS_EXTRA_EXTENSIONS=true
build:compile-time-options --@envoy//bazel:http3=False
build:compile-time-options --@envoy//source/extensions/filters/http/kill_request:enabled

# Docker sandbox
# NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/main/toolchains/rbe_toolchains_config.bzl#L8
-build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:41c5a05d708972d703661b702a63ef5060125c33
+build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:fdd65c6270a8507a18d5acd6cf19a18cb695e4fa@sha256:3c8a3ce6f90dcfb5d09dc8f79bb01404d3526d420061f9a176e0a8e91e1e573e
build:docker-sandbox --spawn_strategy=docker
build:docker-sandbox --strategy=Javac=docker
build:docker-sandbox --strategy=Closure=docker

@@ -339,16 +373,13 @@ build:docker-tsan --config=rbe-toolchain-clang-libc++
build:docker-tsan --config=rbe-toolchain-tsan

# CI configurations
build:remote-ci --remote_cache=grpcs://remotebuildexecution.googleapis.com
build:remote-ci --remote_executor=grpcs://remotebuildexecution.googleapis.com
build:remote-ci --config=ci
build:remote-ci --remote_download_minimal

# Note this config is used by mobile CI also.
build:ci --noshow_progress
build:ci --noshow_loading_progress

# Build Event Service
build:google-bes --bes_backend=grpcs://buildeventservice.googleapis.com
build:google-bes --bes_results_url=https://source.cloud.google.com/results/invocations/
build:ci --test_output=errors

# Fuzz builds

@@ -439,6 +470,64 @@ build:windows --features=fully_static_link
build:windows --features=static_link_msvcrt
build:windows --dynamic_mode=off

# RBE (Google)
build:rbe-google --google_default_credentials=true
build:rbe-google --remote_cache=grpcs://remotebuildexecution.googleapis.com
build:rbe-google --remote_executor=grpcs://remotebuildexecution.googleapis.com
build:rbe-google --remote_timeout=7200
build:rbe-google --remote_instance_name=projects/envoy-ci/instances/default_instance

build:rbe-google-bes --bes_backend=grpcs://buildeventservice.googleapis.com
build:rbe-google-bes --bes_results_url=https://source.cloud.google.com/results/invocations/

# RBE (Engflow mobile)
build:rbe-engflow --google_default_credentials=false
build:rbe-engflow --remote_cache=grpcs://envoy.cluster.engflow.com
build:rbe-engflow --remote_executor=grpcs://envoy.cluster.engflow.com
build:rbe-engflow --bes_backend=grpcs://envoy.cluster.engflow.com/
build:rbe-engflow --bes_results_url=https://envoy.cluster.engflow.com/invocation/
build:rbe-engflow --credential_helper=*.engflow.com=%workspace%/bazel/engflow-bazel-credential-helper.sh
build:rbe-engflow --grpc_keepalive_time=30s
build:rbe-engflow --remote_timeout=3600s
build:rbe-engflow --bes_timeout=3600s
build:rbe-engflow --bes_upload_mode=fully_async

build:cache-envoy-engflow --google_default_credentials=false
build:cache-envoy-engflow --remote_cache=grpcs://morganite.cluster.engflow.com
build:cache-envoy-engflow --remote_timeout=3600s
build:cache-envoy-engflow --credential_helper=*.engflow.com=%workspace%/bazel/engflow-bazel-credential-helper.sh
build:cache-envoy-engflow --grpc_keepalive_time=30s
build:bes-envoy-engflow --bes_backend=grpcs://morganite.cluster.engflow.com/
build:bes-envoy-engflow --bes_results_url=https://morganite.cluster.engflow.com/invocation/
build:bes-envoy-engflow --bes_timeout=3600s
build:bes-envoy-engflow --bes_upload_mode=fully_async
build:rbe-envoy-engflow --config=cache-envoy-engflow
build:rbe-envoy-engflow --config=bes-envoy-engflow
build:rbe-envoy-engflow --remote_executor=grpcs://morganite.cluster.engflow.com
build:rbe-envoy-engflow --remote_default_exec_properties=container-image=docker://docker.io/envoyproxy/envoy-build-ubuntu:fdd65c6270a8507a18d5acd6cf19a18cb695e4fa@sha256:3c8a3ce6f90dcfb5d09dc8f79bb01404d3526d420061f9a176e0a8e91e1e573e

#############################################################################
# debug: Various Bazel debugging flags
#############################################################################
# debug/bazel
common:debug-bazel --announce_rc
common:debug-bazel -s
# debug/sandbox
common:debug-sandbox --verbose_failures
common:debug-sandbox --sandbox_debug
# debug/coverage
common:debug-coverage --action_env=VERBOSE_COVERAGE=true
common:debug-coverage --test_env=VERBOSE_COVERAGE=true
common:debug-coverage --test_env=DISPLAY_LCOV_CMD=true
common:debug-coverage --config=debug-tests
# debug/tests
common:debug-tests --test_output=all
# debug/everything
common:debug --config=debug-bazel
common:debug --config=debug-sandbox
common:debug --config=debug-coverage
common:debug --config=debug-tests

try-import %workspace%/clang.bazelrc
try-import %workspace%/user.bazelrc
try-import %workspace%/local_tsan.bazelrc
@@ -347,7 +347,7 @@ PeerNodeInfo::PeerNodeInfo(const std::string_view peer_metadata_id_key,
                           const std::string_view peer_metadata_key) {
  // Attempt to read from filter_state first.
  found_ = getValue({peer_metadata_id_key}, &peer_id_);
-  if (found_ && peer_id_ != kMetadataNotFoundValue) {
+  if (found_) {
    if (getValue({peer_metadata_key}, &peer_node_)) {
      return;
    }

@@ -355,6 +355,9 @@ PeerNodeInfo::PeerNodeInfo(const std::string_view peer_metadata_id_key,

  // Sentinel value is preserved as ID to implement maybeWaiting.
  found_ = false;
  if (getValue({kMetadataNotFoundValue}, &peer_id_)) {
    peer_id_ = kMetadataNotFoundValue;
  }

  // Downstream peer metadata will never be in localhost endpoint. Skip
  // looking for it.
@ -32,8 +32,8 @@ constexpr std::string_view kUpstreamMetadataKey = "upstream_peer";
|
|||
constexpr std::string_view kDownstreamMetadataIdKey = "downstream_peer_id";
|
||||
constexpr std::string_view kDownstreamMetadataKey = "downstream_peer";
|
||||
|
||||
// Sentinel value assigned to peer metadata ID key, indicating that the peer
|
||||
// metadata is absent. This is different from a missing peer metadata ID key
|
||||
// Sentinel key in the filter state, indicating that the peer metadata is
|
||||
// decidedly absent. This is different from a missing peer metadata ID key
|
||||
// which could indicate that the metadata is not received yet.
|
||||
const std::string kMetadataNotFoundValue = "envoy.wasm.metadata_exchange.peer_unknown";
|
||||
|
||||
|
|
|
@@ -21,6 +21,7 @@
#include "extensions/stackdriver/common/constants.h"
#include "google/api/monitored_resource.pb.h"
#include "grpcpp/grpcpp.h"
+#include "grpcpp/security/tls_certificate_provider.h"

namespace Extensions {
namespace Stackdriver {

@@ -80,16 +81,19 @@ getStackdriverOptions(const Wasm::Common::FlatNode& local_node_info,
    }
  }

-  auto ssl_creds_options = grpc::SslCredentialsOptions();
+  grpc::experimental::TlsChannelCredentialsOptions tls_options;
+  tls_options.set_max_tls_version(grpc_tls_version::TLS1_2);
  if (!stub_option.test_root_pem_path.empty()) {
    std::ifstream file(stub_option.test_root_pem_path);
    if (!file.fail()) {
      std::stringstream file_string;
      file_string << file.rdbuf();
-      ssl_creds_options.pem_root_certs = file_string.str();
+      tls_options.set_certificate_provider(
+          std::make_shared<grpc::experimental::StaticDataCertificateProvider>(file_string.str()));
+      tls_options.watch_root_certs();
    }
  }
-  auto channel_creds = grpc::SslCredentials(ssl_creds_options);
+  auto channel_creds = grpc::experimental::TlsCredentials(tls_options);

  if (!stub_option.insecure_endpoint.empty()) {
    auto channel =
go.mod
@@ -7,7 +7,7 @@ require (
cloud.google.com/go/monitoring v1.13.0
cloud.google.com/go/trace v1.9.0
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4
-github.com/envoyproxy/go-control-plane v0.11.2-0.20230726084335-b501c94cb61e
+github.com/envoyproxy/go-control-plane v0.11.2-0.20230725211550-11bfe846bcd4
github.com/golang/protobuf v1.5.3
github.com/google/go-cmp v0.5.9
github.com/prometheus/client_model v0.4.0
go.sum
@@ -18,6 +18,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.11.2-0.20230725211550-11bfe846bcd4 h1:u4facp1hZe/ZmiAkiEFZmWAbg7s4+a5Z3tkz8Hg6I9w=
github.com/envoyproxy/go-control-plane v0.11.2-0.20230725211550-11bfe846bcd4/go.mod h1:djL+W7LURiPM8Szxc5/47R6qMRulOSkfsDLO1CaGUNM=
github.com/envoyproxy/go-control-plane v0.11.2-0.20230726084335-b501c94cb61e h1:HXKPsHR2TRMWMdbCqwWsWzNqml/+oMQO3zm5LhhoYOo=
github.com/envoyproxy/go-control-plane v0.11.2-0.20230726084335-b501c94cb61e/go.mod h1:djL+W7LURiPM8Szxc5/47R6qMRulOSkfsDLO1CaGUNM=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
@@ -35,7 +35,7 @@ ENVOY_ORG="$(grep -Pom1 "^ENVOY_ORG = \"\K[a-zA-Z-]+" "${WORKSPACE}")"
ENVOY_REPO="$(grep -Pom1 "^ENVOY_REPO = \"\K[a-zA-Z-]+" "${WORKSPACE}")"

# get latest commit for specified org/repo
-LATEST_SHA="$(git ls-remote https://github.com/"${ENVOY_ORG}"/"${ENVOY_REPO}" "$UPDATE_BRANCH" | awk '{ print $1}')"
+LATEST_SHA="$(git ls-remote https://github.com/"${ENVOY_ORG}"/"${ENVOY_REPO}" "refs/heads/$UPDATE_BRANCH" | awk '{ print $1}')"
DATE=$(curl -s -H "Accept: application/vnd.github.v3+json" https://api.github.com/repos/"${ENVOY_ORG}""/""${ENVOY_REPO}"/commits/"${LATEST_SHA}" | jq '.commit.committer.date')
DATE=$(echo "${DATE/\"/}" | cut -d'T' -f1)
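The refs/heads/ prefix added above pins the lookup to a branch head. A short illustration with a placeholder branch name (an unqualified pattern can match several refs, for example a tag and a branch that share a name):

UPDATE_BRANCH=main   # placeholder branch name
# May print more than one ref if a tag with the same name exists:
git ls-remote https://github.com/envoyproxy/envoy "$UPDATE_BRANCH"
# Pins the lookup to the branch head only:
git ls-remote https://github.com/envoyproxy/envoy "refs/heads/$UPDATE_BRANCH" | awk '{ print $1 }'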
@@ -55,24 +55,6 @@ Http::Protocol AlpnFilterConfig::getHttpProtocol(
}

Http::FilterHeadersStatus AlpnFilter::decodeHeaders(Http::RequestHeaderMap&, bool) {
-  const auto upstream_info = decoder_callbacks_->streamInfo().upstreamInfo();
-  auto upstream_host = upstream_info ? upstream_info->upstreamHost() : nullptr;
-  if (upstream_host && upstream_host->metadata()) {
-    const auto& filter_metadata = upstream_host->metadata()->filter_metadata();
-    const auto& it = filter_metadata.find("istio");
-    if (it != filter_metadata.end()) {
-      const auto& alpn_it = it->second.fields().find("alpn_override");
-      if (alpn_it != it->second.fields().end()) {
-        const auto alpnOverrideMetadata = alpn_it->second.string_value();
-        if (alpnOverrideMetadata == "false") {
-          // Skip ALPN header rewrite
-          ENVOY_LOG(debug, "Skipping ALPN header rewrite because alpn_override metadata is false");
-          return Http::FilterHeadersStatus::Continue;
-        }
-      }
-    }
-  }
-
  Router::RouteConstSharedPtr route = decoder_callbacks_->route();
  const Router::RouteEntry* route_entry;
  if (!route || !(route_entry = route->routeEntry())) {

@@ -87,6 +69,21 @@ Http::FilterHeadersStatus AlpnFilter::decodeHeaders(Http::RequestHeaderMap&, boo
    return Http::FilterHeadersStatus::Continue;
  }

+  const auto& filter_metadata = cluster->info()->metadata().filter_metadata();
+  const auto& istio = filter_metadata.find("istio");
+  if (istio != filter_metadata.end()) {
+    const auto& alpn_override = istio->second.fields().find("alpn_override");
+    if (alpn_override != istio->second.fields().end()) {
+      const auto alpn_override_value = alpn_override->second.string_value();
+      if (alpn_override_value == "false") {
+        // Skip ALPN header rewrite
+        ENVOY_LOG(debug,
+                  "Skipping ALPN header rewrite because istio.alpn_override metadata is false");
+        return Http::FilterHeadersStatus::Continue;
+      }
+    }
+  }
+
  auto protocols =
      cluster->info()->upstreamHttpProtocol(decoder_callbacks_->streamInfo().protocol());
  const auto& alpn_override = config_->alpnOverrides(protocols[0]);
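For reference, the cluster metadata shape that this new check reads is the same structure the updated unit test below parses; as a sketch in Envoy's YAML form (values illustrative only):

metadata:
  filter_metadata:
    istio:
      # the string "false" makes the filter skip the ALPN header rewrite for this cluster
      alpn_override: "false"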
@@ -169,28 +169,22 @@ TEST_F(AlpnFilterTest, EmptyOverrideAlpn) {

TEST_F(AlpnFilterTest, AlpnOverrideFalse) {
  NiceMock<StreamInfo::MockStreamInfo> stream_info;
-  std::shared_ptr<NiceMock<StreamInfo::MockUpstreamInfo>> upstream_info(
-      new NiceMock<StreamInfo::MockUpstreamInfo>());
-  std::shared_ptr<NiceMock<Envoy::Upstream::MockHostDescription>> upstream_host(
-      new NiceMock<Envoy::Upstream::MockHostDescription>());
-  auto metadata = std::make_shared<envoy::config::core::v3::Metadata>(
-      TestUtility::parseYaml<envoy::config::core::v3::Metadata>(
-          R"EOF(
+  auto metadata = TestUtility::parseYaml<envoy::config::core::v3::Metadata>(R"EOF(
filter_metadata:
  istio:
    alpn_override: "false"
-)EOF"));
+)EOF");

  ON_CALL(callbacks_, streamInfo()).WillByDefault(ReturnRef(stream_info));
-  ON_CALL(stream_info, upstreamInfo()).WillByDefault(Return(upstream_info));
-  ON_CALL(*upstream_info, upstreamHost()).WillByDefault(Return(upstream_host));
-  ON_CALL(*upstream_host, metadata()).WillByDefault(Return(metadata));
+  ON_CALL(cluster_manager_, getThreadLocalCluster(_)).WillByDefault(Return(fake_cluster_.get()));
+  ON_CALL(*fake_cluster_, info()).WillByDefault(Return(cluster_info_));
+  ON_CALL(*cluster_info_, metadata()).WillByDefault(ReturnRef(metadata));

  const AlpnOverrides alpn = {{Http::Protocol::Http10, {"foo", "bar"}},
                              {Http::Protocol::Http11, {"baz"}}};
  auto filter = makeAlpnOverrideFilter(alpn);

-  EXPECT_CALL(callbacks_, route()).Times(0);
+  EXPECT_CALL(*cluster_info_, upstreamHttpProtocol(_)).Times(0);
  EXPECT_EQ(filter->decodeHeaders(headers_, false), Http::FilterHeadersStatus::Continue);
}
@ -93,10 +93,8 @@ bool peerInfoRead(Reporter reporter, const StreamInfo::FilterState& filter_state
|
|||
reporter == Reporter::ServerSidecar || reporter == Reporter::ServerGateway
|
||||
? "wasm.downstream_peer_id"
|
||||
: "wasm.upstream_peer_id";
|
||||
const auto* object =
|
||||
filter_state.getDataReadOnly<Envoy::Extensions::Filters::Common::Expr::CelState>(
|
||||
filter_state_key);
|
||||
return object != nullptr;
|
||||
return filter_state.hasDataWithName(filter_state_key) ||
|
||||
filter_state.hasDataWithName("envoy.wasm.metadata_exchange.peer_unknown");
|
||||
}
|
||||
|
||||
const Wasm::Common::FlatNode* peerInfo(Reporter reporter,
|
||||
|
@ -604,18 +602,21 @@ struct Config : public Logger::Loggable<Logger::Id::filter> {
|
|||
|
||||
// RAII for stream context propagation.
|
||||
struct StreamOverrides : public Filters::Common::Expr::StreamActivation {
|
||||
StreamOverrides(Config& parent, Stats::StatNameDynamicPool& pool,
|
||||
const StreamInfo::StreamInfo& info,
|
||||
const Http::RequestHeaderMap* request_headers = nullptr,
|
||||
const Http::ResponseHeaderMap* response_headers = nullptr,
|
||||
const Http::ResponseTrailerMap* response_trailers = nullptr)
|
||||
: parent_(parent) {
|
||||
StreamOverrides(Config& parent, Stats::StatNameDynamicPool& pool)
|
||||
: parent_(parent), pool_(pool) {}
|
||||
|
||||
void evaluate(const StreamInfo::StreamInfo& info,
|
||||
const Http::RequestHeaderMap* request_headers = nullptr,
|
||||
const Http::ResponseHeaderMap* response_headers = nullptr,
|
||||
const Http::ResponseTrailerMap* response_trailers = nullptr) {
|
||||
evaluated_ = true;
|
||||
if (parent_.metric_overrides_) {
|
||||
activation_info_ = &info;
|
||||
activation_request_headers_ = request_headers;
|
||||
activation_response_headers_ = response_headers;
|
||||
activation_response_trailers_ = response_trailers;
|
||||
const auto& compiled_exprs = parent_.metric_overrides_->compiled_exprs_;
|
||||
expr_values_.clear();
|
||||
expr_values_.reserve(compiled_exprs.size());
|
||||
for (size_t id = 0; id < compiled_exprs.size(); id++) {
|
||||
Protobuf::Arena arena;
|
||||
|
@ -631,7 +632,7 @@ struct Config : public Logger::Loggable<Logger::Id::filter> {
|
|||
}
|
||||
expr_values_.push_back(std::make_pair(Stats::StatName(), amount));
|
||||
} else {
|
||||
expr_values_.push_back(std::make_pair(pool.add(string_value), 0));
|
||||
expr_values_.push_back(std::make_pair(pool_.add(string_value), 0));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -662,6 +663,7 @@ struct Config : public Logger::Loggable<Logger::Id::filter> {
|
|||
|
||||
void addCounter(Stats::StatName metric, const Stats::StatNameTagVector& tags,
|
||||
uint64_t amount = 1) {
|
||||
ASSERT(evaluated_);
|
||||
if (parent_.metric_overrides_) {
|
||||
if (parent_.metric_overrides_->drop_.contains(metric)) {
|
||||
return;
|
||||
|
@ -679,6 +681,7 @@ struct Config : public Logger::Loggable<Logger::Id::filter> {
|
|||
|
||||
void recordHistogram(Stats::StatName metric, Stats::Histogram::Unit unit,
|
||||
const Stats::StatNameTagVector& tags, uint64_t value) {
|
||||
ASSERT(evaluated_);
|
||||
if (parent_.metric_overrides_) {
|
||||
if (parent_.metric_overrides_->drop_.contains(metric)) {
|
||||
return;
|
||||
|
@ -695,6 +698,7 @@ struct Config : public Logger::Loggable<Logger::Id::filter> {
|
|||
}
|
||||
|
||||
void recordCustomMetrics() {
|
||||
ASSERT(evaluated_);
|
||||
if (parent_.metric_overrides_) {
|
||||
for (const auto& [_, metric] : parent_.metric_overrides_->custom_metrics_) {
|
||||
const auto tags = parent_.metric_overrides_->overrideTags(metric.name_, {}, expr_values_);
|
||||
|
@ -725,7 +729,9 @@ struct Config : public Logger::Loggable<Logger::Id::filter> {
|
|||
}
|
||||
|
||||
Config& parent_;
|
||||
Stats::StatNameDynamicPool& pool_;
|
||||
std::vector<std::pair<Stats::StatName, uint64_t>> expr_values_;
|
||||
bool evaluated_{false};
|
||||
};
|
||||
|
||||
void recordVersion() {
|
||||
|
@ -760,7 +766,8 @@ class IstioStatsFilter : public Http::PassThroughFilter,
|
|||
public Network::ConnectionCallbacks {
|
||||
public:
|
||||
IstioStatsFilter(ConfigSharedPtr config)
|
||||
: config_(config), context_(*config->context_), pool_(config->scope()->symbolTable()) {
|
||||
: config_(config), context_(*config->context_), pool_(config->scope()->symbolTable()),
|
||||
stream_(*config_, pool_) {
|
||||
tags_.reserve(25);
|
||||
switch (config_->reporter()) {
|
||||
case Reporter::ServerSidecar:
|
||||
|
@ -813,23 +820,24 @@ public:
|
|||
}
|
||||
populateFlagsAndConnectionSecurity(info);
|
||||
|
||||
Config::StreamOverrides stream(*config_, pool_, info, request_headers, response_headers,
|
||||
response_trailers);
|
||||
stream.addCounter(context_.requests_total_, tags_);
|
||||
// Evaluate the end stream override expressions for HTTP. This may change values for periodic
|
||||
// metrics.
|
||||
stream_.evaluate(info, request_headers, response_headers, response_trailers);
|
||||
stream_.addCounter(context_.requests_total_, tags_);
|
||||
auto duration = info.requestComplete();
|
||||
if (duration.has_value()) {
|
||||
stream.recordHistogram(context_.request_duration_milliseconds_,
|
||||
Stats::Histogram::Unit::Milliseconds, tags_,
|
||||
absl::FromChrono(duration.value()) / absl::Milliseconds(1));
|
||||
stream_.recordHistogram(context_.request_duration_milliseconds_,
|
||||
Stats::Histogram::Unit::Milliseconds, tags_,
|
||||
absl::FromChrono(duration.value()) / absl::Milliseconds(1));
|
||||
}
|
||||
auto meter = info.getDownstreamBytesMeter();
|
||||
if (meter) {
|
||||
stream.recordHistogram(context_.request_bytes_, Stats::Histogram::Unit::Bytes, tags_,
|
||||
meter->wireBytesReceived());
|
||||
stream.recordHistogram(context_.response_bytes_, Stats::Histogram::Unit::Bytes, tags_,
|
||||
meter->wireBytesSent());
|
||||
stream_.recordHistogram(context_.request_bytes_, Stats::Histogram::Unit::Bytes, tags_,
|
||||
meter->wireBytesReceived());
|
||||
stream_.recordHistogram(context_.response_bytes_, Stats::Histogram::Unit::Bytes, tags_,
|
||||
meter->wireBytesSent());
|
||||
}
|
||||
stream.recordCustomMetrics();
|
||||
stream_.recordCustomMetrics();
|
||||
}
|
||||
|
||||
// Network::ReadFilter
|
||||
|
@ -877,6 +885,10 @@ private:
|
|||
if (peer_read_ || end_stream) {
|
||||
populatePeerInfo(info, info.filterState());
|
||||
}
|
||||
if (is_grpc_ && (peer_read_ || end_stream)) {
|
||||
// For periodic HTTP metric, evaluate once when the peer info is read.
|
||||
stream_.evaluate(decoder_callbacks_->streamInfo());
|
||||
}
|
||||
}
|
||||
if (is_grpc_ && (peer_read_ || end_stream)) {
|
||||
const auto* counters =
|
||||
|
@ -884,11 +896,10 @@ private:
|
|||
.filterState()
|
||||
->getDataReadOnly<GrpcStats::GrpcStatsObject>("envoy.filters.http.grpc_stats");
|
||||
if (counters) {
|
||||
Config::StreamOverrides stream(*config_, pool_, decoder_callbacks_->streamInfo());
|
||||
stream.addCounter(context_.request_messages_total_, tags_,
|
||||
counters->request_message_count - request_message_count_);
|
||||
stream.addCounter(context_.response_messages_total_, tags_,
|
||||
counters->response_message_count - response_message_count_);
|
||||
stream_.addCounter(context_.request_messages_total_, tags_,
|
||||
counters->request_message_count - request_message_count_);
|
||||
stream_.addCounter(context_.response_messages_total_, tags_,
|
||||
counters->response_message_count - response_message_count_);
|
||||
request_message_count_ = counters->request_message_count;
|
||||
response_message_count_ = counters->response_message_count;
|
||||
}
|
||||
|
@ -906,7 +917,6 @@ private:
|
|||
? *upstream_info->upstreamFilterState()
|
||||
: info.filterState();
|
||||
|
||||
Config::StreamOverrides stream(*config_, pool_, info);
|
||||
if (!peer_read_) {
|
||||
peer_read_ = peerInfoRead(config_->reporter(), filter_state);
|
||||
// Report connection open once peer info is read or connection is closed.
|
||||
|
@ -914,23 +924,25 @@ private:
|
|||
populatePeerInfo(info, filter_state);
|
||||
tags_.push_back({context_.request_protocol_, context_.tcp_});
|
||||
populateFlagsAndConnectionSecurity(info);
|
||||
stream.addCounter(context_.tcp_connections_opened_total_, tags_);
|
||||
// For TCP, evaluate only once immediately before emitting the first metric.
|
||||
stream_.evaluate(info);
|
||||
stream_.addCounter(context_.tcp_connections_opened_total_, tags_);
|
||||
}
|
||||
}
|
||||
if (peer_read_ || end_stream) {
|
||||
auto meter = info.getDownstreamBytesMeter();
|
||||
if (meter) {
|
||||
stream.addCounter(context_.tcp_sent_bytes_total_, tags_,
|
||||
meter->wireBytesSent() - bytes_sent_);
|
||||
stream_.addCounter(context_.tcp_sent_bytes_total_, tags_,
|
||||
meter->wireBytesSent() - bytes_sent_);
|
||||
bytes_sent_ = meter->wireBytesSent();
|
||||
stream.addCounter(context_.tcp_received_bytes_total_, tags_,
|
||||
meter->wireBytesReceived() - bytes_received_);
|
||||
stream_.addCounter(context_.tcp_received_bytes_total_, tags_,
|
||||
meter->wireBytesReceived() - bytes_received_);
|
||||
bytes_received_ = meter->wireBytesReceived();
|
||||
}
|
||||
}
|
||||
if (end_stream) {
|
||||
stream.addCounter(context_.tcp_connections_closed_total_, tags_);
|
||||
stream.recordCustomMetrics();
|
||||
stream_.addCounter(context_.tcp_connections_closed_total_, tags_);
|
||||
stream_.recordCustomMetrics();
|
||||
}
|
||||
}
|
||||
void onReportTimer() {
|
||||
|
@ -1196,6 +1208,8 @@ private:
|
|||
bool is_grpc_{false};
|
||||
uint64_t request_message_count_{0};
|
||||
uint64_t response_message_count_{0};
|
||||
// Custom expression values are evaluated at most twice: at the start and the end of the stream.
|
||||
Config::StreamOverrides stream_;
|
||||
};
|
||||
|
||||
} // namespace
|
||||
|
|
|
@@ -20,23 +20,26 @@ package io.istio.http.peer_metadata;
// Peer metadata provider filter. This filter encapsulates the discovery of the
// peer telemetry attributes for consumption by the telemetry filters.
message Config {
-  // Use CONNECT baggage header encoding.
+  // This method uses `baggage` header encoding.
  message Baggage {
  }

-  // Use workload metadata xDS. Requires that the bootstrap extension is enabled.
-  // For downstream discovery, uses the remote address as the lookup key.
+  // This method uses the workload metadata xDS. Requires that the bootstrap extension is enabled.
+  // For downstream discovery, the remote address is the lookup key in xDS.
  // For upstream discovery:
  //
-  // * If the upstream host address is an IP, uses it as the lookup key;
+  // * If the upstream host address is an IP, this IP is used as the lookup key;
  //
  // * If the upstream host address is internal, uses the
-  //   "filter_metadata.tunnel.destination" dynamic metadata value.
+  //   "filter_metadata.tunnel.destination" dynamic metadata value as the lookup key.
  message WorkloadDiscovery {
  }

-  // Use Istio HTTP metadata exchange headers. Removes these headers if found.
+  // This method uses Istio HTTP metadata exchange headers, e.g. `x-envoy-peer-metadata`. Removes these headers if found.
  message IstioHeaders {
+    // Strip x-envoy-peer-metadata and x-envoy-peer-metadata-id headers on HTTP requests to services outside the mesh.
+    // Detects upstream clusters with `istio` and `external` filter metadata fields
+    bool skip_external_clusters = 1;
  }

  // An exhaustive list of the derivation methods.

@@ -49,9 +52,11 @@ message Config {
  }

  // The order of the derivation of the downstream peer metadata, in the precedence order.
+  // First successful lookup wins.
  repeated DiscoveryMethod downstream_discovery = 1;

  // The order of the derivation of the upstream peer metadata, in the precedence order.
+  // First successful lookup wins.
  repeated DiscoveryMethod upstream_discovery = 2;

  // An exhaustive list of the metadata propagation methods.
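As a sketch of how these fields compose in a filter configuration (YAML form; the oneof field names such as workload_discovery and istio_headers are assumed to mirror the message names above, and the ordering is purely illustrative), discovery methods are tried in the listed precedence order and propagation methods control which metadata exchange headers are injected:

downstream_discovery:
- istio_headers: {}
- workload_discovery: {}
upstream_discovery:
- workload_discovery: {}
upstream_propagation:
- istio_headers:
    skip_external_clusters: true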
@ -56,8 +56,8 @@ using CelPrototypes = ConstSingleton<CelPrototypeValues>;
|
|||
|
||||
class BaggageMethod : public DiscoveryMethod {
|
||||
public:
|
||||
absl::optional<PeerInfo> derivePeerInfo(const StreamInfo::StreamInfo&,
|
||||
Http::HeaderMap&) const override;
|
||||
absl::optional<PeerInfo> derivePeerInfo(const StreamInfo::StreamInfo&, Http::HeaderMap&,
|
||||
Context&) const override;
|
||||
};
|
||||
|
||||
class XDSMethod : public DiscoveryMethod {
|
||||
|
@ -65,28 +65,16 @@ public:
|
|||
XDSMethod(bool downstream, Server::Configuration::ServerFactoryContext& factory_context)
|
||||
: downstream_(downstream),
|
||||
metadata_provider_(Extensions::Common::WorkloadDiscovery::GetProvider(factory_context)) {}
|
||||
absl::optional<PeerInfo> derivePeerInfo(const StreamInfo::StreamInfo&,
|
||||
Http::HeaderMap&) const override;
|
||||
absl::optional<PeerInfo> derivePeerInfo(const StreamInfo::StreamInfo&, Http::HeaderMap&,
|
||||
Context&) const override;
|
||||
|
||||
private:
|
||||
const bool downstream_;
|
||||
Extensions::Common::WorkloadDiscovery::WorkloadMetadataProviderSharedPtr metadata_provider_;
|
||||
};
|
||||
|
||||
class MXMethod : public DiscoveryMethod {
|
||||
public:
|
||||
absl::optional<PeerInfo> derivePeerInfo(const StreamInfo::StreamInfo&,
|
||||
Http::HeaderMap&) const override;
|
||||
void remove(Http::HeaderMap&) const override;
|
||||
|
||||
private:
|
||||
absl::optional<PeerInfo> lookup(absl::string_view id, absl::string_view value) const;
|
||||
mutable absl::flat_hash_map<std::string, std::string> cache_;
|
||||
const int64_t max_peer_cache_size_{500};
|
||||
};
|
||||
|
||||
absl::optional<PeerInfo> BaggageMethod::derivePeerInfo(const StreamInfo::StreamInfo&,
|
||||
Http::HeaderMap& headers) const {
|
||||
Http::HeaderMap& headers, Context&) const {
|
||||
const auto header_string =
|
||||
Http::HeaderUtility::getAllOfHeaderAsString(headers, Headers::get().Baggage);
|
||||
const auto result = header_string.result();
|
||||
|
@ -98,7 +86,7 @@ absl::optional<PeerInfo> BaggageMethod::derivePeerInfo(const StreamInfo::StreamI
|
|||
}
|
||||
|
||||
absl::optional<PeerInfo> XDSMethod::derivePeerInfo(const StreamInfo::StreamInfo& info,
|
||||
Http::HeaderMap&) const {
|
||||
Http::HeaderMap&, Context&) const {
|
||||
if (!metadata_provider_) {
|
||||
return {};
|
||||
}
|
||||
|
@ -140,12 +128,23 @@ absl::optional<PeerInfo> XDSMethod::derivePeerInfo(const StreamInfo::StreamInfo&
|
|||
return {};
|
||||
}
|
||||
|
||||
MXMethod::MXMethod(bool downstream, Server::Configuration::ServerFactoryContext& factory_context)
|
||||
: downstream_(downstream), tls_(factory_context.threadLocal()) {
|
||||
tls_.set([](Event::Dispatcher&) { return std::make_shared<MXCache>(); });
|
||||
}
|
||||
|
||||
absl::optional<PeerInfo> MXMethod::derivePeerInfo(const StreamInfo::StreamInfo&,
|
||||
Http::HeaderMap& headers) const {
|
||||
Http::HeaderMap& headers, Context& ctx) const {
|
||||
const auto peer_id_header = headers.get(Headers::get().ExchangeMetadataHeaderId);
|
||||
if (downstream_) {
|
||||
ctx.request_peer_id_received_ = !peer_id_header.empty();
|
||||
}
|
||||
absl::string_view peer_id =
|
||||
peer_id_header.empty() ? "" : peer_id_header[0]->value().getStringView();
|
||||
const auto peer_info_header = headers.get(Headers::get().ExchangeMetadataHeader);
|
||||
if (downstream_) {
|
||||
ctx.request_peer_received_ = !peer_info_header.empty();
|
||||
}
|
||||
absl::string_view peer_info =
|
||||
peer_info_header.empty() ? "" : peer_info_header[0]->value().getStringView();
|
||||
if (!peer_info.empty()) {
|
||||
|
@ -162,9 +161,10 @@ void MXMethod::remove(Http::HeaderMap& headers) const {
|
|||
absl::optional<PeerInfo> MXMethod::lookup(absl::string_view id, absl::string_view value) const {
|
||||
// This code is copied from:
|
||||
// https://github.com/istio/proxy/blob/release-1.18/extensions/metadata_exchange/plugin.cc#L116
|
||||
auto& cache = tls_->cache_;
|
||||
if (max_peer_cache_size_ > 0 && !id.empty()) {
|
||||
auto it = cache_.find(id);
|
||||
if (it != cache_.end()) {
|
||||
auto it = cache.find(id);
|
||||
if (it != cache.end()) {
|
||||
return it->second;
|
||||
}
|
||||
}
|
||||
|
@ -177,17 +177,23 @@ absl::optional<PeerInfo> MXMethod::lookup(absl::string_view id, absl::string_vie
|
|||
std::string out(reinterpret_cast<const char*>(fb.data()), fb.size());
|
||||
if (max_peer_cache_size_ > 0 && !id.empty()) {
|
||||
// do not let the cache grow beyond max cache size.
|
||||
if (static_cast<uint32_t>(cache_.size()) > max_peer_cache_size_) {
|
||||
cache_.erase(cache_.begin(), std::next(cache_.begin(), max_peer_cache_size_ / 4));
|
||||
if (static_cast<uint32_t>(cache.size()) > max_peer_cache_size_) {
|
||||
cache.erase(cache.begin(), std::next(cache.begin(), max_peer_cache_size_ / 4));
|
||||
}
|
||||
cache_.emplace(id, out);
|
||||
cache.emplace(id, out);
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
||||
MXPropagationMethod::MXPropagationMethod(
|
||||
Server::Configuration::ServerFactoryContext& factory_context)
|
||||
: id_(factory_context.localInfo().node().id()) {
|
||||
bool downstream, Server::Configuration::ServerFactoryContext& factory_context,
|
||||
const io::istio::http::peer_metadata::Config_IstioHeaders& istio_headers)
|
||||
: downstream_(downstream), id_(factory_context.localInfo().node().id()),
|
||||
value_(computeValue(factory_context)),
|
||||
skip_external_clusters_(istio_headers.skip_external_clusters()) {}
|
||||
|
||||
std::string MXPropagationMethod::computeValue(
|
||||
Server::Configuration::ServerFactoryContext& factory_context) const {
|
||||
const auto fb = ::Wasm::Common::extractNodeFlatBufferFromStruct(
|
||||
factory_context.localInfo().node().metadata());
|
||||
google::protobuf::Struct metadata;
|
||||
|
@ -195,12 +201,22 @@ MXPropagationMethod::MXPropagationMethod(
|
|||
*flatbuffers::GetRoot<::Wasm::Common::FlatNode>(fb.data()), &metadata);
|
||||
std::string metadata_bytes;
|
||||
::Wasm::Common::serializeToStringDeterministic(metadata, &metadata_bytes);
|
||||
value_ = Base64::encode(metadata_bytes.data(), metadata_bytes.size());
|
||||
return Base64::encode(metadata_bytes.data(), metadata_bytes.size());
|
||||
}
|
||||
|
||||
void MXPropagationMethod::inject(Http::HeaderMap& headers) const {
|
||||
headers.setReference(Headers::get().ExchangeMetadataHeaderId, id_);
|
||||
headers.setReference(Headers::get().ExchangeMetadataHeader, value_);
|
||||
void MXPropagationMethod::inject(const StreamInfo::StreamInfo& info, Http::HeaderMap& headers,
|
||||
Context& ctx) const {
|
||||
if (skip_external_clusters_) {
|
||||
if (skipMXHeaders(info)) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
if (!downstream_ || ctx.request_peer_id_received_) {
|
||||
headers.setReference(Headers::get().ExchangeMetadataHeaderId, id_);
|
||||
}
|
||||
if (!downstream_ || ctx.request_peer_received_) {
|
||||
headers.setReference(Headers::get().ExchangeMetadataHeader, value_);
|
||||
}
|
||||
}
|
||||
|
||||
FilterConfig::FilterConfig(const io::istio::http::peer_metadata::Config& config,
|
||||
|
@ -211,9 +227,9 @@ FilterConfig::FilterConfig(const io::istio::http::peer_metadata::Config& config,
|
|||
upstream_discovery_(
|
||||
buildDiscoveryMethods(config.upstream_discovery(), false, factory_context)),
|
||||
downstream_propagation_(
|
||||
buildPropagationMethods(config.downstream_propagation(), factory_context)),
|
||||
buildPropagationMethods(config.downstream_propagation(), true, factory_context)),
|
||||
upstream_propagation_(
|
||||
buildPropagationMethods(config.upstream_propagation(), factory_context)) {}
|
||||
buildPropagationMethods(config.upstream_propagation(), false, factory_context)) {}
|
||||
|
||||
std::vector<DiscoveryMethodPtr> FilterConfig::buildDiscoveryMethods(
|
||||
const Protobuf::RepeatedPtrField<io::istio::http::peer_metadata::Config::DiscoveryMethod>&
|
||||
|
@ -233,7 +249,8 @@ std::vector<DiscoveryMethodPtr> FilterConfig::buildDiscoveryMethods(
|
|||
break;
|
||||
case io::istio::http::peer_metadata::Config::DiscoveryMethod::MethodSpecifierCase::
|
||||
kIstioHeaders:
|
||||
methods.push_back(std::make_unique<MXMethod>());
|
||||
methods.push_back(
|
||||
std::make_unique<MXMethod>(downstream, factory_context.getServerFactoryContext()));
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
|
@ -245,15 +262,15 @@ std::vector<DiscoveryMethodPtr> FilterConfig::buildDiscoveryMethods(
|
|||
std::vector<PropagationMethodPtr> FilterConfig::buildPropagationMethods(
|
||||
const Protobuf::RepeatedPtrField<io::istio::http::peer_metadata::Config::PropagationMethod>&
|
||||
config,
|
||||
Server::Configuration::FactoryContext& factory_context) const {
|
||||
bool downstream, Server::Configuration::FactoryContext& factory_context) const {
|
||||
std::vector<PropagationMethodPtr> methods;
|
||||
methods.reserve(config.size());
|
||||
for (const auto& method : config) {
|
||||
switch (method.method_specifier_case()) {
|
||||
case io::istio::http::peer_metadata::Config::PropagationMethod::MethodSpecifierCase::
|
||||
kIstioHeaders:
|
||||
methods.push_back(
|
||||
std::make_unique<MXPropagationMethod>(factory_context.getServerFactoryContext()));
|
||||
methods.push_back(std::make_unique<MXPropagationMethod>(
|
||||
downstream, factory_context.getServerFactoryContext(), method.istio_headers()));
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
|
@ -262,20 +279,20 @@ std::vector<PropagationMethodPtr> FilterConfig::buildPropagationMethods(
|
|||
return methods;
|
||||
}
|
||||
|
||||
void FilterConfig::discoverDownstream(StreamInfo::StreamInfo& info,
|
||||
Http::RequestHeaderMap& headers) const {
|
||||
discover(info, true, headers);
|
||||
void FilterConfig::discoverDownstream(StreamInfo::StreamInfo& info, Http::RequestHeaderMap& headers,
|
||||
Context& ctx) const {
|
||||
discover(info, true, headers, ctx);
|
||||
}
|
||||
|
||||
void FilterConfig::discoverUpstream(StreamInfo::StreamInfo& info,
|
||||
Http::ResponseHeaderMap& headers) const {
|
||||
discover(info, false, headers);
|
||||
void FilterConfig::discoverUpstream(StreamInfo::StreamInfo& info, Http::ResponseHeaderMap& headers,
|
||||
Context& ctx) const {
|
||||
discover(info, false, headers, ctx);
|
||||
}
|
||||
|
||||
void FilterConfig::discover(StreamInfo::StreamInfo& info, bool downstream,
|
||||
Http::HeaderMap& headers) const {
|
||||
void FilterConfig::discover(StreamInfo::StreamInfo& info, bool downstream, Http::HeaderMap& headers,
|
||||
Context& ctx) const {
|
||||
for (const auto& method : downstream ? downstream_discovery_ : upstream_discovery_) {
|
||||
const auto result = method->derivePeerInfo(info, headers);
|
||||
const auto result = method->derivePeerInfo(info, headers, ctx);
|
||||
if (result) {
|
||||
setFilterState(info, downstream, *result);
|
||||
break;
|
||||
|
@ -286,43 +303,74 @@ void FilterConfig::discover(StreamInfo::StreamInfo& info, bool downstream,
|
|||
}
|
||||
}
|
||||
|
||||
void FilterConfig::injectDownstream(Http::ResponseHeaderMap& headers) const {
|
||||
void FilterConfig::injectDownstream(const StreamInfo::StreamInfo& info,
|
||||
Http::ResponseHeaderMap& headers, Context& ctx) const {
|
||||
for (const auto& method : downstream_propagation_) {
|
||||
method->inject(headers);
|
||||
method->inject(info, headers, ctx);
|
||||
}
|
||||
}
|
||||
|
||||
void FilterConfig::injectUpstream(Http::RequestHeaderMap& headers) const {
|
||||
void FilterConfig::injectUpstream(const StreamInfo::StreamInfo& info,
|
||||
Http::RequestHeaderMap& headers, Context& ctx) const {
|
||||
for (const auto& method : upstream_propagation_) {
|
||||
method->inject(headers);
|
||||
method->inject(info, headers, ctx);
|
||||
}
|
||||
}
|
||||
|
||||
void FilterConfig::setFilterState(StreamInfo::StreamInfo& info, bool downstream,
|
||||
const std::string& value) const {
|
||||
auto node_info = std::make_unique<CelStateHashable>(CelPrototypes::get().NodeInfo);
|
||||
node_info->setValue(value);
|
||||
info.filterState()->setData(downstream ? WasmDownstreamPeer : WasmUpstreamPeer,
|
||||
std::move(node_info), StreamInfo::FilterState::StateType::Mutable,
|
||||
StreamInfo::FilterState::LifeSpan::FilterChain, sharedWithUpstream());
|
||||
const absl::string_view key = downstream ? WasmDownstreamPeer : WasmUpstreamPeer;
|
||||
if (!info.filterState()->hasDataWithName(key)) {
|
||||
auto node_info = std::make_unique<CelStateHashable>(CelPrototypes::get().NodeInfo);
|
||||
node_info->setValue(value);
|
||||
info.filterState()->setData(
|
||||
key, std::move(node_info), StreamInfo::FilterState::StateType::Mutable,
|
||||
StreamInfo::FilterState::LifeSpan::FilterChain, sharedWithUpstream());
|
||||
} else {
|
||||
ENVOY_LOG(debug, "Duplicate peer metadata, skipping");
|
||||
}
|
||||
// This is needed because stats filter awaits for the prefix on the wire and checks for the key
|
||||
// presence before emitting any telemetry.
|
||||
auto node_id = std::make_unique<Filters::Common::Expr::CelState>(CelPrototypes::get().NodeId);
|
||||
node_id->setValue("unknown");
|
||||
info.filterState()->setData(downstream ? WasmDownstreamPeerID : WasmUpstreamPeerID,
|
||||
std::move(node_id), StreamInfo::FilterState::StateType::Mutable,
|
||||
StreamInfo::FilterState::LifeSpan::FilterChain, sharedWithUpstream());
|
||||
const absl::string_view id_key = downstream ? WasmDownstreamPeerID : WasmUpstreamPeerID;
|
||||
if (!info.filterState()->hasDataWithName(id_key)) {
|
||||
auto node_id = std::make_unique<Filters::Common::Expr::CelState>(CelPrototypes::get().NodeId);
|
||||
node_id->setValue("unknown");
|
||||
info.filterState()->setData(
|
||||
id_key, std::move(node_id), StreamInfo::FilterState::StateType::Mutable,
|
||||
StreamInfo::FilterState::LifeSpan::FilterChain, sharedWithUpstream());
|
||||
} else {
|
||||
ENVOY_LOG(debug, "Duplicate peer id, skipping");
|
||||
}
|
||||
}
|
||||
|
||||
Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, bool) {
|
||||
config_->discoverDownstream(decoder_callbacks_->streamInfo(), headers);
|
||||
config_->injectUpstream(headers);
|
||||
config_->discoverDownstream(decoder_callbacks_->streamInfo(), headers, ctx_);
|
||||
config_->injectUpstream(decoder_callbacks_->streamInfo(), headers, ctx_);
|
||||
return Http::FilterHeadersStatus::Continue;
|
||||
}
|
||||
|
||||
bool MXPropagationMethod::skipMXHeaders(const StreamInfo::StreamInfo& info) const {
|
||||
const auto& cluster_info = info.upstreamClusterInfo();
|
||||
if (cluster_info && cluster_info.value()) {
|
||||
const auto& cluster_name = cluster_info.value()->name();
|
||||
if (cluster_name == "PassthroughCluster") {
|
||||
return true;
|
||||
}
|
||||
const auto& filter_metadata = cluster_info.value()->metadata().filter_metadata();
|
||||
const auto& it = filter_metadata.find("istio");
|
||||
if (it != filter_metadata.end()) {
|
||||
const auto& skip_mx = it->second.fields().find("external");
|
||||
if (skip_mx != it->second.fields().end()) {
|
||||
return skip_mx->second.bool_value();
|
||||
}
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
Http::FilterHeadersStatus Filter::encodeHeaders(Http::ResponseHeaderMap& headers, bool) {
|
||||
config_->discoverUpstream(decoder_callbacks_->streamInfo(), headers);
|
||||
config_->injectDownstream(headers);
|
||||
config_->discoverUpstream(decoder_callbacks_->streamInfo(), headers, ctx_);
|
||||
config_->injectDownstream(decoder_callbacks_->streamInfo(), headers, ctx_);
|
||||
return Http::FilterHeadersStatus::Continue;
|
||||
}
|
||||
|
||||
|
|
|
@ -43,44 +43,71 @@ using Headers = ConstSingleton<HeaderValues>;

// Peer info in the flatbuffers format.
using PeerInfo = std::string;

struct Context {
  bool request_peer_id_received_{false};
  bool request_peer_received_{false};
};

// Base class for the discovery methods. First derivation wins but all methods perform removal.
class DiscoveryMethod {
public:
  virtual ~DiscoveryMethod() = default;
  virtual absl::optional<PeerInfo> derivePeerInfo(const StreamInfo::StreamInfo&,
                                                  Http::HeaderMap&) const PURE;
  virtual absl::optional<PeerInfo> derivePeerInfo(const StreamInfo::StreamInfo&, Http::HeaderMap&,
                                                  Context&) const PURE;
  virtual void remove(Http::HeaderMap&) const {}
};

using DiscoveryMethodPtr = std::unique_ptr<DiscoveryMethod>;

class MXMethod : public DiscoveryMethod {
public:
  MXMethod(bool downstream, Server::Configuration::ServerFactoryContext& factory_context);
  absl::optional<PeerInfo> derivePeerInfo(const StreamInfo::StreamInfo&, Http::HeaderMap&,
                                          Context&) const override;
  void remove(Http::HeaderMap&) const override;

private:
  absl::optional<PeerInfo> lookup(absl::string_view id, absl::string_view value) const;
  const bool downstream_;
  struct MXCache : public ThreadLocal::ThreadLocalObject {
    absl::flat_hash_map<std::string, std::string> cache_;
  };
  mutable ThreadLocal::TypedSlot<MXCache> tls_;
  const int64_t max_peer_cache_size_{500};
};

// Base class for the propagation methods.
class PropagationMethod {
public:
  virtual ~PropagationMethod() = default;
  virtual void inject(Http::HeaderMap&) const PURE;
  virtual void inject(const StreamInfo::StreamInfo&, Http::HeaderMap&, Context&) const PURE;
};

using PropagationMethodPtr = std::unique_ptr<PropagationMethod>;

class MXPropagationMethod : public PropagationMethod {
public:
  MXPropagationMethod(Server::Configuration::ServerFactoryContext& factory_context);
  void inject(Http::HeaderMap&) const override;
  MXPropagationMethod(bool downstream, Server::Configuration::ServerFactoryContext& factory_context,
                      const io::istio::http::peer_metadata::Config_IstioHeaders&);
  void inject(const StreamInfo::StreamInfo&, Http::HeaderMap&, Context&) const override;

private:
  const bool downstream_;
  std::string computeValue(Server::Configuration::ServerFactoryContext&) const;
  const std::string id_;
  std::string value_;
  const std::string value_;
  const bool skip_external_clusters_;
  bool skipMXHeaders(const StreamInfo::StreamInfo&) const;
};

class FilterConfig {
class FilterConfig : public Logger::Loggable<Logger::Id::filter> {
public:
  FilterConfig(const io::istio::http::peer_metadata::Config&,
               Server::Configuration::FactoryContext&);
  void discoverDownstream(StreamInfo::StreamInfo&, Http::RequestHeaderMap&) const;
  void discoverUpstream(StreamInfo::StreamInfo&, Http::ResponseHeaderMap&) const;
  void injectDownstream(Http::ResponseHeaderMap&) const;
  void injectUpstream(Http::RequestHeaderMap&) const;
  void discoverDownstream(StreamInfo::StreamInfo&, Http::RequestHeaderMap&, Context&) const;
  void discoverUpstream(StreamInfo::StreamInfo&, Http::ResponseHeaderMap&, Context&) const;
  void injectDownstream(const StreamInfo::StreamInfo&, Http::ResponseHeaderMap&, Context&) const;
  void injectUpstream(const StreamInfo::StreamInfo&, Http::RequestHeaderMap&, Context&) const;

private:
  std::vector<DiscoveryMethodPtr> buildDiscoveryMethods(

@ -88,13 +115,13 @@ private:
      bool downstream, Server::Configuration::FactoryContext&) const;
  std::vector<PropagationMethodPtr> buildPropagationMethods(
      const Protobuf::RepeatedPtrField<io::istio::http::peer_metadata::Config::PropagationMethod>&,
      Server::Configuration::FactoryContext&) const;
      bool downstream, Server::Configuration::FactoryContext&) const;
  StreamInfo::StreamSharingMayImpactPooling sharedWithUpstream() const {
    return shared_with_upstream_
               ? StreamInfo::StreamSharingMayImpactPooling::SharedWithUpstreamConnectionOnce
               : StreamInfo::StreamSharingMayImpactPooling::None;
  }
  void discover(StreamInfo::StreamInfo&, bool downstream, Http::HeaderMap&) const;
  void discover(StreamInfo::StreamInfo&, bool downstream, Http::HeaderMap&, Context&) const;
  void setFilterState(StreamInfo::StreamInfo&, bool downstream, const std::string& value) const;
  const bool shared_with_upstream_;
  const std::vector<DiscoveryMethodPtr> downstream_discovery_;

@ -113,6 +140,7 @@ public:

private:
  FilterConfigSharedPtr config_;
  Context ctx_;
};

class FilterConfigFactory : public Common::FactoryBase<io::istio::http::peer_metadata::Config> {

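The declarations above are driven by the io.istio.http.peer_metadata.Config proto: per-direction lists of discovery methods (the first method that derives peer info wins, all of them perform header removal) and propagation methods. A combined configuration might look roughly like this, assembled from the test fragments later in this diff (a minimal sketch, not an exhaustive listing of the proto's fields):

    downstream_discovery:
    - istio_headers: {}
    downstream_propagation:
    - istio_headers: {}
    upstream_propagation:
    - istio_headers:
        skip_external_clusters: true
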
@ -316,6 +316,24 @@ constexpr absl::string_view SampleIstioHeader =
    "NWJjNzc4Ch8KDkFQUF9DT05UQUlORVJTEg0aC3Rlc3QsYm9uemFpChYKCU5BTUVTUEFDRRIJGgdkZWZhdWx0CjMKK1NUQU"
    "NLRFJJVkVSX01PTklUT1JJTkdfRVhQT1JUX0lOVEVSVkFMX1NFQ1MSBBoCMjA";

TEST(MXMethod, Cache) {
  NiceMock<Server::Configuration::MockServerFactoryContext> context;
  MXMethod method(true, context);
  NiceMock<StreamInfo::MockStreamInfo> stream_info;
  Http::TestRequestHeaderMapImpl request_headers;
  const int32_t max = 1000;
  for (int32_t run = 0; run < 3; run++) {
    for (int32_t i = 0; i < max; i++) {
      std::string id = absl::StrCat("test-", i);
      request_headers.setReference(Headers::get().ExchangeMetadataHeaderId, id);
      request_headers.setReference(Headers::get().ExchangeMetadataHeader, SampleIstioHeader);
      Context ctx;
      const auto result = method.derivePeerInfo(stream_info, request_headers, ctx);
      EXPECT_TRUE(result.has_value());
    }
  }
}

TEST_F(PeerMetadataTest, DownstreamMX) {
  request_headers_.setReference(Headers::get().ExchangeMetadataHeaderId, "test-pod");
  request_headers_.setReference(Headers::get().ExchangeMetadataHeader, SampleIstioHeader);

@ -410,15 +428,31 @@ TEST_F(PeerMetadataTest, DownstreamMXPropagation) {
    - istio_headers: {}
  )EOF");
  EXPECT_EQ(0, request_headers_.size());
  EXPECT_EQ(2, response_headers_.size());
  EXPECT_EQ(0, response_headers_.size());
  checkNoPeer(true);
  checkNoPeer(false);
}

TEST_F(PeerMetadataTest, DownstreamMXDiscoveryPropagation) {
  request_headers_.setReference(Headers::get().ExchangeMetadataHeaderId, "test-pod");
  request_headers_.setReference(Headers::get().ExchangeMetadataHeader, SampleIstioHeader);
  initialize(R"EOF(
    downstream_discovery:
    - istio_headers: {}
    downstream_propagation:
    - istio_headers: {}
  )EOF");
  EXPECT_EQ(0, request_headers_.size());
  EXPECT_EQ(2, response_headers_.size());
  checkPeerNamespace(true, "default");
  checkNoPeer(false);
}

TEST_F(PeerMetadataTest, UpstreamMXPropagation) {
  initialize(R"EOF(
    upstream_propagation:
    - istio_headers: {}
    - istio_headers:
        skip_external_clusters: false
  )EOF");
  EXPECT_EQ(2, request_headers_.size());
  EXPECT_EQ(0, response_headers_.size());

@ -426,6 +460,55 @@ TEST_F(PeerMetadataTest, UpstreamMXPropagation) {
  checkNoPeer(false);
}

TEST_F(PeerMetadataTest, UpstreamMXPropagationSkipNoMatch) {
  initialize(R"EOF(
    upstream_propagation:
    - istio_headers:
        skip_external_clusters: true
  )EOF");
  EXPECT_EQ(2, request_headers_.size());
  EXPECT_EQ(0, response_headers_.size());
  checkNoPeer(true);
  checkNoPeer(false);
}

TEST_F(PeerMetadataTest, UpstreamMXPropagationSkip) {
  std::shared_ptr<Upstream::MockClusterInfo> cluster_info_{
      std::make_shared<NiceMock<Upstream::MockClusterInfo>>()};
  auto metadata = TestUtility::parseYaml<envoy::config::core::v3::Metadata>(R"EOF(
    filter_metadata:
      istio:
        external: true
  )EOF");
  ON_CALL(stream_info_, upstreamClusterInfo()).WillByDefault(testing::Return(cluster_info_));
  ON_CALL(*cluster_info_, metadata()).WillByDefault(ReturnRef(metadata));
  initialize(R"EOF(
    upstream_propagation:
    - istio_headers:
        skip_external_clusters: true
  )EOF");
  EXPECT_EQ(0, request_headers_.size());
  EXPECT_EQ(0, response_headers_.size());
  checkNoPeer(true);
  checkNoPeer(false);
}

TEST_F(PeerMetadataTest, UpstreamMXPropagationSkipPassthrough) {
  std::shared_ptr<Upstream::MockClusterInfo> cluster_info_{
      std::make_shared<NiceMock<Upstream::MockClusterInfo>>()};
  cluster_info_->name_ = "PassthroughCluster";
  ON_CALL(stream_info_, upstreamClusterInfo()).WillByDefault(testing::Return(cluster_info_));
  initialize(R"EOF(
    upstream_propagation:
    - istio_headers:
        skip_external_clusters: true
  )EOF");
  EXPECT_EQ(0, request_headers_.size());
  EXPECT_EQ(0, response_headers_.size());
  checkNoPeer(true);
  checkNoPeer(false);
}

} // namespace
} // namespace PeerMetadata
} // namespace HttpFilters

@ -69,7 +69,7 @@ MetadataExchangeConfig::MetadataExchangeConfig(const std::string& stat_prefix,
    : scope_(scope), stat_prefix_(stat_prefix), protocol_(protocol),
      filter_direction_(filter_direction), stats_(generateStats(stat_prefix, scope)) {}

Network::FilterStatus MetadataExchangeFilter::onData(Buffer::Instance& data, bool) {
Network::FilterStatus MetadataExchangeFilter::onData(Buffer::Instance& data, bool end_stream) {
  switch (conn_state_) {
  case Invalid:
    FALLTHRU;

@ -103,6 +103,14 @@ Network::FilterStatus MetadataExchangeFilter::onData(Buffer::Instance& data, boo
  case NeedMoreDataInitialHeader: {
    tryReadInitialProxyHeader(data);
    if (conn_state_ == NeedMoreDataInitialHeader) {
      if (end_stream) {
        // Upstream has entered a half-closed state, and will be sending no more data.
        // Since this plugin would expect additional headers, but none is forthcoming,
        // do not block the tcp_proxy downstream of us from draining the buffer.
        ENVOY_LOG(debug, "Upstream closed early, aborting istio-peer-exchange");
        conn_state_ = Invalid;
        return Network::FilterStatus::Continue;
      }
      return Network::FilterStatus::StopIteration;
    }
    if (conn_state_ == Invalid) {

@ -303,10 +311,7 @@ void MetadataExchangeFilter::getMetadata(google::protobuf::Struct* metadata) {
std::string MetadataExchangeFilter::getMetadataId() { return local_info_.node().id(); }

void MetadataExchangeFilter::setMetadataNotFoundFilterState() {
  auto key = config_->filter_direction_ == FilterDirection::Downstream
                 ? ::Wasm::Common::kDownstreamMetadataIdKey
                 : ::Wasm::Common::kUpstreamMetadataIdKey;
  updatePeerId(toAbslStringView(key), ::Wasm::Common::kMetadataNotFoundValue);
  updatePeerId(::Wasm::Common::kMetadataNotFoundValue, ::Wasm::Common::kMetadataNotFoundValue);
}

} // namespace MetadataExchange

@ -61,7 +61,7 @@ func Get(port uint16, body string) *HTTPCall {
    }
}

func (g *HTTPCall) Run(_ *Params) error {
func (g *HTTPCall) Run(p *Params) error {
    url := fmt.Sprintf("http://127.0.0.1:%d%v", g.Port, g.Path)
    if g.Timeout == 0 {
        g.Timeout = DefaultTimeout

@ -71,7 +71,11 @@ func (g *HTTPCall) Run(_ *Params) error {
        return err
    }
    for key, val := range g.RequestHeaders {
        req.Header.Add(key, val)
        header, err := p.Fill(val)
        if err != nil {
            panic(err)
        }
        req.Header.Add(key, header)
    }
    if len(g.Authority) > 0 {
        req.Host = g.Authority

@ -21,6 +21,7 @@ import (
    "io"
    "log"
    "net"
    "strings"
    "time"
)

@ -131,3 +132,92 @@ func (t *TCPConnection) Run(p *Params) error {
}

func (t *TCPConnection) Cleanup() {}

// TCPServerAcceptAndClose implements a TCP server
// which accepts the data and then closes the connection
// immediately without any response.
//
// The exception to this is the "ping" data,
// which is handled differently so callers can check
// whether the server is already up.
type TCPServerAcceptAndClose struct {
    lis net.Listener
}

var _ Step = &TCPServerAcceptAndClose{}

func (t *TCPServerAcceptAndClose) Run(p *Params) error {
    var err error
    t.lis, err = net.Listen("tcp", fmt.Sprintf(":%d", p.Ports.BackendPort))
    if err != nil {
        return fmt.Errorf("failed to listen on %v", err)
    }
    go t.serve()
    if err = waitForTCPServer(p.Ports.BackendPort); err != nil {
        return err
    }
    return nil
}

func (t *TCPServerAcceptAndClose) Cleanup() {
    t.lis.Close()
}

func (t *TCPServerAcceptAndClose) serve() {
    for {
        conn, err := t.lis.Accept()
        if err != nil {
            return
        }

        go t.handleConnection(conn)
    }
}

func (t *TCPServerAcceptAndClose) handleConnection(conn net.Conn) {
    defer conn.Close()
    reader := bufio.NewReader(conn)
    bytes, err := reader.ReadString('\n')
    if err != nil {
        if err != io.EOF {
            log.Println("failed to read data, err:", err)
        }
        return
    }
    bytes = strings.TrimSpace(bytes)
    if strings.HasSuffix(bytes, "ping") {
        log.Println("pinged - the TCP Server is available")
        _, _ = conn.Write([]byte("alive\n"))
    }
    log.Println("received data. Closing the connection")
}

// InterceptedTCPConnection is a connection that expects
// to be terminated (before the read timeout occurs).
type InterceptedTCPConnection struct {
    ReadTimeout time.Duration
}

var _ Step = &InterceptedTCPConnection{}

func (t *InterceptedTCPConnection) Run(p *Params) error {
    conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", p.Ports.ClientPort))
    if err != nil {
        return fmt.Errorf("failed to connect to tcp server: %v", err)
    }
    defer conn.Close()

    fmt.Fprintf(conn, "some data"+"\n")
    err = conn.SetReadDeadline(time.Now().Add(t.ReadTimeout))
    if err != nil {
        return fmt.Errorf("failed to set read deadline: %v", err)
    }

    _, err = bufio.NewReader(conn).ReadString('\n')
    if err != io.EOF {
        return errors.New("the connection should be terminated")
    }
    return nil
}

func (t *InterceptedTCPConnection) Cleanup() {}

@ -43,8 +43,7 @@ func TestHTTPExchange(t *testing.T) {
    params := driver.NewTestParams(t, map[string]string{}, envoye2e.ProxyE2ETests)
    params.Vars["ClientMetadata"] = params.LoadTestData("testdata/client_node_metadata.json.tmpl")
    params.Vars["ServerMetadata"] = params.LoadTestData("testdata/server_node_metadata.json.tmpl")
    params.Vars["ServerHTTPFilters"] = params.LoadTestData("testdata/filters/mx_inbound.yaml.tmpl")
    params.Vars["ClientHTTPFilters"] = params.LoadTestData("testdata/filters/mx_outbound.yaml.tmpl")
    params.Vars["ServerHTTPFilters"] = params.LoadTestData("testdata/filters/mx_native_inbound.yaml.tmpl")
    if err := (&driver.Scenario{
        Steps: []driver.Step{
            &driver.XDS{},

@ -87,3 +86,41 @@ func TestHTTPExchange(t *testing.T) {
        t.Fatal(err)
    }
}

func TestNativeHTTPExchange(t *testing.T) {
    params := driver.NewTestParams(t, map[string]string{}, envoye2e.ProxyE2ETests)
    params.Vars["ServerMetadata"] = params.LoadTestData("testdata/server_node_metadata.json.tmpl")
    params.Vars["ServerHTTPFilters"] = params.LoadTestData("testdata/filters/mx_native_inbound.yaml.tmpl")
    // TCP MX should not break HTTP MX when there is no TCP prefix or TCP MX ALPN.
    params.Vars["ServerNetworkFilters"] = params.LoadTestData("testdata/filters/server_mx_network_filter.yaml.tmpl")
    metadata := EncodeMetadata(t, params)
    if err := (&driver.Scenario{
        Steps: []driver.Step{
            &driver.XDS{},
            &driver.Update{Node: "server", Version: "0", Listeners: []string{driver.LoadTestData("testdata/listener/server.yaml.tmpl")}},
            &driver.Envoy{Bootstrap: params.LoadTestData("testdata/bootstrap/server.yaml.tmpl"), Concurrency: 2},
            &driver.Sleep{Duration: 1 * time.Second},
            &driver.Repeat{
                // Must be high enough to exercise cache eviction.
                N: 1000,
                Step: &driver.HTTPCall{
                    Port: params.Ports.ServerPort,
                    Body: "hello, world!",
                    RequestHeaders: map[string]string{
                        "x-envoy-peer-metadata-id": "client{{ .N }}",
                        "x-envoy-peer-metadata":    metadata,
                    },
                    ResponseHeaders: map[string]string{
                        "x-envoy-peer-metadata-id": "server",
                        "x-envoy-peer-metadata":    driver.Any,
                    },
                },
            },
            &driver.Stats{AdminPort: params.Ports.ServerAdmin, Matchers: map[string]driver.StatMatcher{
                "envoy_server_envoy_bug_failures": &driver.ExactStat{Metric: "testdata/metric/envoy_bug_failures.yaml"},
            }},
        },
    }).Run(params); err != nil {
        t.Fatal(err)
    }
}

@ -35,6 +35,7 @@ func init() {
        "TestPassthroughCONNECT/quic",
        "TestPassthroughCONNECT/h2",
        "TestHTTPExchange",
        "TestNativeHTTPExchange",
        "TestHTTPLocalRatelimit",
        "TestStackdriverAccessLog/AllClientErrorRequestsGetsLoggedOnNoMxAndError",
        "TestStackdriverAccessLog/AllErrorRequestsGetsLogged",

@ -89,6 +90,7 @@ func init() {
        "TestStatsExpiry",
        "TestTCPMetadataExchange",
        "TestTCPMetadataExchangeNoAlpn",
        "TestTCPMetadataExchangeWithConnectionTermination",
        "TestOtelPayload",
    },
}

@ -127,3 +127,52 @@ func TestTCPMetadataExchangeNoAlpn(t *testing.T) {
        t.Fatal(err)
    }
}

func TestTCPMetadataExchangeWithConnectionTermination(t *testing.T) {
    params := driver.NewTestParams(t, map[string]string{
        "DisableDirectResponse": "true",
        "AlpnProtocol":          "mx-protocol",
        "StatsConfig":           driver.LoadTestData("testdata/bootstrap/stats.yaml.tmpl"),
    }, envoye2e.ProxyE2ETests)
    params.Vars["ClientMetadata"] = params.LoadTestData("testdata/client_node_metadata.json.tmpl")
    params.Vars["ServerMetadata"] = params.LoadTestData("testdata/server_node_metadata.json.tmpl")
    params.Vars["ServerNetworkFilters"] = params.LoadTestData("testdata/filters/server_stats_network_filter.yaml.tmpl")
    params.Vars["ClientUpstreamFilters"] = params.LoadTestData("testdata/filters/client_mx_network_filter.yaml.tmpl")
    params.Vars["ClientNetworkFilters"] = params.LoadTestData("testdata/filters/server_mx_network_filter.yaml.tmpl") + "\n" +
        params.LoadTestData("testdata/filters/client_stats_network_filter.yaml.tmpl")
    params.Vars["ClientClusterTLSContext"] = params.LoadTestData("testdata/transport_socket/client.yaml.tmpl")
    params.Vars["ServerListenerTLSContext"] = params.LoadTestData("testdata/transport_socket/server.yaml.tmpl")

    if err := (&driver.Scenario{
        Steps: []driver.Step{
            &driver.XDS{},
            &driver.Update{
                Node:      "client",
                Version:   "0",
                Clusters:  []string{params.LoadTestData("testdata/cluster/tcp_client.yaml.tmpl")},
                Listeners: []string{params.LoadTestData("testdata/listener/tcp_client.yaml.tmpl")},
            },
            &driver.Update{
                Node:      "server",
                Version:   "0",
                Clusters:  []string{params.LoadTestData("testdata/cluster/tcp_server.yaml.tmpl")},
                Listeners: []string{params.LoadTestData("testdata/listener/tcp_server.yaml.tmpl")},
            },
            &driver.Envoy{Bootstrap: params.LoadTestData("testdata/bootstrap/client.yaml.tmpl")},
            &driver.Envoy{Bootstrap: params.LoadTestData("testdata/bootstrap/server.yaml.tmpl")},
            &driver.Sleep{Duration: 1 * time.Second},
            &driver.TCPServerAcceptAndClose{},
            &driver.Repeat{
                N: 10,
                Step: &driver.InterceptedTCPConnection{
                    ReadTimeout: 10 * time.Second,
                },
            },
            &driver.Stats{AdminPort: params.Ports.ServerAdmin, Matchers: map[string]driver.StatMatcher{
                "istio_tcp_connections_opened_total": &driver.ExactStat{Metric: "testdata/metric/tcp_server_connection_open_without_mx.yaml.tmpl"},
            }},
        },
    }).Run(params); err != nil {
        t.Fatal(err)
    }
}

@ -0,0 +1,5 @@
name: envoy_server_envoy_bug_failures
type: COUNTER
metric:
- counter:
    value: 0
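The ExactStat matchers in the tests above compare counters scraped from the Envoy admin port against small expectation files in this same format. As an illustration only, an expectation for the TCP termination test's counter could be written like the sketch below; the metric name comes from the test above, while the value and the absence of labels are assumptions, and the real template at testdata/metric/tcp_server_connection_open_without_mx.yaml.tmpl is not shown in this diff:

    # Hypothetical expectation file, mirroring the format of envoy_bug_failures.yaml above.
    name: istio_tcp_connections_opened_total
    type: COUNTER
    metric:
    - counter:
        value: 10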