Compare commits
97 Commits
SHA1: ac67367eb9, ec120597bd, 2837b0f410, 6640774d9f, 4f50f8403b, b102502cbd, d58e82441f, c555eaa812, 85a94b6cc4, 3fa6335035, dfa3b58bbc, c2d2534edb, 84f0e52e64, f030073f2f, 3233bb1017, 7df8cf5d08, 7cddb868e9, ac477c15a8, 5d0352588c, b86fd9989b, facd9a28a0, 224b2c34ac, c52e0bbdbf, 442923910b, c616a29092, d6d3b606ed, 8d9a56a416, 615277a05a, 3d1223af09, 79dfd10249, 9f6ae51005, 46acf76463, 58cf2a0f94, 9c01d1276d, d9ea32ce21, c96dd032da, 903cf079de, ad8bea43ef, 6eaa32e8ac, 3470f4bba2, 93a0973175, b8dddb7301, ab0cf4c9d8, c29cd78a1f, 9a12c0e0da, e11dea1ab7, 49f36fa1d5, 903aab1408, dddd5ad653, 3b04b341ee, ec70da292a, 5b28370c4b, 4a782ece35, 980bbfd988, 576e8a4964, 01947fb5b3, 860e2868d5, 907e1b9899, 008fb73c19, ecfc76d62f, 7e033a762f, dc54d850a7, 1b39642fcb, a665e0807c, 4012c20899, 9bf0c748c0, 568d11af44, 9004c2341d, 9f43a67b94, 681ddf02cc, c98f8212b7, 15f9533afb, 7f147c658f, 4f36708363, 145b4a0808, 9e3c8cff67, e52cc43c88, 5532e0dcc9, 7b126874d8, b3c12afd5c, e57b6ebd28, 73953985ad, 09df8e9a6f, 77e1f1bb2c, 6be903360c, 10c8745a41, 0df8081020, 87aa1e934c, 07ebb76ffa, b2939cd576, 6be9f7d911, a23735f2c0, 8df7e88e58, 0256664b0e, f3f6c0ed7a, 4c49bb83c4, 2a9d716bfa
```diff
@@ -1,6 +1,6 @@
 {
     "name": "istio build-tools",
-    "image": "gcr.io/istio-testing/build-tools:master-6bfe0028e941afdae35a3c5d4374bc08e3c04153",
+    "image": "gcr.io/istio-testing/build-tools:master-672e6089ff843019a2b28cf9e87754c7b74358ea",
     "privileged": true,
     "remoteEnv": {
         "USE_GKE_GCLOUD_AUTH_PLUGIN": "True",
```
```diff
@@ -27,3 +27,8 @@ The three admin ports (Readiness, Admin, and Metrics) are intentionally split.
 * The admin port must be only on localhost, and it should be on the admin thread for isolation
 * The metrics port should be on the admin thread to avoid isolation.
   This *could* be on the readiness port, but historically we had found that the stats query can be very expensive and lead to tail latencies in the data plane.
+
+**NOTE** Networking policy must allow inbound and outbound traffic on port 15008 for all application pods, for the ambient mesh to function.
+The other ports are not relevant for pod-to-pod communication within the ambient mesh, and are only used for traffic redirection and categorization
+within the application pod's network namespace, or for metrics/readiness scraping of the ztunnel pod itself.
+See the Istio documentation [Ambient and Kubernetes NetworkPolicy](https://istio.io/latest/docs/ambient/usage/networkpolicy/) for more details.
```
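A minimal Rust sketch of the localhost-only rule described above (not ztunnel's actual listener code, and the port number is illustrative): binding the admin listener to loopback makes it unreachable from off-box, regardless of network policy.

```rust
use std::net::TcpListener;

fn main() -> std::io::Result<()> {
    // Bind strictly to loopback: the admin surface is reachable only on-box.
    let admin = TcpListener::bind(("127.0.0.1", 15000))?;
    println!("admin listening on {}", admin.local_addr()?);
    Ok(())
}
```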
Cargo.toml (60 changed lines)
```diff
@@ -1,14 +1,16 @@
 [package]
 name = "ztunnel"
 version = "0.0.0"
-edition = "2021"
-rust-version = "1.77"
+edition = "2024"
+rust-version = "1.85"
 
 [features]
-default = ["tls-ring"]
+default = ["tls-aws-lc"]
 jemalloc = ["dep:tikv-jemallocator", "dep:jemalloc_pprof"]
 tls-boring = ["dep:boring", "dep:boring-sys", "boring-rustls-provider/fips-only"]
 tls-ring = ["dep:ring", "rustls/ring", "tokio-rustls/ring", "hyper-rustls/ring", "dep:rcgen"]
+tls-aws-lc = ["dep:ring", "rustls/aws_lc_rs", "tokio-rustls/aws_lc_rs", "hyper-rustls/aws-lc-rs", "dep:rcgen", "rcgen/aws_lc_rs"]
+tls-openssl = ["dep:rustls-openssl", "dep:openssl" ]
 testing = ["dep:rcgen", "rcgen/x509-parser"] # Enables utilities supporting tests.
 
 [lib]
```
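A hypothetical sketch (module and constant names are made up for illustration) of how mutually exclusive `tls-*` cargo features like these are typically consumed: `cfg` gates compile in exactly one backend, so building with `--no-default-features -F tls-ring` swaps the backend — which is what the Makefile change further down wires `TLS_MODE` to.

```rust
// Hypothetical illustration: each `tls-*` cargo feature selects one backend
// module at compile time; exactly one of these modules exists in the binary.
#[cfg(feature = "tls-aws-lc")]
mod tls_backend {
    pub const NAME: &str = "aws-lc";
}

#[cfg(all(feature = "tls-ring", not(feature = "tls-aws-lc")))]
mod tls_backend {
    pub const NAME: &str = "ring";
}

fn main() {
    println!("TLS backend: {}", tls_backend::NAME);
}
```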
```diff
@@ -37,43 +39,48 @@ boring-sys = { version = "4", optional = true }
 # Enabled with 'tls-ring'
 ring = { version = "0.17", optional = true }
 
+# Enabled with 'tls-openssl'
+rustls-openssl = { version = "0.2", optional = true }
+openssl = { version = "0.10", optional = true }
+
 anyhow = "1.0"
 async-stream = "0.3"
 async-trait = "0.1"
 base64 = "0.22"
 byteorder = "1.5"
-bytes = { version = "1.9", features = ["serde"] }
+bytes = { version = "1.10", features = ["serde"] }
 chrono = "0.4"
-duration-str = "0.11"
+duration-str = "0.17"
 futures = "0.3"
 futures-core = "0.3"
 futures-util = "0.3"
 jemalloc_pprof = { version = "0.6.0", optional = true }
 tikv-jemallocator = { version = "0.6.0", features = ["profiling", "unprefixed_malloc_on_supported_platforms"], optional = true }
 hashbrown = "0.15"
-hickory-client = "0.24"
-hickory-proto = "0.24"
-hickory-resolver = "0.24"
-hickory-server = { version = "0.24", features = [ "hickory-resolver" ] }
+hickory-client = "0.25"
+hickory-proto = "0.25"
+hickory-resolver = "0.25"
+hickory-server = { version = "0.25", features = [ "resolver" ]}
 http-body = { package = "http-body", version = "1" }
 http-body-util = "0.1"
-hyper = { version = "1.5", features = ["full"] }
+hyper = { version = "1.6", features = ["full"] }
 hyper-rustls = { version = "0.27.0", default-features = false, features = ["logging", "http1", "http2"] }
 hyper-util = { version = "0.1", features = ["full"] }
-ipnet = { version = "2.9", features = ["serde"] }
-itertools = "0.13"
+ipnet = { version = "2.11", features = ["serde"] }
+itertools = "0.14"
 keyed_priority_queue = "0.4"
 libc = "0.2"
 log = "0.4"
-nix = { version = "0.29", features = ["socket", "sched", "uio", "fs", "ioctl", "user", "net", "mount"] }
-once_cell = "1.19"
-ppp = "2.2"
+nix = { version = "0.29", features = ["socket", "sched", "uio", "fs", "ioctl", "user", "net", "mount", "resource" ] }
+once_cell = "1.21"
+num_cpus = "1.16"
+ppp = "2.3"
 prometheus-client = { version = "0.23" }
 prometheus-parse = "0.2"
 prost = "0.13"
 prost-types = "0.13"
-rand = { version = "0.8" , features = ["small_rng"]}
-rcgen = { version = "0.13", optional = true, features = ["pem"] }
+rand = { version = "0.9" , features = ["small_rng"]}
+rcgen = { version = "0.14", optional = true, features = ["pem"] }
 rustls = { version = "0.23", default-features = false }
 rustls-native-certs = "0.8"
 rustls-pemfile = "2.2"
```
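One consequence of the `rand` 0.8 → 0.9 bump worth noting (assuming the published 0.9 API): `thread_rng()` and `Rng::gen()` were renamed, in part because `gen` becomes a reserved keyword in the 2024 edition this PR moves to.

```rust
use rand::Rng;

fn main() {
    let mut rng = rand::rng(); // rand 0.9; was rand::thread_rng() in 0.8
    let x: u32 = rng.random(); // rand 0.9; was rng.gen() in 0.8
    println!("{x}");
}
```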
```diff
@@ -83,36 +90,36 @@ serde_yaml = "0.9"
 socket2 = { version = "0.5", features = ["all"] }
 textnonce = { version = "1.0" }
 thiserror = "2.0"
-tls-listener = { version = "0.10" }
-tokio = { version = "1.42", features = ["full", "test-util"] }
+tls-listener = { version = "0.11" }
+tokio = { version = "1.44", features = ["full", "test-util"] }
 tokio-rustls = { version = "0.26", default-features = false }
 tokio-stream = { version = "0.1", features = ["net"] }
-tonic = { version = "0.12", default-features = false, features = ["prost", "codegen"] }
+tonic = { version = "0.13", default-features = false, features = ["prost", "codegen"] }
 tower = { version = "0.5", features = ["full"] }
 tracing = { version = "0.1"}
 tracing-subscriber = { version = "0.3", features = ["registry", "env-filter", "json"] }
 url = "2.5"
-x509-parser = { version = "0.16", default-features = false }
+x509-parser = { version = "0.17", default-features = false }
 tracing-log = "0.2"
 backoff = "0.4"
 pin-project-lite = "0.2"
 pingora-pool = "0.4"
 flurry = "0.5"
 h2 = "0.4"
-http = "1.2"
+http = "1.3"
 split-iter = "0.1"
 arcstr = { version = "1.2", features = ["serde"] }
 tracing-core = "0.1"
 tracing-appender = "0.2"
 tokio-util = { version = "0.7", features = ["io-util"] }
-educe = "0.6.0"
+educe = "0.6"
 
 [target.'cfg(target_os = "linux")'.dependencies]
 netns-rs = "0.1"
 pprof = { version = "0.14", features = ["protobuf", "protobuf-codec", "criterion"] }
 
 [build-dependencies]
-tonic-build = { version = "0.12", default-features = false, features = ["prost"] }
+tonic-build = { version = "0.13", default-features = false, features = ["prost"] }
 prost-build = "0.13"
 anyhow = "1.0"
 rustc_version = "0.4"
```
```diff
@@ -145,9 +152,10 @@ diff = "0.1"
 local-ip-address = "0.6"
 matches = "0.1"
 test-case = "3.3"
-oid-registry = "0.7"
+oid-registry = "0.8"
 rcgen = { version = "0.13", features = ["pem", "x509-parser"] }
-ctor = "0.2"
+x509-parser = { version = "0.17", default-features = false, features = ["verify"] }
+ctor = "0.4"
 
 [lints.clippy]
 # This rule makes code more confusing
```
```diff
@@ -3,6 +3,10 @@ include common/Makefile.common.mk
 FEATURES ?=
 ifeq ($(TLS_MODE), boring)
 FEATURES:=--no-default-features -F tls-boring
+else ifeq ($(TLS_MODE), aws-lc)
+FEATURES:=--no-default-features -F tls-aws-lc
+else ifeq ($(TLS_MODE), openssl)
+FEATURES:=--no-default-features -F tls-openssl
 endif
 
 test:
```

```diff
@@ -21,6 +25,8 @@ inpodserver:
 # Test that all important features build
 check-features:
 	cargo check --no-default-features -F tls-boring
+	cargo check --no-default-features -F tls-aws-lc
+	cargo check --no-default-features -F tls-openssl
 	cargo check -F jemalloc
 	(cd fuzz; RUSTFLAGS="--cfg fuzzing" cargo check)
```
```diff
@@ -3,5 +3,7 @@ BUILD_WITH_CONTAINER ?= 1
 # Namespaced tests need sys_admin due to docker being overly restrictive (https://github.com/moby/moby/issues/42441)
 # Ironically, this gives the container more privilege than is required without.
 DOCKER_RUN_OPTIONS += --privileged
+ifeq ($(OS), Linux)
 DOCKER_RUN_OPTIONS += -v /fake/path/does/not/exist:/var/run/netns
+endif
 DOCKER_RUN_OPTIONS += -v /dev/null:/run/xtables.lock
```
README.md (18 changed lines)
```diff
@@ -34,10 +34,12 @@ Ztunnel's TLS is built on [rustls](https://github.com/rustls/rustls).
 
 Rustls has support for plugging in various crypto providers to meet various needs (compliance, performance, etc).
 
 | Name | How To Enable |
-|-----------------------------------------------|------------------------------------------------|
-| [ring](https://github.com/briansmith/ring/) | Default (or `--features tls-ring`) |
-| [boring](https://github.com/cloudflare/boring) | `--features tls-boring --no-default-features` |
+|----------------------------------------------------|------------------------------------------------|
+| [aws-lc](https://github.com/aws/aws-lc-rs) | Default (or `--features tls-aws-lc`) |
+| [ring](https://github.com/briansmith/ring/) | `--features tls-ring --no-default-features` |
+| [boring](https://github.com/cloudflare/boring) | `--features tls-boring --no-default-features` |
+| [openssl](https://github.com/tofay/rustls-openssl) | `--features tls-openssl --no-default-features` |
 
 In all options, only TLS 1.3 with cipher suites `TLS13_AES_256_GCM_SHA384` and `TLS13_AES_128_GCM_SHA256` is used.
```
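As a sketch of how a rustls crypto provider like these is selected at runtime (rustls 0.23 exposes one module per provider; ztunnel's exact wiring may differ), a process-wide default provider can be installed once at startup:

```rust
// Requires rustls 0.23 built with its `aws_lc_rs` feature.
fn install_tls_provider() {
    rustls::crypto::aws_lc_rs::default_provider()
        .install_default()
        .expect("crypto provider was already installed");
}

fn main() {
    install_tls_provider();
}
```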
````diff
@@ -65,15 +67,15 @@ To use these vendored libraries and build ztunnel for either of these OS/arch co
 ##### For linux/x86_64
 
 ``` toml
-BORING_BSSL_PATH = { value = "vendor/boringssl-fips/linux_x86_64", force = true, relative = true }
-BORING_BSSL_INCLUDE_PATH = { value = "vendor/boringssl-fips/include/", force = true, relative = true }
+BORING_BSSL_FIPS_PATH = { value = "vendor/boringssl-fips/linux_x86_64", force = true, relative = true }
+BORING_BSSL_FIPS_INCLUDE_PATH = { value = "vendor/boringssl-fips/include/", force = true, relative = true }
 ```
 
 ##### For linux/arm64
 
 ``` toml
-BORING_BSSL_PATH = { value = "vendor/boringssl-fips/linux_arm64", force = true, relative = true }
-BORING_BSSL_INCLUDE_PATH = { value = "vendor/boringssl-fips/include/", force = true, relative = true }
+BORING_BSSL_FIPS_PATH = { value = "vendor/boringssl-fips/linux_arm64", force = true, relative = true }
+BORING_BSSL_FIPS_INCLUDE_PATH = { value = "vendor/boringssl-fips/include/", force = true, relative = true }
 ```
 
 Once that's done, you should be able to build:
````
benches/basic.rs (123 changed lines)
```diff
@@ -12,15 +12,26 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+use std::net::SocketAddr;
+use std::sync::{Arc, RwLock};
 use std::time::Duration;
 
 use bytes::Bytes;
-use criterion::{criterion_group, criterion_main, Criterion};
+use criterion::{Criterion, Throughput, criterion_group, criterion_main};
+use hickory_resolver::config::{ResolverConfig, ResolverOpts};
 use pprof::criterion::{Output, PProfProfiler};
+use prometheus_client::registry::Registry;
-use ztunnel::state::ProxyState;
+use tokio::runtime::Runtime;
+use ztunnel::state::workload::Workload;
+use ztunnel::state::{DemandProxyState, ProxyState, ServiceResolutionMode};
 use ztunnel::strng;
 use ztunnel::xds::ProxyStateUpdateMutator;
+use ztunnel::xds::istio::workload::LoadBalancing;
+use ztunnel::xds::istio::workload::Port;
+use ztunnel::xds::istio::workload::Service as XdsService;
+use ztunnel::xds::istio::workload::Workload as XdsWorkload;
+use ztunnel::xds::istio::workload::load_balancing;
+use ztunnel::xds::istio::workload::{NetworkAddress as XdsNetworkAddress, PortList};
 
 pub fn xds(c: &mut Criterion) {
     use ztunnel::xds::istio::workload::Port;
```
```diff
@@ -76,12 +87,116 @@ pub fn xds(c: &mut Criterion) {
     });
 }
 
+pub fn load_balance(c: &mut Criterion) {
+    let mut c = c.benchmark_group("load_balance");
+    c.throughput(Throughput::Elements(1));
+    c.measurement_time(Duration::from_secs(5));
+    let mut run = move |name, wl_count, lb: Option<LoadBalancing>| {
+        let (rt, demand, src_wl, svc_addr) = build_load_balancer(wl_count, lb.clone());
+        c.bench_function(name, move |b| {
+            b.to_async(&rt).iter(|| async {
+                demand
+                    .fetch_upstream(
+                        "".into(),
+                        &src_wl,
+                        svc_addr,
+                        ServiceResolutionMode::Standard,
+                    )
+                    .await
+                    .unwrap()
+            })
+        });
+    };
+    run("basic-10", 10, None);
+    run("basic-1000", 1000, None);
+    run("basic-10000", 10000, None);
+    let locality = Some(LoadBalancing {
+        routing_preference: vec![
+            load_balancing::Scope::Network as i32,
+            load_balancing::Scope::Region as i32,
+            load_balancing::Scope::Zone as i32,
+            load_balancing::Scope::Subzone as i32,
+        ],
+        mode: load_balancing::Mode::Failover as i32,
+        health_policy: 0,
+    });
+    run("locality-10", 10, locality.clone());
+    run("locality-1000", 1000, locality.clone());
+    run("locality-10000", 10000, locality.clone());
+}
+
+fn build_load_balancer(
+    wl_count: usize,
+    load_balancing: Option<LoadBalancing>,
+) -> (Runtime, DemandProxyState, Arc<Workload>, SocketAddr) {
+    let svc = XdsService {
+        hostname: "example.com".to_string(),
+        addresses: vec![XdsNetworkAddress {
+            network: "".to_string(),
+            address: vec![127, 0, 0, 3],
+        }],
+        ports: vec![Port {
+            service_port: 80,
+            target_port: 0,
+        }],
+        load_balancing,
+        ..Default::default()
+    };
+    let mut state = ProxyState::new(None);
+    let updater = ProxyStateUpdateMutator::new_no_fetch();
+    updater.insert_service(&mut state, svc).unwrap();
+    for i in 0..wl_count {
+        updater
+            .insert_workload(
+                &mut state,
+                XdsWorkload {
+                    uid: format!("cluster1//v1/Pod/default/{i}"),
+                    addresses: vec![Bytes::copy_from_slice(&[
+                        127,
+                        0,
+                        (i / 255) as u8,
+                        (i % 255) as u8,
+                    ])],
+                    services: std::collections::HashMap::from([(
+                        "/example.com".to_string(),
+                        PortList {
+                            ports: vec![Port {
+                                service_port: 80,
+                                target_port: 1234,
+                            }],
+                        },
+                    )]),
+                    ..Default::default()
+                },
+            )
+            .unwrap();
+    }
+    let mut registry = Registry::default();
+    let metrics = Arc::new(ztunnel::proxy::Metrics::new(&mut registry));
+    let demand = DemandProxyState::new(
+        Arc::new(RwLock::new(state)),
+        None,
+        ResolverConfig::default(),
+        ResolverOpts::default(),
+        metrics,
+    );
+    let rt = tokio::runtime::Builder::new_current_thread()
+        .enable_all()
+        .build()
+        .unwrap();
+    let src_wl = rt
+        .block_on(demand.fetch_workload_by_uid(&"cluster1//v1/Pod/default/0".into()))
+        .unwrap();
+    let svc_addr: SocketAddr = "127.0.0.3:80".parse().unwrap();
+    (rt, demand, src_wl, svc_addr)
+}
+
 criterion_group! {
     name = benches;
     config = Criterion::default()
         .with_profiler(PProfProfiler::new(100, Output::Protobuf))
         .warm_up_time(Duration::from_millis(1));
-    targets = xds
+    targets = xds, load_balance
 }
 
 criterion_main!(benches);
```
```diff
@@ -24,7 +24,7 @@ use std::{io, thread};
 use bytes::BufMut;
 use criterion::measurement::Measurement;
 use criterion::{
-    criterion_group, criterion_main, BenchmarkGroup, Criterion, SamplingMode, Throughput,
+    BenchmarkGroup, Criterion, SamplingMode, Throughput, criterion_group, criterion_main,
 };
 use hickory_resolver::config::{ResolverConfig, ResolverOpts};
 use pprof::criterion::{Output, PProfProfiler};
```

```diff
@@ -35,7 +35,7 @@ use tokio::sync::Mutex;
 use tracing::info;
 
 use ztunnel::rbac::{Authorization, RbacMatch, StringMatch};
-use ztunnel::state::workload::{Protocol, Workload};
+use ztunnel::state::workload::{InboundProtocol, Workload};
 use ztunnel::state::{DemandProxyState, ProxyRbacContext, ProxyState};
 use ztunnel::test_helpers::app::{DestinationAddr, TestApp};
 use ztunnel::test_helpers::linux::{TestMode, WorkloadManager};
```

```diff
@@ -457,11 +457,11 @@ fn hbone_connection_config() -> ztunnel::config::ConfigSource {
     let lwl = LocalWorkload {
         workload: Workload {
             workload_ips: vec![hbone_connection_ip(i)],
-            protocol: Protocol::HBONE,
-            uid: strng::format!("cluster1//v1/Pod/default/remote{}", i),
-            name: strng::format!("workload-{}", i),
-            namespace: strng::format!("namespace-{}", i),
-            service_account: strng::format!("service-account-{}", i),
+            protocol: InboundProtocol::HBONE,
+            uid: strng::format!("cluster1//v1/Pod/default/remote{i}"),
+            name: strng::format!("workload-{i}"),
+            namespace: strng::format!("namespace-{i}"),
+            service_account: strng::format!("service-account-{i}"),
             ..test_helpers::test_default_workload()
         },
         services: Default::default(),
```

```diff
@@ -471,7 +471,7 @@ fn hbone_connection_config() -> ztunnel::config::ConfigSource {
     let lwl = LocalWorkload {
         workload: Workload {
             workload_ips: vec![],
-            protocol: Protocol::HBONE,
+            protocol: InboundProtocol::HBONE,
             uid: "cluster1//v1/Pod/default/local-source".into(),
             name: "local-source".into(),
             namespace: "default".into(),
```
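The `strng::format!("workload-{}", i)` → `strng::format!("workload-{i}")` changes above use inline format arguments (stable since Rust 1.58); a tiny standalone demonstration:

```rust
fn main() {
    let i = 7;
    // Positional argument and inline capture produce identical strings.
    assert_eq!(format!("workload-{}", i), format!("workload-{i}"));
}
```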
build.rs (7 changed lines)
```diff
@@ -79,7 +79,7 @@ fn main() -> Result<(), anyhow::Error> {
     for line in String::from_utf8(output.stdout).unwrap().lines() {
         // Each line looks like `istio.io/pkg/version.buildGitRevision=abc`
         if let Some((key, value)) = line.split_once('=') {
-            let key = key.split('.').last().unwrap();
+            let key = key.split('.').next_back().unwrap();
             println!("cargo:rustc-env=ZTUNNEL_BUILD_{key}={value}");
         } else {
             println!("cargo:warning=invalid build output {line}");
```

```diff
@@ -94,9 +94,6 @@ fn main() -> Result<(), anyhow::Error> {
         "cargo:rustc-env=ZTUNNEL_BUILD_RUSTC_VERSION={}",
         rustc_version::version().unwrap()
     );
-    println!(
-        "cargo:rustc-env=ZTUNNEL_BUILD_PROFILE_NAME={}",
-        profile_name
-    );
+    println!("cargo:rustc-env=ZTUNNEL_BUILD_PROFILE_NAME={profile_name}");
     Ok(())
 }
```
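The `last()` → `next_back()` swap above is the usual clippy-style fix for double-ended iterators: on `str::split`, `next_back()` yields the final segment directly instead of walking the whole iterator from the front. A small demo:

```rust
fn main() {
    let key = "istio.io/pkg/version.buildGitRevision";
    // next_back() reads the last split segment without iterating from the front.
    assert_eq!(key.split('.').next_back(), Some("buildGitRevision"));
}
```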
```diff
@@ -1 +1 @@
-0569152cf7260f891ee02fcef8c10bf4f94ea606
+d235bc9f4a20f3c78c5aacbfa3f24d08a884a82e
```
```diff
@@ -1,56 +0,0 @@
-# WARNING: DO NOT EDIT, THIS FILE IS PROBABLY A COPY
-#
-# The original version of this file is located in the https://github.com/istio/common-files repo.
-# If you're looking at this file in a different repo and want to make a change, please go to the
-# common-files repo, make the change there and check it in. Then come back to this repo and run
-# "make update-common".
-
-run:
-  # Timeout for analysis, e.g. 30s, 5m.
-  # Default: 1m
-  timeout: 20m
-  build-tags:
-    - integ
-    - integfuzz
-linters:
-  disable-all: true
-  enable:
-    - goimports
-    - gofumpt
-    - gci
-  fast: false
-linters-settings:
-  gci:
-    sections:
-      - standard # Captures all standard packages if they do not match another section.
-      - default # Contains all imports that could not be matched to another section type.
-      - prefix(istio.io/) # Groups all imports with the specified Prefix.
-  goimports:
-    # put imports beginning with prefix after 3rd-party packages;
-    # it's a comma-separated list of prefixes
-    local-prefixes: istio.io/
-issues:
-  # Which dirs to exclude: issues from them won't be reported.
-  # Can use regexp here: `generated.*`, regexp is applied on full path,
-  # including the path prefix if one is set.
-  # Default dirs are skipped independently of this option's value (see exclude-dirs-use-default).
-  # "/" will be replaced by current OS file path separator to properly work on Windows.
-  # Default: []
-  exclude-dirs:
-    - genfiles$
-    - vendor$
-  # Which files to exclude: they will be analyzed, but issues from them won't be reported.
-  # There is no need to include all autogenerated files,
-  # we confidently recognize autogenerated files.
-  # If it's not, please let us know.
-  # "/" will be replaced by current OS file path separator to properly work on Windows.
-  # Default: []
-  exclude-files:
-    - ".*\\.pb\\.go"
-    - ".*\\.gen\\.go"
-  # Maximum issues count per one linter.
-  # Set to 0 to disable.
-  # Default: 50
-  max-issues-per-linter: 0
-  # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
-  max-same-issues: 0
```
```diff
@@ -1,262 +1,225 @@
-# WARNING: DO NOT EDIT, THIS FILE IS PROBABLY A COPY
-#
-# The original version of this file is located in the https://github.com/istio/common-files repo.
-# If you're looking at this file in a different repo and want to make a change, please go to the
-# common-files repo, make the change there and check it in. Then come back to this repo and run
-# "make update-common".
-
-run:
-  # Timeout for analysis, e.g. 30s, 5m.
-  # Default: 1m
-  timeout: 20m
-  build-tags:
-    - integ
-    - integfuzz
-linters:
-  disable-all: true
-  enable:
-    - errcheck
-    - copyloopvar
-    - depguard
-    - gocritic
-    - gofumpt
-    - goimports
-    - revive
-    - gosimple
-    - govet
-    - ineffassign
-    - lll
-    - misspell
-    - staticcheck
-    - stylecheck
-    - typecheck
-    - unconvert
-    - unparam
-    - unused
-    - gci
-    - gosec
-  fast: false
-linters-settings:
-  errcheck:
-    # report about not checking of errors in type assertions: `a := b.(MyStruct)`;
-    # default is false: such cases aren't reported by default.
-    check-type-assertions: false
-    # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`;
-    # default is false: such cases aren't reported by default.
-    check-blank: false
-  govet:
-    disable:
-      # report about shadowed variables
-      - shadow
-  goimports:
-    # put imports beginning with prefix after 3rd-party packages;
-    # it's a comma-separated list of prefixes
-    local-prefixes: istio.io/
-  misspell:
-    # Correct spellings using locale preferences for US or UK.
-    # Default is to use a neutral variety of English.
-    # Setting locale to US will correct the British spelling of 'colour' to 'color'.
-    locale: US
-    ignore-words:
-      - cancelled
-  lll:
-    # max line length, lines longer will be reported. Default is 120.
-    # '\t' is counted as 1 character by default, and can be changed with the tab-width option
-    line-length: 160
-    # tab width in spaces. Default to 1.
-    tab-width: 1
-  revive:
-    ignore-generated-header: false
-    severity: "warning"
-    confidence: 0.0
-    rules:
-      - name: blank-imports
-      - name: context-keys-type
-      - name: time-naming
-      - name: var-declaration
-      - name: unexported-return
-      - name: errorf
-      - name: context-as-argument
-      - name: dot-imports
-      - name: error-return
-      - name: error-strings
-      - name: error-naming
-      - name: increment-decrement
-      - name: var-naming
-      - name: package-comments
-      - name: range
-      - name: receiver-naming
-      - name: indent-error-flow
-      - name: superfluous-else
-      - name: modifies-parameter
-      - name: unreachable-code
-      - name: struct-tag
-      - name: constant-logical-expr
-      - name: bool-literal-in-expr
-      - name: redefines-builtin-id
-      - name: imports-blacklist
-      - name: range-val-in-closure
-      - name: range-val-address
-      - name: waitgroup-by-value
-      - name: atomic
-      - name: call-to-gc
-      - name: duplicated-imports
-      - name: string-of-int
-      - name: defer
-        arguments:
-          - - "call-chain"
-      - name: unconditional-recursion
-      - name: identical-branches
-      # the following rules can be enabled in the future
-      # - name: empty-lines
-      # - name: confusing-results
-      # - name: empty-block
-      # - name: get-return
-      # - name: confusing-naming
-      # - name: unexported-naming
-      # - name: early-return
-      # - name: unused-parameter
-      # - name: unnecessary-stmt
-      # - name: deep-exit
-      # - name: import-shadowing
-      # - name: modifies-value-receiver
-      # - name: unused-receiver
-      # - name: bare-return
-      # - name: flag-parameter
-      # - name: unhandled-error
-      # - name: if-return
-  unparam:
-    # Inspect exported functions, default is false. Set to true if no external program/library imports your code.
-    # XXX: if you enable this setting, unparam will report a lot of false-positives in text editors:
-    # if it's called for subdir of a project it can't find external interfaces. All text editor integrations
-    # with golangci-lint call it on a directory with the changed file.
-    check-exported: false
-  gci:
-    sections:
-      - standard # Captures all standard packages if they do not match another section.
-      - default # Contains all imports that could not be matched to another section type.
-      - prefix(istio.io/) # Groups all imports with the specified Prefix.
-  gocritic:
-    # Disable all checks.
-    # Default: false
-    disable-all: true
-    # Which checks should be enabled in addition to default checks. Since we don't want
-    # all of the default checks, we do the disable-all first.
-    enabled-checks:
-      - appendCombine
-      - argOrder
-      - assignOp
-      - badCond
-      - boolExprSimplify
-      - builtinShadow
-      - captLocal
-      - caseOrder
-      - codegenComment
-      - commentedOutCode
-      - commentedOutImport
-      - defaultCaseOrder
-      - deprecatedComment
-      - docStub
-      - dupArg
-      - dupBranchBody
-      - dupCase
-      - dupSubExpr
-      - elseif
-      - emptyFallthrough
-      - equalFold
-      - flagDeref
-      - flagName
-      - hexLiteral
-      - indexAlloc
-      - initClause
-      - methodExprCall
-      - nilValReturn
-      - octalLiteral
-      - offBy1
-      - rangeExprCopy
-      - regexpMust
-      - sloppyLen
-      - stringXbytes
-      - switchTrue
-      - typeAssertChain
-      - typeSwitchVar
-      - typeUnparen
-      - underef
-      - unlambda
-      - unnecessaryBlock
-      - unslice
-      - valSwap
-      - weakCond
-  depguard:
-    rules:
-      DenyGogoProtobuf:
-        files:
-          - $all
-        deny:
-          - pkg: github.com/gogo/protobuf
-            desc: "gogo/protobuf is deprecated, use golang/protobuf"
-  gosec:
-    includes:
-      - G401
-      - G402
-      - G404
-issues:
-  # List of regexps of issue texts to exclude, empty list by default.
-  # But independently from this option we use default exclude patterns,
-  # it can be disabled by `exclude-use-default: false`. To list all
-  # excluded by default patterns execute `golangci-lint run --help`
-  exclude:
-    - composite literal uses unkeyed fields
-  # Which dirs to exclude: issues from them won't be reported.
-  # Can use regexp here: `generated.*`, regexp is applied on full path,
-  # including the path prefix if one is set.
-  # Default dirs are skipped independently of this option's value (see exclude-dirs-use-default).
-  # "/" will be replaced by current OS file path separator to properly work on Windows.
-  # Default: []
-  exclude-dirs:
-    - genfiles$
-    - vendor$
-  # Which files to exclude: they will be analyzed, but issues from them won't be reported.
-  # There is no need to include all autogenerated files,
-  # we confidently recognize autogenerated files.
-  # If it's not, please let us know.
-  # "/" will be replaced by current OS file path separator to properly work on Windows.
-  # Default: []
-  exclude-files:
-    - ".*\\.pb\\.go"
-    - ".*\\.gen\\.go"
-  exclude-rules:
-    # Exclude some linters from running on test files.
-    - path: _test\.go$|^tests/|^samples/
-      linters:
-        - errcheck
-        - maligned
-    - path: _test\.go$
-      text: "dot-imports: should not use dot imports"
-    # We need to use the deprecated module since the jsonpb replacement is not backwards compatible.
-    - linters: [staticcheck]
-      text: "SA1019: package github.com/golang/protobuf/jsonpb"
-    - linters: [staticcheck]
-      text: 'SA1019: "github.com/golang/protobuf/jsonpb"'
-    # This is not helpful. The new function is not very usable and the current function will not be removed
-    - linters: [staticcheck]
-      text: 'SA1019: grpc.Dial is deprecated: use NewClient instead'
-    - linters: [staticcheck]
-      text: 'SA1019: grpc.DialContext is deprecated: use NewClient instead'
-    - linters: [staticcheck]
-      text: "SA1019: grpc.WithBlock is deprecated"
-    - linters: [staticcheck]
-      text: "SA1019: grpc.FailOnNonTempDialError"
-    - linters: [staticcheck]
-      text: "SA1019: grpc.WithReturnConnectionError"
-  # Independently from option `exclude` we use default exclude patterns,
-  # it can be disabled by this option. To list all
-  # excluded by default patterns execute `golangci-lint run --help`.
-  # Default value for this option is true.
-  exclude-use-default: true
-  # Maximum issues count per one linter.
-  # Set to 0 to disable.
-  # Default: 50
-  max-issues-per-linter: 0
-  # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
-  max-same-issues: 0
+version: "2"
+run:
+  build-tags:
+    - integ
+    - integfuzz
+linters:
+  default: none
+  enable:
+    - copyloopvar
+    - depguard
+    - errcheck
+    - gocritic
+    - gosec
+    - govet
+    - ineffassign
+    - lll
+    - misspell
+    - revive
+    - staticcheck
+    - unconvert
+    - unparam
+    - unused
+  settings:
+    depguard:
+      rules:
+        DenyGogoProtobuf:
+          files:
+            - $all
+          deny:
+            - pkg: github.com/gogo/protobuf
+              desc: gogo/protobuf is deprecated, use golang/protobuf
+    errcheck:
+      check-type-assertions: false
+      check-blank: false
+    gocritic:
+      disable-all: true
+      enabled-checks:
+        - appendCombine
+        - argOrder
+        - assignOp
+        - badCond
+        - boolExprSimplify
+        - builtinShadow
+        - captLocal
+        - caseOrder
+        - codegenComment
+        - commentedOutCode
+        - commentedOutImport
+        - defaultCaseOrder
+        - deprecatedComment
+        - docStub
+        - dupArg
+        - dupBranchBody
+        - dupCase
+        - dupSubExpr
+        - elseif
+        - emptyFallthrough
+        - equalFold
+        - flagDeref
+        - flagName
+        - hexLiteral
+        - indexAlloc
+        - initClause
+        - methodExprCall
+        - nilValReturn
+        - octalLiteral
+        - offBy1
+        - rangeExprCopy
+        - regexpMust
+        - sloppyLen
+        - stringXbytes
+        - switchTrue
+        - typeAssertChain
+        - typeSwitchVar
+        - typeUnparen
+        - underef
+        - unlambda
+        - unnecessaryBlock
+        - unslice
+        - valSwap
+        - weakCond
+    gosec:
+      includes:
+        - G401
+        - G402
+        - G404
+    govet:
+      disable:
+        - shadow
+    lll:
+      line-length: 160
+      tab-width: 1
+    misspell:
+      locale: US
+      ignore-rules:
+        - cancelled
+    revive:
+      confidence: 0
+      severity: warning
+      rules:
+        - name: blank-imports
+        - name: context-keys-type
+        - name: time-naming
+        - name: var-declaration
+        - name: unexported-return
+        - name: errorf
+        - name: context-as-argument
+        - name: dot-imports
+        - name: error-return
+        - name: error-strings
+        - name: error-naming
+        - name: increment-decrement
+        - name: var-naming
+        - name: package-comments
+        - name: range
+        - name: receiver-naming
+        - name: indent-error-flow
+        - name: superfluous-else
+        - name: modifies-parameter
+        - name: unreachable-code
+        - name: struct-tag
+        - name: constant-logical-expr
+        - name: bool-literal-in-expr
+        - name: redefines-builtin-id
+        - name: imports-blocklist
+        - name: range-val-in-closure
+        - name: range-val-address
+        - name: waitgroup-by-value
+        - name: atomic
+        - name: call-to-gc
+        - name: duplicated-imports
+        - name: string-of-int
+        - name: defer
+          arguments:
+            - - call-chain
+        - name: unconditional-recursion
+        - name: identical-branches
+    unparam:
+      check-exported: false
+  exclusions:
+    generated: lax
+    presets:
+      - comments
+      - common-false-positives
+      - legacy
+      - std-error-handling
+    rules:
+      - linters:
+          - errcheck
+          - maligned
+        path: _test\.go$|tests/|samples/
+      - path: _test\.go$
+        text: 'dot-imports: should not use dot imports'
+      - linters:
+          - staticcheck
+        text: 'SA1019: package github.com/golang/protobuf/jsonpb'
+      - linters:
+          - staticcheck
+        text: 'SA1019: "github.com/golang/protobuf/jsonpb"'
+      - linters:
+          - staticcheck
+        text: 'SA1019: grpc.Dial is deprecated: use NewClient instead'
+      - linters:
+          - staticcheck
+        text: 'SA1019: grpc.DialContext is deprecated: use NewClient instead'
+      - linters:
+          - staticcheck
+        text: 'SA1019: grpc.WithBlock is deprecated'
+      - linters:
+          - staticcheck
+        text: 'SA1019: grpc.FailOnNonTempDialError'
+      - linters:
+          - staticcheck
+        text: 'SA1019: grpc.WithReturnConnectionError'
+      - path: (.+)\.go$
+        text: composite literal uses unkeyed fields
+      # TODO: remove following rule in the future
+      - linters:
+          - staticcheck
+        text: 'QF'
+      - linters:
+          - staticcheck
+        text: 'ST1005'
+      - linters:
+          - staticcheck
+        text: 'S1007'
+      # TODO: remove once we have updated package names
+      - linters:
+          - revive
+        text: "var-naming: avoid meaningless package names"
+    paths:
+      - .*\.pb\.go
+      - .*\.gen\.go
+      - genfiles$
+      - vendor$
+      - third_party$
+      - builtin$
+      - examples$
+issues:
+  max-issues-per-linter: 0
+  max-same-issues: 0
+formatters:
+  enable:
+    - gci
+    - gofumpt
+    - goimports
+  settings:
+    gci:
+      sections:
+        - standard
+        - default
+        - prefix(istio.io/)
+    goimports:
+      local-prefixes:
+        - istio.io/
+  exclusions:
+    generated: lax
+    paths:
+      - .*\.pb\.go
+      - .*\.gen\.go
+      - genfiles$
+      - vendor$
+      - third_party$
+      - builtin$
+      - examples$
```
```diff
@@ -140,3 +140,6 @@ allowlisted_modules:
 
   # Apache 2.0
   - github.com/aws/smithy-go
+
+  # Simplified BSD License: https://github.com/gomarkdown/markdown/blob/master/LICENSE.txt
+  - github.com/gomarkdown/markdown
```
```diff
@@ -21,4 +21,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-golangci-lint run --fix -c ./common/config/.golangci-format.yml
+golangci-lint run --fix -c ./common/config/.golangci.yml
```
```diff
@@ -32,7 +32,7 @@ set -x
 ####################################################################
 
 # DEFAULT_KIND_IMAGE is used to set the Kubernetes version for KinD unless overridden in params to setup_kind_cluster(s)
-DEFAULT_KIND_IMAGE="gcr.io/istio-testing/kind-node:v1.32.0"
+DEFAULT_KIND_IMAGE="gcr.io/istio-testing/kind-node:v1.33.1"
 
 # the default kind cluster should be ipv4 if not otherwise specified
 KIND_IP_FAMILY="${KIND_IP_FAMILY:-ipv4}"
```
```diff
@@ -21,8 +21,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+GOLANGCILINT_RUN_ARGS=(--output.text.path stdout --output.junit-xml.path "${ARTIFACTS}"/junit-lint.xml)
+
 if [[ "${ARTIFACTS}" != "" ]]; then
-  golangci-lint run -v -c ./common/config/.golangci.yml --out-format colored-line-number,junit-xml:"${ARTIFACTS}"/junit-lint.xml
+  golangci-lint run -v -c ./common/config/.golangci.yml "${GOLANGCILINT_RUN_ARGS[@]}"
 else
   golangci-lint run -v -c ./common/config/.golangci.yml
 fi
```
```diff
@@ -47,7 +47,9 @@ read -ra DOCKER_RUN_OPTIONS <<< "${DOCKER_RUN_OPTIONS:-}"
     "${DOCKER_RUN_OPTIONS[@]}" \
     --init \
     --sig-proxy=true \
+    --cap-add=SYS_ADMIN \
     ${DOCKER_SOCKET_MOUNT:--v /var/run/docker.sock:/var/run/docker.sock} \
+    -e DOCKER_HOST=${DOCKER_SOCKET_HOST:-unix:///var/run/docker.sock} \
     $CONTAINER_OPTIONS \
     --env-file <(env | grep -v ${ENV_BLOCKLIST}) \
     -e IN_BUILD_CONTAINER=1 \
```
```diff
@@ -75,7 +75,7 @@ fi
 TOOLS_REGISTRY_PROVIDER=${TOOLS_REGISTRY_PROVIDER:-gcr.io}
 PROJECT_ID=${PROJECT_ID:-istio-testing}
 if [[ "${IMAGE_VERSION:-}" == "" ]]; then
-  IMAGE_VERSION=master-6bfe0028e941afdae35a3c5d4374bc08e3c04153
+  IMAGE_VERSION=master-672e6089ff843019a2b28cf9e87754c7b74358ea
 fi
 if [[ "${IMAGE_NAME:-}" == "" ]]; then
   IMAGE_NAME=build-tools
```
```diff
@@ -4,7 +4,7 @@ targets = [
     { triple = "x86_64-unknown-linux-gnu" },
     { triple = "aarch64-unknown-linux-gnu" },
 ]
-features = ["tls-boring", "tls-ring"]
+features = ["tls-boring", "tls-ring", "tls-aws-lc", "tls-openssl" ]
 
 [advisories]
 version = 2
```
```diff
@@ -17,7 +17,7 @@ use std::os::fd::AsRawFd;
 use ztunnel::test_helpers::inpod::StartZtunnelMessage;
 use ztunnel::{
     inpod::istio::zds::WorkloadInfo,
-    test_helpers::inpod::{start_ztunnel_server, Message},
+    test_helpers::inpod::{Message, start_ztunnel_server},
 };
 
 const PROXY_WORKLOAD_INFO: &str = "PROXY_WORKLOAD_INFO";
```
```diff
@@ -2,7 +2,7 @@
 name = "ztunnel-fuzz"
 version = "0.0.0"
 publish = false
-edition = "2021"
+edition = "2024"
 
 [package.metadata]
 cargo-fuzz = true
```
```diff
@@ -14,7 +14,7 @@
 
 #![no_main]
 
-use hyper::{http::HeaderValue, HeaderMap};
+use hyper::{HeaderMap, http::HeaderValue},
 use libfuzzer_sys::fuzz_target;
 use ztunnel::baggage::parse_baggage_header;
 use ztunnel::proxy::BAGGAGE_HEADER;
```
[new image file added: 72 KiB]

```diff
@@ -0,0 +1,4 @@
+<svg width="712" height="712" viewBox="0 0 712 712" fill="none" xmlns="http://www.w3.org/2000/svg">
+<rect width="712" height="712" fill="#151927"/>
+<path d="M355.982 149.89C255.737 149.89 174.181 231.446 174.181 331.691V534.901C174.181 549.91 186.362 562.092 201.372 562.092C216.382 562.092 228.563 549.91 228.563 534.901V331.691C228.563 261.429 285.72 204.273 355.982 204.273C426.244 204.273 483.4 261.429 483.4 331.691L483.31 507.71H324.222L448.324 346.991C456.263 336.93 457.822 322.954 452.239 311.225C446.819 299.823 435.797 292.735 423.453 292.735H262.244V347.118H379.511L255.41 507.855C247.47 517.915 245.911 531.892 251.495 543.62C256.915 555.022 267.936 562.11 280.281 562.11H483.346C513.383 562.11 537.819 537.674 537.819 507.637V331.691C537.801 231.446 456.245 149.89 355.982 149.89Z" fill="#466BB0"/>
+</svg>
```

[new SVG file added: 820 B]

[new image file added: 72 KiB]

```diff
@@ -0,0 +1,4 @@
+<svg width="712" height="712" viewBox="0 0 712 712" fill="none" xmlns="http://www.w3.org/2000/svg">
+<rect width="712" height="712" fill="white"/>
+<path d="M355.982 149.89C255.737 149.89 174.181 231.446 174.181 331.691V534.901C174.181 549.91 186.362 562.092 201.372 562.092C216.382 562.092 228.563 549.91 228.563 534.901V331.691C228.563 261.429 285.72 204.273 355.982 204.273C426.244 204.273 483.4 261.429 483.4 331.691L483.31 507.71H324.222L448.324 346.991C456.263 336.93 457.822 322.954 452.239 311.225C446.819 299.823 435.797 292.735 423.453 292.735H262.244V347.118H379.511L255.41 507.855C247.47 517.915 245.911 531.892 251.495 543.62C256.915 555.022 267.936 562.11 280.281 562.11H483.346C513.383 562.11 537.819 537.674 537.819 507.637V331.691C537.801 231.446 456.245 149.89 355.982 149.89Z" fill="#466BB0"/>
+</svg>
```

[new SVG file added: 818 B]

[new image file added: 69 KiB]

```diff
@@ -0,0 +1,3 @@
+<svg width="712" height="712" viewBox="0 0 712 712" fill="none" xmlns="http://www.w3.org/2000/svg">
+<path d="M355.982 149.89C255.737 149.89 174.181 231.445 174.181 331.691V534.9C174.181 549.91 186.362 562.092 201.372 562.092C216.382 562.092 228.563 549.91 228.563 534.9V331.691C228.563 261.428 285.72 204.272 355.982 204.272C426.244 204.272 483.4 261.428 483.4 331.691L483.31 507.709H324.222L448.324 346.99C456.263 336.93 457.822 322.953 452.239 311.225C446.819 299.823 435.797 292.735 423.453 292.735H262.244V347.117H379.511L255.41 507.854C247.47 517.915 245.911 531.891 251.495 543.62C256.915 555.022 267.936 562.11 280.281 562.11H483.346C513.383 562.11 537.819 537.674 537.819 507.637V331.691C537.801 231.445 456.245 149.89 355.982 149.89Z" fill="#466BB0"/>
+</svg>
```

[new SVG file added: 768 B]

[new image file added: 72 KiB]

```diff
@@ -0,0 +1,4 @@
+<svg width="712" height="712" viewBox="0 0 712 712" fill="none" xmlns="http://www.w3.org/2000/svg">
+<rect width="712" height="712" fill="white"/>
+<path d="M355.982 149.89C255.737 149.89 174.181 231.445 174.181 331.691V534.9C174.181 549.91 186.362 562.092 201.372 562.092C216.382 562.092 228.563 549.91 228.563 534.9V331.691C228.563 261.428 285.72 204.272 355.982 204.272C426.244 204.272 483.4 261.428 483.4 331.691L483.31 507.709H324.222L448.324 346.99C456.263 336.93 457.822 322.953 452.239 311.225C446.819 299.823 435.797 292.735 423.453 292.735H262.244V347.117H379.511L255.41 507.854C247.47 517.915 245.911 531.891 251.495 543.62C256.915 555.022 267.936 562.11 280.281 562.11H483.346C513.383 562.11 537.819 537.674 537.819 507.637V331.691C537.801 231.445 456.245 149.89 355.982 149.89Z" fill="#151927"/>
+</svg>
```

[new SVG file added: 814 B]

[new image file added: 71 KiB]

```diff
@@ -0,0 +1,3 @@
+<svg width="712" height="712" viewBox="0 0 712 712" fill="none" xmlns="http://www.w3.org/2000/svg">
+<path d="M355.982 149.89C255.737 149.89 174.181 231.445 174.181 331.691V534.9C174.181 549.91 186.362 562.092 201.372 562.092C216.382 562.092 228.563 549.91 228.563 534.9V331.691C228.563 261.428 285.72 204.272 355.982 204.272C426.244 204.272 483.4 261.428 483.4 331.691L483.31 507.709H324.222L448.324 346.99C456.263 336.93 457.822 322.953 452.239 311.225C446.819 299.823 435.797 292.735 423.453 292.735H262.244V347.117H379.511L255.41 507.854C247.47 517.915 245.911 531.891 251.495 543.62C256.915 555.022 267.936 562.11 280.281 562.11H483.346C513.383 562.11 537.819 537.674 537.819 507.637V331.691C537.801 231.445 456.245 149.89 355.982 149.89Z" fill="#151927"/>
+</svg>
```

[new SVG file added: 768 B]

[new image file added: 70 KiB]

```diff
@@ -0,0 +1,4 @@
+<svg width="712" height="712" viewBox="0 0 712 712" fill="none" xmlns="http://www.w3.org/2000/svg">
+<rect width="712" height="712" fill="white"/>
+<path d="M355.982 149.89C255.737 149.89 174.181 231.445 174.181 331.691V534.9C174.181 549.91 186.362 562.092 201.372 562.092C216.382 562.092 228.563 549.91 228.563 534.9V331.691C228.563 261.428 285.72 204.272 355.982 204.272C426.244 204.272 483.4 261.428 483.4 331.691L483.31 507.709H324.222L448.324 346.99C456.263 336.93 457.822 322.953 452.239 311.225C446.819 299.823 435.797 292.735 423.453 292.735H262.244V347.117H379.511L255.41 507.854C247.47 517.915 245.911 531.891 251.495 543.62C256.915 555.022 267.936 562.11 280.281 562.11H483.346C513.383 562.11 537.819 537.674 537.819 507.637V331.691C537.801 231.445 456.245 149.89 355.982 149.89Z" fill="#67696D"/>
+</svg>
```

[new SVG file added: 814 B]

[new image file added: 70 KiB]

```diff
@@ -0,0 +1,3 @@
+<svg width="712" height="712" viewBox="0 0 712 712" fill="none" xmlns="http://www.w3.org/2000/svg">
+<path d="M355.982 149.89C255.737 149.89 174.181 231.445 174.181 331.691V534.9C174.181 549.91 186.362 562.092 201.372 562.092C216.382 562.092 228.563 549.91 228.563 534.9V331.691C228.563 261.428 285.72 204.272 355.982 204.272C426.244 204.272 483.4 261.428 483.4 331.691L483.31 507.709H324.222L448.324 346.99C456.263 336.93 457.822 322.953 452.239 311.225C446.819 299.823 435.797 292.735 423.453 292.735H262.244V347.117H379.511L255.41 507.854C247.47 517.915 245.911 531.891 251.495 543.62C256.915 555.022 267.936 562.11 280.281 562.11H483.346C513.383 562.11 537.819 537.674 537.819 507.637V331.691C537.801 231.445 456.245 149.89 355.982 149.89Z" fill="#67696D"/>
+</svg>
```

[new SVG file added: 768 B]

[new image file added: 73 KiB]

```diff
@@ -0,0 +1,4 @@
+<svg width="712" height="712" viewBox="0 0 712 712" fill="none" xmlns="http://www.w3.org/2000/svg">
+<rect width="712" height="712" fill="#466BB0"/>
+<path d="M355.982 149.89C255.737 149.89 174.181 231.446 174.181 331.691V534.901C174.181 549.91 186.362 562.092 201.372 562.092C216.382 562.092 228.563 549.91 228.563 534.901V331.691C228.563 261.429 285.72 204.273 355.982 204.273C426.244 204.273 483.4 261.429 483.4 331.691L483.31 507.71H324.222L448.324 346.991C456.263 336.93 457.822 322.954 452.239 311.225C446.819 299.823 435.797 292.735 423.453 292.735H262.244V347.118H379.511L255.41 507.855C247.47 517.915 245.911 531.892 251.495 543.62C256.915 555.022 267.936 562.11 280.281 562.11H483.346C513.383 562.11 537.819 537.674 537.819 507.637V331.691C537.801 231.446 456.245 149.89 355.982 149.89Z" fill="white"/>
+</svg>
```

[new SVG file added: 818 B]
|
@@ -16,6 +16,7 @@ syntax = "proto3";

 package istio.workload;

+import "google/protobuf/wrappers.proto";
 import "google/protobuf/any.proto";

 option go_package="pkg/workloadapi";

@@ -267,6 +268,11 @@ message Workload {
   // Extension provides a mechanism to attach arbitrary additional configuration to an object.
   repeated Extension extensions = 26;

+  // Capacity for this workload.
+  // This represents the amount of traffic the workload can handle, relative to other workloads
+  // If unset, the capacity is default to 1.
+  google.protobuf.UInt32Value capacity = 27;
+
   // Reservations for deleted fields.
   reserved 15;
 }
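Note: `capacity` reads as a relative weight rather than an absolute rate, and the `UInt32Value` wrapper lets an unset field (treated as 1) be distinguished from an explicit value. A minimal sketch of capacity-weighted selection under that reading; the `Backend` type and the selection strategy are illustrative only, not ztunnel's actual load balancer:

// Hypothetical capacity-weighted pick; `capacity` defaults to 1 when unset,
// per the proto comment above.
struct Backend {
    name: &'static str,
    capacity: u32,
}

fn pick<'a>(backends: &'a [Backend], mut roll: u64) -> Option<&'a Backend> {
    // Each backend receives a share of traffic proportional to its capacity.
    let total: u64 = backends.iter().map(|b| b.capacity as u64).sum();
    if total == 0 {
        return None;
    }
    roll %= total;
    for b in backends {
        if roll < b.capacity as u64 {
            return Some(b);
        }
        roll -= b.capacity as u64;
    }
    None
}

fn main() {
    let backends = [
        Backend { name: "a", capacity: 1 },
        Backend { name: "b", capacity: 3 },
    ];
    // With capacities 1 and 3, "b" is chosen for 3 of every 4 rolls.
    assert_eq!(pick(&backends, 0).unwrap().name, "a");
    assert_eq!(pick(&backends, 1).unwrap().name, "b");
}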
@@ -31,6 +31,10 @@ if [[ "$TLS_MODE" == "boring" ]]; then
    sed -i 's/x86_64/arm64/g' .cargo/config.toml
  fi
  cargo build --release --no-default-features -F tls-boring
+elif [[ "$TLS_MODE" == "aws-lc" ]]; then
+  cargo build --release --no-default-features -F tls-aws-lc
+elif [[ "$TLS_MODE" == "openssl" ]]; then
+  cargo build --release --no-default-features -F tls-openssl
 else
  cargo build --release
 fi
src/admin.rs (72 changed lines)

@@ -13,7 +13,7 @@
 // limitations under the License.

 use crate::config::Config;
-use crate::hyper_util::{empty_response, plaintext_response, Server};
+use crate::hyper_util::{Server, empty_response, plaintext_response};
 use crate::identity::SecretManager;
 use crate::state::DemandProxyState;
 use crate::tls::Certificate;

@@ -25,7 +25,7 @@ use base64::engine::general_purpose::STANDARD;
 use bytes::Bytes;
 use http_body_util::Full;
 use hyper::body::Incoming;
-use hyper::{header::HeaderValue, header::CONTENT_TYPE, Request, Response};
+use hyper::{Request, Response, header::CONTENT_TYPE, header::HeaderValue};
 use std::borrow::Borrow;
 use std::collections::HashMap;
@@ -86,6 +86,7 @@ pub struct CertsDump {
     identity: String,
     state: String,
     cert_chain: Vec<CertDump>,
+    root_certs: Vec<CertDump>,
 }

 impl Service {
@@ -220,10 +221,12 @@ async fn dump_certs(cert_manager: &SecretManager) -> Vec<CertsDump> {
         Unavailable(err) => dump.state = format!("Unavailable: {err}"),
         Available(certs) => {
             dump.state = "Available".to_string();
-            dump.cert_chain = std::iter::once(&certs.cert)
-                .chain(certs.chain.iter())
+            dump.cert_chain = certs
+                .cert_and_intermediates()
+                .iter()
                 .map(dump_cert)
                 .collect();
+            dump.root_certs = certs.roots.iter().map(dump_cert).collect();
         }
     };
     dump
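Note: this hunk changes the shape of the certificate dump. The leaf and intermediates stay in `certChain`, while trust anchors move into a parallel `rootCerts` list instead of being mixed into the chain. A hedged sketch of the resulting JSON shape; the field names come from the struct and test JSON in this file, but the serde attributes here are assumed, not copied from ztunnel:

use serde::Serialize;

// Illustrative mirror of CertsDump; camelCase per the JSON in the tests below.
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct CertDump {
    pem: String,
    serial_number: String,
    valid_from: String,
    expiration_time: String,
}

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct CertsDump {
    identity: String,
    state: String,
    cert_chain: Vec<CertDump>,
    root_certs: Vec<CertDump>, // new: roots no longer ride along in cert_chain
}

fn main() {
    let dump = CertsDump {
        identity: "spiffe://trust_domain/ns/namespace/sa/sa-0".into(),
        state: "Available".into(),
        cert_chain: vec![],
        root_certs: vec![],
    };
    // Prints {"identity":"...","state":"Available","certChain":[],"rootCerts":[]}
    println!("{}", serde_json::to_string(&dump).unwrap());
}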
@@ -387,7 +390,7 @@ fn change_log_level(reset: bool, level: &str) -> Response<Full<Bytes>> {
             // Invalid level provided
             return plaintext_response(
                 hyper::StatusCode::BAD_REQUEST,
-                format!("Invalid level provided: {}\n{}", level, HELP_STRING),
+                format!("Invalid level provided: {level}\n{HELP_STRING}"),
             );
         };
     }

@@ -395,7 +398,7 @@ fn change_log_level(reset: bool, level: &str) -> Response<Full<Bytes>> {
         Ok(_) => list_loggers(),
         Err(e) => plaintext_response(
             hyper::StatusCode::BAD_REQUEST,
-            format!("Failed to set new level: {}\n{}", e, HELP_STRING),
+            format!("Failed to set new level: {e}\n{HELP_STRING}"),
         ),
     }
 }
@@ -441,17 +444,16 @@ fn base64_encode(data: String) -> String {

 #[cfg(test)]
 mod tests {
+    use super::ConfigDump;
     use super::change_log_level;
     use super::dump_certs;
     use super::handle_config_dump;
-    use super::ConfigDump;
     use crate::admin::HELP_STRING;
-    use crate::config::construct_config;
     use crate::config::ProxyConfig;
+    use crate::config::construct_config;
     use crate::identity;
     use crate::strng;
     use crate::test_helpers::{get_response_str, helpers, new_proxy_state};
-    use crate::xds::istio::security::string_match::MatchType as XdsMatchType;
     use crate::xds::istio::security::Address as XdsAddress;
     use crate::xds::istio::security::Authorization as XdsAuthorization;
     use crate::xds::istio::security::Clause as XdsClause;

@@ -459,7 +461,7 @@ mod tests {
     use crate::xds::istio::security::Rule as XdsRule;
     use crate::xds::istio::security::ServiceAccountMatch as XdsServiceAccountMatch;
     use crate::xds::istio::security::StringMatch as XdsStringMatch;
-    use crate::xds::istio::workload::gateway_address::Destination as XdsDestination;
+    use crate::xds::istio::security::string_match::MatchType as XdsMatchType;
     use crate::xds::istio::workload::GatewayAddress as XdsGatewayAddress;
     use crate::xds::istio::workload::LoadBalancing as XdsLoadBalancing;
     use crate::xds::istio::workload::Locality as XdsLocality;

@@ -469,6 +471,7 @@ mod tests {
     use crate::xds::istio::workload::Service as XdsService;
     use crate::xds::istio::workload::Workload as XdsWorkload;
     use crate::xds::istio::workload::WorkloadType as XdsWorkloadType;
+    use crate::xds::istio::workload::gateway_address::Destination as XdsDestination;
     use bytes::Bytes;
     use http_body_util::BodyExt;
     use std::collections::HashMap;
@@ -542,11 +545,13 @@ mod tests {
         let want = serde_json::json!([
             {
                 "certChain": [],
+                "rootCerts": [],
                 "identity": "spiffe://error/ns/forgotten/sa/sa-failed",
                 "state": "Unavailable: the identity is no longer needed"
             },
             {
                 "certChain": [],
+                "rootCerts": [],
                 "identity": "spiffe://test/ns/test/sa/sa-pending",
                 "state": "Initializing"
             },

@@ -554,15 +559,17 @@ mod tests {
                 "certChain": [
                     {
                         "expirationTime": "2023-03-11T12:57:26Z",
-                        "pem": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNXekNDQVVPZ0F3SUJBZ0lVWnlUOTI5c3d0QjhPSG1qUmFURWFENnlqcWc0d0RRWUpLb1pJaHZjTgpBUUVMQlFBd0dERVdNQlFHQTFVRUNnd05ZMngxYzNSbGNpNXNiMk5oYkRBZUZ3MHlNekF6TVRFd05UVTMKTWpaYUZ3MHlNekF6TVRFeE1qVTNNalphTUJneEZqQVVCZ05WQkFvTURXTnNkWE4wWlhJdWJHOWpZV3d3CldUQVRCZ2NxaGtqT1BRSUJCZ2dxaGtqT1BRTUJCd05DQUFSYXIyQm1JWUFndkptT3JTcENlRlE3OUpQeQo4Y3c0K3pFRThmcXI1N2svdW1NcDVqWFpFR0JwZWRCSVkrcWZtSlBYRWlyYTlFOTJkU21rZks1QUtNV3gKbzJnd1pqQTFCZ05WSFJFRUxqQXNoaXB6Y0dsbVptVTZMeTkwY25WemRGOWtiMjFoYVc0dmJuTXZibUZ0ClpYTndZV05sTDNOaEwzTmhMVEF3RGdZRFZSMFBBUUgvQkFRREFnV2dNQjBHQTFVZEpRUVdNQlFHQ0NzRwpBUVVGQndNQkJnZ3JCZ0VGQlFjREFqQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFjTzNlMjAvK0ZrRkwKUmttMTNtQlFNYjVPUmpTOGhwWjBRMkZKd2wrSXV4TGY2MUJDZS9RVlhOVklpSUdlMXRVRTh5UTRoMXZrCjhVb01sSmpTQkdiM3VDdHVLRFVKN0xOM1VBUmV4YU1uQkZobC9mWmQxU3ZZcmhlWjU3WDlrTElVa2hkSQpDUVdxOFVFcXBWZEloNGxTZjhoYnFRQksvUWhCN0I2bUJOSW5uMThZTEhiOEpmU0N2aXBWYTRuNXByTlYKbVNWc1JPMUtpY1FQYVhpUzJta0xBWVFRanROYkVJdnJwQldCYytmVWZPaEQ0YmhwUFVmSVFIN1dFcUZLCm5TMnQwSmh1d08zM2FoUDhLZVBWWDRDRkJ4VXc2SDhrd1dJUkh5dW9YbGFwMmVST1EycFRyYmtmVjJZbgpmWjZxV0huREJ5ZjN6bkFQQVM1ZnZ4b1RoKzBYTHc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==",
-                        "serialNumber": "588850990443535479077311695632745359443207891470",
+                        "pem": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNYRENDQVVTZ0F3SUJBZ0lVTDVaZ0toTEI1YUt3YXRuZE1sR25CZWZ3Qkxnd0RRWUpLb1pJaHZjTgpBUUVMQlFBd0dERVdNQlFHQTFVRUNnd05ZMngxYzNSbGNpNXNiMk5oYkRBZUZ3MHlNekF6TVRFd05UVTMKTWpaYUZ3MHlNekF6TVRFeE1qVTNNalphTUJneEZqQVVCZ05WQkFvTURXTnNkWE4wWlhJdWJHOWpZV3d3CldUQVRCZ2NxaGtqT1BRSUJCZ2dxaGtqT1BRTUJCd05DQUFSYXIyQm1JWUFndkptT3JTcENlRlE3OUpQeQo4Y3c0K3pFRThmcXI1N2svdW1NcDVqWFpFR0JwZWRCSVkrcWZtSlBYRWlyYTlFOTJkU21rZks1QUtNV3gKbzJrd1p6QTFCZ05WSFJFRUxqQXNoaXB6Y0dsbVptVTZMeTkwY25WemRGOWtiMjFoYVc0dmJuTXZibUZ0ClpYTndZV05sTDNOaEwzTmhMVEF3RHdZRFZSMFBBUUgvQkFVREF3ZWdBREFkQmdOVkhTVUVGakFVQmdncgpCZ0VGQlFjREFRWUlLd1lCQlFVSEF3SXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBQ2xKZVJpdmpLYVkKdm5TUHhjUXZPNTNxVFpiUUdHWFc5OHI5Qm1FWGUwYm5YeXZlMWJUVlNYcWVNMXZHdE1DalJGai91dE9VCkRwcHphQVJGRlRzenN2QWdJNStwNFhpbVU4U0FwTlhUYVZjWHkwcG04c2dIWUF6U2drMExBcW1wTWJxbwpvNDB6dmFxVk9nQ1F0c2Vobkg5SCtMQXd1WDl1T08vY2J5NnRidjhrSkhrMWZOTmZ6RTlxZVUwUGFhWWQKZjZXQzhkaWliRGJoN0tjR29rSG80NDMvT05Mb0tJZU9aTFJIbXBFdDdyYnprTDl4elNlNnVZaGQ1SlNGCk55dlY2T3Zoc1FXVVpqd1BmanUvUVJUTzFPdWgrUUZYaTAxNFpvUjRVRnRZaDRjcXphcUlpYVQ0MERyMgpNTHk4eEhJUzRmM1ltUXJEei9VN1pUSG9xaWFLaVBZPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==",
+                        "serialNumber": "271676055104741785552467469040731750696653685944",
                         "validFrom": "2023-03-11T05:57:26Z"
                     },
+                ],
+                "rootCerts": [
                     {
-                        "expirationTime": "2296-12-24T18:31:28Z",
-                        "pem": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURFekNDQWZ1Z0F3SUJBZ0lVQytjLzYwZStGMWVFKzdWcXhuYVdjT09abm1Fd0RRWUpLb1pJaHZjTgpBUUVMQlFBd0dERVdNQlFHQTFVRUNnd05ZMngxYzNSbGNpNXNiMk5oYkRBZ0Z3MHlNekF6TVRFeE9ETXgKTWpoYUdBOHlNamsyTVRJeU5ERTRNekV5T0Zvd0dERVdNQlFHQTFVRUNnd05ZMngxYzNSbGNpNXNiMk5oCmJEQ0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQU1lQ1R4UEp0dWQwVXh3KwpDYWFkZFdEN2ErUUV1UVkrQlBUS0pkbk1lajBzQk1mVU1iVDE2SkxrWU5GZ3JqMVVWSEhjcFNvSUhvY3AKMnNkMzJTWTRiZGJva1Fjb3ArQmp0azU1alE0NktMWXNKZ2IyTnd2WW8xdDhFMWFldEpxRkdWN3JtZVpiCkZZZWFpKzZxN2lNamxiQ0dBdTcvVW5LSnNkR25hSlFnTjhkdTBUMUtEZ2pxS1B5SHFkc3U5a2JwQ3FpRQpYTVJtdzQvQkVoRkd6bUlEMm9VREtCMzZkdVZiZHpTRW01MVF2Z1U1SUxYSWd5VnJlak41Q0ZzQytXK3gKamVPWExFenRmSEZVb3FiM3dXaGtCdUV4bXI4MUoyaEdXOXBVTEoyd2tRZ2RmWFA3Z3RNa0I2RXlLdy94CkllYU5tTHpQSUdyWDAxelFZSWRaVHVEd01ZMENBd0VBQWFOVE1GRXdIUVlEVlIwT0JCWUVGRDhrNGYxYQpya3V3UitVUmhLQWUySVRaS1o3Vk1COEdBMVVkSXdRWU1CYUFGRDhrNGYxYXJrdXdSK1VSaEtBZTJJVFoKS1o3Vk1BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFLcm5BZVNzClNTSzMvOHp4K2h6ajZTRlhkSkE5Q1EwMkdFSjdoSHJLaWpHV1ZZZGRhbDlkQWJTNXRMZC8vcUtPOXVJcwpHZXR5L09rMmJSUTZjcXFNbGdkTnozam1tcmJTbFlXbUlYSTB5SEdtQ2lTYXpIc1hWYkVGNkl3eTN0Y1IKNHZvWFdLSUNXUGgrQzJjVGdMbWVaMEV1ekZ4cTR3Wm5DZjQwd0tvQUo5aTFhd1NyQm5FOWpXdG5wNEY0CmhXbkpUcEdreTVkUkFMRTBsLzJBYnJsMzh3Z2ZNOHI0SW90bVBUaEZLbkZlSUhVN2JRMXJZQW9xcGJBaApDdjBCTjVQakFRUldNazZib28zZjBha1MwN25sWUlWcVhoeHFjWW5PZ3drZGxUdFg5TXFHSXEyNm44bjEKTldXd25tS09qTnNrNnFSbXVsRWdlR080dnhUdlNKWWIraFU9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K",
-                        "serialNumber": "67955938755654933561614970125599055831405010529",
-                        "validFrom": "2023-03-11T18:31:28Z"
+                        "expirationTime": "2299-01-17T23:35:46Z",
+                        "pem": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURJRENDQWdpZ0F3SUJBZ0lVUmxsdFV1bTJRbTE1dFQ5end1MmtwaDR2ZWRjd0RRWUpLb1pJaHZjTgpBUUVMQlFBd0dERVdNQlFHQTFVRUNnd05ZMngxYzNSbGNpNXNiMk5oYkRBZ0Z3MHlOVEEwTURNeU16TTEKTkRaYUdBOHlNams1TURFeE56SXpNelUwTmxvd0dERVdNQlFHQTFVRUNnd05ZMngxYzNSbGNpNXNiMk5oCmJEQ0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQUxxVHVwVXlMK2pvd3FOZQpMQUxFbnlXYS9VNmgyaktCYzFYWUFtekR1MDN4S0VhM3JhU1ZzU05BYjFnN1hybmgxaTViNEg0enBtY3gKdStsZURlMDh4OEdOOFJRVjBoUlE0bkkvb0lseHhmc2NOWDZoNGwyVlRRSGNLcnFaYUFRQ2NDTVJuc2EzCk9tUFNPQmRPdTR2ZkFxeVVxMS9ici82TEczRWFQMDYxQ09lMzVWUTFhbkZJYXQrVWJ6bEcrZmpGbXZXbwpxZFdFMVFaekV4UWdXV3VKNjh6RjJBN25MTXVxc0k5cG8wR2FKcHhwajZnc0tIZ3NRZ1JoYWR4UlR3ejAKc0hrVE0rS216SkY0aTJ1NDJ3VHc5YWpzME5NZmQ5WjdBbWlvRXpnS0J3bURBdGQra04zUFdyby8vaHAxClRtOUVqTVFac2s3QmV6NVVyUDA4Y09yTXNOTUNBd0VBQWFOZ01GNHdIUVlEVlIwT0JCWUVGRzlmWGRqQgo0THN2RUpxWUxZNllQc2xWMWxXVU1COEdBMVVkSXdRWU1CYUFGRzlmWGRqQjRMc3ZFSnFZTFk2WVBzbFYKMWxXVU1BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0N3WURWUjBQQkFRREFnSUVNQTBHQ1NxR1NJYjNEUUVCCkN3VUFBNElCQVFDaXVMUzljZkNjRDNDblNGbUpOays5MkNhRXEyUmxTMXF1dmdTa3Z5ckhZNTV4cUxrYQpCbUVDU3VCT2FCT3lHNlZMaFlPMy9OeDBwRERJbUJYak1GZTRJRVJER3QvQTA0am41S2RFTGRiK1laOWUKdUZvY09xdWpucnFVYkxXT2Zra21rd3E5TDFWNjNsKzAxdGRFUlhYa0ZuWHM4QTFhUnh6U2RCSVUrZEtKCmpyRHNtUzdnK1B5dWNEZzJ2WWtTcExoMTdhTm1RdndrOWRPMlpvVHdMcW1JSEZYcHhlNW1PdmlyRVE1RQpYL1JzRW9IY0hURTNGUk0xaDBVdUI1SjN4ekVoOXpHUFRwNWljS2d1TC9vUElmUXVJdWhaRCtWNWg3ZzcKS3k1RHlNVWNLT0l1T0c2SStLdDJYaWpHMld5UHRwWEJBTXJoU2ZaM2ViQWd0WjZJdjZxdgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==",
+                        "serialNumber": "401623643733315109898464329860171355725264550359",
+                        "validFrom": "2025-04-03T23:35:46Z"
                     }
                 ],
                 "identity": "spiffe://trust_domain/ns/namespace/sa/sa-0",

@@ -572,15 +579,17 @@ mod tests {
                 "certChain": [
                     {
                         "expirationTime": "2023-03-11T13:57:26Z",
-                        "pem": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNXekNDQVVPZ0F3SUJBZ0lVWElQK29ySVF3dDZFUGRLSFdRU0VMOTM0bjdFd0RRWUpLb1pJaHZjTgpBUUVMQlFBd0dERVdNQlFHQTFVRUNnd05ZMngxYzNSbGNpNXNiMk5oYkRBZUZ3MHlNekF6TVRFd05qVTMKTWpaYUZ3MHlNekF6TVRFeE16VTNNalphTUJneEZqQVVCZ05WQkFvTURXTnNkWE4wWlhJdWJHOWpZV3d3CldUQVRCZ2NxaGtqT1BRSUJCZ2dxaGtqT1BRTUJCd05DQUFSYXIyQm1JWUFndkptT3JTcENlRlE3OUpQeQo4Y3c0K3pFRThmcXI1N2svdW1NcDVqWFpFR0JwZWRCSVkrcWZtSlBYRWlyYTlFOTJkU21rZks1QUtNV3gKbzJnd1pqQTFCZ05WSFJFRUxqQXNoaXB6Y0dsbVptVTZMeTkwY25WemRGOWtiMjFoYVc0dmJuTXZibUZ0ClpYTndZV05sTDNOaEwzTmhMVEV3RGdZRFZSMFBBUUgvQkFRREFnV2dNQjBHQTFVZEpRUVdNQlFHQ0NzRwpBUVVGQndNQkJnZ3JCZ0VGQlFjREFqQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFHV2tCY1plUEhrZisKSEpoazY5NHhDaHZLVENkVlRoNE9QNTBvWC9TdE0vK3NsazU0Y2RkcnRpOG0rdEFnai8wK0FLaFhpSTJaCjBNRFZPaEpOWTVRT1VXdkVBUWNYVTlPR2NCWmsyRWNGVW9BOC9RRzFpcVB3ejJJRGluakYrb3lTWExEdApFRGxPdW1Sa3VETWtyME51TGNZTlJuYUI0LzMreDAvdVlRM2M3TXpvUEtUQmZQdW1DY0wzbG5mR1dGR3kKc1d3b1p5V01CK1ZFdjYzK2psdTZDZmwzUGN1NEtFNHVhQUJiWHVvRkhjeU8yMW5sZVVvT3Z2VXhLZDdGCkxvQWNsVDNaSUI3dzNUcXE2MFR3UlV6ZGZkQlA5UURabEVSL1JLTDZWbnBBUVZhbXZBWmNjZFVuTWZjOAppT0N6TWVqV2tweGxXL3MrMW1nMUxzQWxyYlJMdHc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==",
-                        "serialNumber": "528170730419860468572163268563070820131458817969",
+                        "pem": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNYRENDQVVTZ0F3SUJBZ0lVSlVGNVVGbU52OVhYQlFWaDFDbFk0VFNLRng4d0RRWUpLb1pJaHZjTgpBUUVMQlFBd0dERVdNQlFHQTFVRUNnd05ZMngxYzNSbGNpNXNiMk5oYkRBZUZ3MHlNekF6TVRFd05qVTMKTWpaYUZ3MHlNekF6TVRFeE16VTNNalphTUJneEZqQVVCZ05WQkFvTURXTnNkWE4wWlhJdWJHOWpZV3d3CldUQVRCZ2NxaGtqT1BRSUJCZ2dxaGtqT1BRTUJCd05DQUFSYXIyQm1JWUFndkptT3JTcENlRlE3OUpQeQo4Y3c0K3pFRThmcXI1N2svdW1NcDVqWFpFR0JwZWRCSVkrcWZtSlBYRWlyYTlFOTJkU21rZks1QUtNV3gKbzJrd1p6QTFCZ05WSFJFRUxqQXNoaXB6Y0dsbVptVTZMeTkwY25WemRGOWtiMjFoYVc0dmJuTXZibUZ0ClpYTndZV05sTDNOaEwzTmhMVEV3RHdZRFZSMFBBUUgvQkFVREF3ZWdBREFkQmdOVkhTVUVGakFVQmdncgpCZ0VGQlFjREFRWUlLd1lCQlFVSEF3SXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBSWdscTIvNnJyWlIKa25UUmZqM201SnU0MmFycGlxVVNHR3A2Mks3L09zeDc5RmovZDBwdU1hMzFkMFhwS0w3N0F2QmtvcVk3CjFWejJKOHRzUkZhZEM1ZmFtQlRXdUN4OUE5R0V3WHEzQmllK2l1a2RGWjZqUTRsb2EybHVWWWFZanhUbgpqR3NLQm0xR0hwMHpacFFVNkdENzA2c2RaTjltaGlqWVA4RnpxWGg1TTlzTzQ4UldveElOUmhXd0pKejQKYUlaZWlRTlJWdkRNZm93MGtxdFFtN001TnQzanA2RkJjTzhGQkJvV0p3MXNCSitLME5XN0VuUG82Yyt0CjE5MkZ0Nmx0eXpvV1BSMnVIYUZENi9FRjZVTkowcTN1ejZicjNYRFg1Q3lrRjQxSEMrNHRSMjQ3RWhmZgpGQkpyUVc0dXAxdHAzdnZGYTdHYnl6bkZWUEc4M3dvPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==",
+                        "serialNumber": "212692774886610945930036647276614034927450199839",
                         "validFrom": "2023-03-11T06:57:26Z"
                     },
+                ],
+                "rootCerts": [
                     {
-                        "expirationTime": "2296-12-24T18:31:28Z",
-                        "pem": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURFekNDQWZ1Z0F3SUJBZ0lVQytjLzYwZStGMWVFKzdWcXhuYVdjT09abm1Fd0RRWUpLb1pJaHZjTgpBUUVMQlFBd0dERVdNQlFHQTFVRUNnd05ZMngxYzNSbGNpNXNiMk5oYkRBZ0Z3MHlNekF6TVRFeE9ETXgKTWpoYUdBOHlNamsyTVRJeU5ERTRNekV5T0Zvd0dERVdNQlFHQTFVRUNnd05ZMngxYzNSbGNpNXNiMk5oCmJEQ0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQU1lQ1R4UEp0dWQwVXh3KwpDYWFkZFdEN2ErUUV1UVkrQlBUS0pkbk1lajBzQk1mVU1iVDE2SkxrWU5GZ3JqMVVWSEhjcFNvSUhvY3AKMnNkMzJTWTRiZGJva1Fjb3ArQmp0azU1alE0NktMWXNKZ2IyTnd2WW8xdDhFMWFldEpxRkdWN3JtZVpiCkZZZWFpKzZxN2lNamxiQ0dBdTcvVW5LSnNkR25hSlFnTjhkdTBUMUtEZ2pxS1B5SHFkc3U5a2JwQ3FpRQpYTVJtdzQvQkVoRkd6bUlEMm9VREtCMzZkdVZiZHpTRW01MVF2Z1U1SUxYSWd5VnJlak41Q0ZzQytXK3gKamVPWExFenRmSEZVb3FiM3dXaGtCdUV4bXI4MUoyaEdXOXBVTEoyd2tRZ2RmWFA3Z3RNa0I2RXlLdy94CkllYU5tTHpQSUdyWDAxelFZSWRaVHVEd01ZMENBd0VBQWFOVE1GRXdIUVlEVlIwT0JCWUVGRDhrNGYxYQpya3V3UitVUmhLQWUySVRaS1o3Vk1COEdBMVVkSXdRWU1CYUFGRDhrNGYxYXJrdXdSK1VSaEtBZTJJVFoKS1o3Vk1BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFLcm5BZVNzClNTSzMvOHp4K2h6ajZTRlhkSkE5Q1EwMkdFSjdoSHJLaWpHV1ZZZGRhbDlkQWJTNXRMZC8vcUtPOXVJcwpHZXR5L09rMmJSUTZjcXFNbGdkTnozam1tcmJTbFlXbUlYSTB5SEdtQ2lTYXpIc1hWYkVGNkl3eTN0Y1IKNHZvWFdLSUNXUGgrQzJjVGdMbWVaMEV1ekZ4cTR3Wm5DZjQwd0tvQUo5aTFhd1NyQm5FOWpXdG5wNEY0CmhXbkpUcEdreTVkUkFMRTBsLzJBYnJsMzh3Z2ZNOHI0SW90bVBUaEZLbkZlSUhVN2JRMXJZQW9xcGJBaApDdjBCTjVQakFRUldNazZib28zZjBha1MwN25sWUlWcVhoeHFjWW5PZ3drZGxUdFg5TXFHSXEyNm44bjEKTldXd25tS09qTnNrNnFSbXVsRWdlR080dnhUdlNKWWIraFU9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K",
-                        "serialNumber": "67955938755654933561614970125599055831405010529",
-                        "validFrom": "2023-03-11T18:31:28Z"
+                        "expirationTime": "2299-01-17T23:35:46Z",
+                        "pem": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURJRENDQWdpZ0F3SUJBZ0lVUmxsdFV1bTJRbTE1dFQ5end1MmtwaDR2ZWRjd0RRWUpLb1pJaHZjTgpBUUVMQlFBd0dERVdNQlFHQTFVRUNnd05ZMngxYzNSbGNpNXNiMk5oYkRBZ0Z3MHlOVEEwTURNeU16TTEKTkRaYUdBOHlNams1TURFeE56SXpNelUwTmxvd0dERVdNQlFHQTFVRUNnd05ZMngxYzNSbGNpNXNiMk5oCmJEQ0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQUxxVHVwVXlMK2pvd3FOZQpMQUxFbnlXYS9VNmgyaktCYzFYWUFtekR1MDN4S0VhM3JhU1ZzU05BYjFnN1hybmgxaTViNEg0enBtY3gKdStsZURlMDh4OEdOOFJRVjBoUlE0bkkvb0lseHhmc2NOWDZoNGwyVlRRSGNLcnFaYUFRQ2NDTVJuc2EzCk9tUFNPQmRPdTR2ZkFxeVVxMS9ici82TEczRWFQMDYxQ09lMzVWUTFhbkZJYXQrVWJ6bEcrZmpGbXZXbwpxZFdFMVFaekV4UWdXV3VKNjh6RjJBN25MTXVxc0k5cG8wR2FKcHhwajZnc0tIZ3NRZ1JoYWR4UlR3ejAKc0hrVE0rS216SkY0aTJ1NDJ3VHc5YWpzME5NZmQ5WjdBbWlvRXpnS0J3bURBdGQra04zUFdyby8vaHAxClRtOUVqTVFac2s3QmV6NVVyUDA4Y09yTXNOTUNBd0VBQWFOZ01GNHdIUVlEVlIwT0JCWUVGRzlmWGRqQgo0THN2RUpxWUxZNllQc2xWMWxXVU1COEdBMVVkSXdRWU1CYUFGRzlmWGRqQjRMc3ZFSnFZTFk2WVBzbFYKMWxXVU1BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0N3WURWUjBQQkFRREFnSUVNQTBHQ1NxR1NJYjNEUUVCCkN3VUFBNElCQVFDaXVMUzljZkNjRDNDblNGbUpOays5MkNhRXEyUmxTMXF1dmdTa3Z5ckhZNTV4cUxrYQpCbUVDU3VCT2FCT3lHNlZMaFlPMy9OeDBwRERJbUJYak1GZTRJRVJER3QvQTA0am41S2RFTGRiK1laOWUKdUZvY09xdWpucnFVYkxXT2Zra21rd3E5TDFWNjNsKzAxdGRFUlhYa0ZuWHM4QTFhUnh6U2RCSVUrZEtKCmpyRHNtUzdnK1B5dWNEZzJ2WWtTcExoMTdhTm1RdndrOWRPMlpvVHdMcW1JSEZYcHhlNW1PdmlyRVE1RQpYL1JzRW9IY0hURTNGUk0xaDBVdUI1SjN4ekVoOXpHUFRwNWljS2d1TC9vUElmUXVJdWhaRCtWNWg3ZzcKS3k1RHlNVWNLT0l1T0c2SStLdDJYaWpHMld5UHRwWEJBTXJoU2ZaM2ViQWd0WjZJdjZxdgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==",
+                        "serialNumber": "401623643733315109898464329860171355725264550359",
+                        "validFrom": "2025-04-03T23:35:46Z"
                     }
                 ],
                 "identity": "spiffe://trust_domain/ns/namespace/sa/sa-1",

@@ -659,6 +668,7 @@ mod tests {
                 subzone: "subezone".to_string(),
             }),
             extensions: Default::default(),
+            capacity: Default::default(),
             // ..Default::default() // intentionally don't default. we want all fields populated
         };
@@ -847,17 +857,23 @@ mod tests {

         let resp = change_log_level(true, "trace");
         let resp_str = get_response_str(resp).await;
-        assert!(resp_str
-            .contains("current log level is hickory_server::server::server_future=off,trace\n"));
+        assert!(
+            resp_str
+                .contains("current log level is hickory_server::server::server_future=off,trace\n")
+        );

         let resp = change_log_level(true, "info");
         let resp_str = get_response_str(resp).await;
-        assert!(resp_str
-            .contains("current log level is hickory_server::server::server_future=off,info\n"));
+        assert!(
+            resp_str
+                .contains("current log level is hickory_server::server::server_future=off,info\n")
+        );

         let resp = change_log_level(true, "off");
         let resp_str = get_response_str(resp).await;
-        assert!(resp_str
-            .contains("current log level is hickory_server::server::server_future=off,off\n"));
+        assert!(
+            resp_str
+                .contains("current log level is hickory_server::server::server_future=off,off\n")
+        );
     }
 }
src/app.rs (26 changed lines)

@@ -22,10 +22,10 @@ use prometheus_client::registry::Registry;
 use std::net::SocketAddr;
 use std::pin::Pin;
 use std::sync::atomic::{AtomicUsize, Ordering};
-use std::sync::{mpsc, Arc};
+use std::sync::{Arc, mpsc};
 use std::thread;
 use tokio::task::JoinSet;
-use tracing::{warn, Instrument};
+use tracing::{Instrument, warn};

 use crate::identity::SecretManager;
 use crate::state::ProxyStateManager;
@@ -136,6 +136,25 @@ pub async fn build_with_cert(

     if config.proxy_mode == config::ProxyMode::Shared {
         tracing::info!("shared proxy mode - in-pod mode enabled");
+
+        // Create ztunnel inbound listener only if its specific identity and workload info are configured.
+        if let Some(inbound) = proxy_gen.create_ztunnel_self_proxy_listener().await? {
+            // Run the inbound listener in the data plane worker pool
+            let mut xds_rx_for_inbound = xds_rx.clone();
+            data_plane_pool.send(DataPlaneTask {
+                block_shutdown: true,
+                fut: Box::pin(async move {
+                    tracing::info!("Starting ztunnel inbound listener task");
+                    let _ = xds_rx_for_inbound.changed().await;
+                    tokio::task::spawn(async move {
+                        inbound.run().in_current_span().await;
+                    })
+                    .await?;
+                    Ok(())
+                }),
+            })?;
+        }
+
         let run_future = init_inpod_proxy_mgr(
             &mut registry,
             &mut admin_server,
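Note: the new listener task gates on the first XDS snapshot before serving, via the cloned watch receiver. A reduced sketch of that gating pattern using tokio::sync::watch; the names here are illustrative, and DataPlaneTask/proxy_gen are ztunnel types not reproduced:

use tokio::sync::watch;

// Stand-in for inbound.run(); illustrative only.
async fn run_listener() {}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = watch::channel(());
    let task = tokio::spawn(async move {
        // Block until the watch value changes, i.e. the first config arrives.
        let _ = rx.changed().await;
        run_listener().await;
    });
    tx.send(()).unwrap(); // publish the first snapshot
    task.await.unwrap();
}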
@@ -247,7 +266,8 @@ fn new_data_plane_pool(num_worker_threads: usize) -> mpsc::Sender<DataPlaneTask>
         .thread_name_fn(|| {
             static ATOMIC_ID: AtomicUsize = AtomicUsize::new(0);
             let id = ATOMIC_ID.fetch_add(1, Ordering::SeqCst);
-            format!("ztunnel-proxy-{id}")
+            // Thread name can only be 16 chars so keep it short
+            format!("ztunnel-{id}")
         })
         .enable_all()
         .build()
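Note: the rename matters because Linux thread names (set via pthread_setname_np) are limited to 16 bytes including the NUL terminator, so 15 visible characters. A quick check of the two formats:

fn main() {
    // Linux thread names hold at most 15 visible chars (16 bytes with NUL).
    const MAX: usize = 15;
    let old = format!("ztunnel-proxy-{}", 10); // "ztunnel-proxy-10": 16 chars, too long
    let new = format!("ztunnel-{}", 10); // "ztunnel-10": 10 chars, fits
    assert!(old.len() > MAX);
    assert!(new.len() <= MAX);
}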
@@ -25,6 +25,8 @@ pub struct Baggage {
     pub workload_name: Option<Strng>,
     pub service_name: Option<Strng>,
     pub revision: Option<Strng>,
+    pub region: Option<Strng>,
+    pub zone: Option<Strng>,
 }

 pub fn parse_baggage_header(headers: GetAll<HeaderValue>) -> Result<Baggage, ToStrError> {

@@ -49,6 +51,9 @@ pub fn parse_baggage_header(headers: GetAll<HeaderValue>) -> Result<Baggage, ToS
                 | "k8s.job.name" => baggage.workload_name = val,
                 "service.name" => baggage.service_name = val,
                 "service.version" => baggage.revision = val,
+                // https://opentelemetry.io/docs/specs/semconv/attributes-registry/cloud/
+                "cloud.region" => baggage.region = val,
+                "cloud.availability_zone" => baggage.zone = val,
                 _ => {}
             }
         }

@@ -59,7 +64,7 @@ pub fn parse_baggage_header(headers: GetAll<HeaderValue>) -> Result<Baggage, ToS

 #[cfg(test)]
 pub mod tests {
-    use hyper::{http::HeaderValue, HeaderMap};
+    use hyper::{HeaderMap, http::HeaderValue};

     use crate::proxy::BAGGAGE_HEADER;
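Note: a baggage header is a comma-separated list of key=value members, and the new match arms pick out the OpenTelemetry cloud locality keys. A minimal parse sketch over one header value; ztunnel's real parser works over GetAll<HeaderValue>, so this is illustrative only:

fn main() {
    // Illustrative: split one baggage value into (key, value) pairs.
    let header = "k8s.namespace.name=default,cloud.region=us-west1,cloud.availability_zone=us-west1-b";
    let mut region = None;
    let mut zone = None;
    for member in header.split(',') {
        if let Some((k, v)) = member.split_once('=') {
            match k {
                "cloud.region" => region = Some(v),
                "cloud.availability_zone" => zone = Some(v),
                _ => {}
            }
        }
    }
    assert_eq!(region, Some("us-west1"));
    assert_eq!(zone, Some("us-west1-b"));
}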
@@ -16,7 +16,7 @@ use crate::config;
 use crate::config::ProxyMode;
 use crate::identity::Priority::Warmup;
 use crate::identity::{Identity, Request, SecretManager};
-use crate::state::workload::{Protocol, Workload};
+use crate::state::workload::{InboundProtocol, Workload};
 use std::sync::Arc;
 use tokio::sync::mpsc;
 use tracing::{debug, error, info};

@@ -96,7 +96,7 @@ impl CertFetcherImpl {
         // We only get certs for our own node
         Some(w.node.as_ref()) == self.local_node.as_deref() &&
             // If it doesn't support HBONE it *probably* doesn't need a cert.
-            (w.native_tunnel || w.protocol == Protocol::HBONE)
+            (w.native_tunnel || w.protocol == InboundProtocol::HBONE)
     }
 }
src/config.rs (297 changed lines)

@@ -12,6 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+use serde::ser::SerializeSeq;
 use std::collections::{HashMap, HashSet};
 use std::fmt::{Display, Formatter};
 use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};

@@ -20,12 +21,13 @@ use std::str::FromStr;
 use std::sync::Arc;
 use std::time::Duration;
 use std::{cmp, env, fs};
+use tonic::metadata::{AsciiMetadataKey, AsciiMetadataValue};

 use anyhow::anyhow;
 use bytes::Bytes;
 use hickory_resolver::config::{LookupIpStrategy, ResolverConfig, ResolverOpts};
-use hyper::http::uri::InvalidUri;
 use hyper::Uri;
+use hyper::http::uri::InvalidUri;

 use crate::strng::Strng;
 use crate::{identity, state};

@@ -52,10 +54,12 @@ const LOCAL_XDS_PATH: &str = "LOCAL_XDS_PATH";
 const LOCAL_XDS: &str = "LOCAL_XDS";
 const XDS_ON_DEMAND: &str = "XDS_ON_DEMAND";
 const XDS_ADDRESS: &str = "XDS_ADDRESS";
+const PREFERED_SERVICE_NAMESPACE: &str = "PREFERED_SERVICE_NAMESPACE";
 const CA_ADDRESS: &str = "CA_ADDRESS";
 const SECRET_TTL: &str = "SECRET_TTL";
 const FAKE_CA: &str = "FAKE_CA";
 const ZTUNNEL_WORKER_THREADS: &str = "ZTUNNEL_WORKER_THREADS";
+const ZTUNNEL_CPU_LIMIT: &str = "ZTUNNEL_CPU_LIMIT";
 const POOL_MAX_STREAMS_PER_CONNECTION: &str = "POOL_MAX_STREAMS_PER_CONNECTION";
 const POOL_UNUSED_RELEASE_TIMEOUT: &str = "POOL_UNUSED_RELEASE_TIMEOUT";
 // CONNECTION_TERMINATION_DEADLINE configures an explicit deadline
@@ -68,6 +72,10 @@ const ENABLE_ORIG_SRC: &str = "ENABLE_ORIG_SRC";
 const PROXY_CONFIG: &str = "PROXY_CONFIG";
 const IPV6_ENABLED: &str = "IPV6_ENABLED";

+const HTTP2_STREAM_WINDOW_SIZE: &str = "HTTP2_STREAM_WINDOW_SIZE";
+const HTTP2_CONNECTION_WINDOW_SIZE: &str = "HTTP2_CONNECTION_WINDOW_SIZE";
+const HTTP2_FRAME_SIZE: &str = "HTTP2_FRAME_SIZE";
+
 const UNSTABLE_ENABLE_SOCKS5: &str = "UNSTABLE_ENABLE_SOCKS5";

 const DEFAULT_WORKER_THREADS: u16 = 2;

@@ -88,6 +96,9 @@ const ISTIO_META_PREFIX: &str = "ISTIO_META_";
 const DNS_CAPTURE_METADATA: &str = "DNS_CAPTURE";
 const DNS_PROXY_ADDR_METADATA: &str = "DNS_PROXY_ADDR";

+const ISTIO_XDS_HEADER_PREFIX: &str = "XDS_HEADER_";
+const ISTIO_CA_HEADER_PREFIX: &str = "CA_HEADER_";
+
 /// Fetch the XDS/CA root cert file path based on below constants
 const XDS_ROOT_CA_ENV: &str = "XDS_ROOT_CA";
 const CA_ROOT_CA_ENV: &str = "CA_ROOT_CA";

@@ -101,6 +112,8 @@ const CERT_SYSTEM: &str = "SYSTEM";
 const PROXY_MODE_DEDICATED: &str = "dedicated";
 const PROXY_MODE_SHARED: &str = "shared";

+const LOCALHOST_APP_TUNNEL: &str = "LOCALHOST_APP_TUNNEL";
+
 #[derive(serde::Serialize, Clone, Debug, PartialEq, Eq)]
 pub enum RootCert {
     File(PathBuf),
@@ -134,6 +147,37 @@ pub enum ProxyMode {
     Dedicated,
 }

+#[derive(Clone, Debug)]
+pub struct MetadataVector {
+    pub vec: Vec<(AsciiMetadataKey, AsciiMetadataValue)>,
+}
+
+impl serde::Serialize for MetadataVector {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        let mut seq: <S as serde::Serializer>::SerializeSeq =
+            serializer.serialize_seq(Some(self.vec.len()))?;
+
+        for (k, v) in &self.vec {
+            let serialized_key = k.to_string();
+
+            match v.to_str() {
+                Ok(serialized_val) => {
+                    seq.serialize_element(&(serialized_key, serialized_val))?;
+                }
+                Err(_) => {
+                    return Err(serde::ser::Error::custom(
+                        "failed to serialize metadata value",
+                    ));
+                }
+            }
+        }
+        seq.end()
+    }
+}
+
 #[derive(serde::Serialize, Clone, Debug)]
 #[serde(rename_all = "camelCase")]
 pub struct Config {
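Note: tonic's AsciiMetadataKey/AsciiMetadataValue are not serde types, which is why the manual impl above renders each pair as a (String, &str) tuple; the config dump therefore shows a list of two-element arrays. A usage sketch, assuming the MetadataVector type from the hunk above is in scope along with serde_json:

use std::str::FromStr;
use tonic::metadata::{AsciiMetadataKey, AsciiMetadataValue};

fn main() {
    let headers = MetadataVector {
        vec: vec![(
            AsciiMetadataKey::from_str("x-team").unwrap(),
            AsciiMetadataValue::from_str("mesh").unwrap(),
        )],
    };
    // Each (key, value) pair serializes as a two-element array.
    assert_eq!(
        serde_json::to_string(&headers).unwrap(),
        r#"[["x-team","mesh"]]"#
    );
}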
@@ -199,6 +243,12 @@ pub struct Config {
     // Allow custom alternative XDS hostname verification
     pub alt_xds_hostname: Option<String>,

+    /// Prefered service namespace to use for service resolution.
+    /// If unset, local namespaces is preferred and other namespaces have equal priority.
+    /// If set, the local namespace is preferred, then the defined prefered_service_namespace
+    /// and finally other namespaces at an equal priority.
+    pub prefered_service_namespace: Option<String>,
+
     /// TTL for CSR requests
     pub secret_ttl: Duration,
     /// YAML config for local XDS workloads

@@ -246,6 +296,21 @@ pub struct Config {
     pub packet_mark: Option<u32>,

     pub socket_config: SocketConfig,
+
+    // Headers to be added to XDS discovery requests
+    pub xds_headers: MetadataVector,
+
+    // Headers to be added to certificate requests
+    pub ca_headers: MetadataVector,
+
+    // If true, when AppTunnel is set for
+    pub localhost_app_tunnel: bool,
+
+    pub ztunnel_identity: Option<identity::Identity>,
+
+    pub ztunnel_workload: Option<state::WorkloadInfo>,
+
+    pub ipv6_enabled: bool,
 }

 #[derive(serde::Serialize, Clone, Copy, Debug)]
@@ -281,6 +346,10 @@ pub enum Error {
     InvalidUri(#[from] Arc<InvalidUri>),
     #[error("invalid configuration: {0}")]
     InvalidState(String),
+    #[error("failed to parse header key: {0}")]
+    InvalidHeaderKey(String),
+    #[error("failed to parse header value: {0}")]
+    InvalidHeaderValue(String),
 }

 impl From<InvalidUri> for Error {
@@ -326,6 +395,82 @@ fn parse_args() -> String {
     cli_args[1..].join(" ")
 }

+fn parse_headers(prefix: &str) -> Result<MetadataVector, Error> {
+    let mut metadata: MetadataVector = MetadataVector { vec: Vec::new() };
+
+    for (key, value) in env::vars() {
+        let stripped_key: Option<&str> = key.strip_prefix(prefix);
+        match stripped_key {
+            Some(stripped_key) => {
+                // attempt to parse the stripped key
+                let metadata_key = AsciiMetadataKey::from_str(stripped_key)
+                    .map_err(|_| Error::InvalidHeaderKey(key))?;
+                // attempt to parse the value
+                let metadata_value = AsciiMetadataValue::from_str(&value)
+                    .map_err(|_| Error::InvalidHeaderValue(value))?;
+                metadata.vec.push((metadata_key, metadata_value));
+            }
+            None => continue,
+        }
+    }
+
+    Ok(metadata)
+}
+
+fn get_cpu_count() -> Result<usize, Error> {
+    // Allow overriding the count with an env var. This can be used to pass the CPU limit on Kubernetes
+    // from the downward API.
+    // Note the downward API will return the total thread count ("logical cores") if no limit is set,
+    // so it is really the same as num_cpus.
+    // We allow num_cpus for cases its not set (not on Kubernetes, etc).
+    match parse::<usize>(ZTUNNEL_CPU_LIMIT)? {
+        Some(limit) => Ok(limit),
+        // This is *logical cores*
+        None => Ok(num_cpus::get()),
+    }
+}
+
+/// Parse worker threads configuration, supporting both fixed numbers and percentages
+fn parse_worker_threads(default: usize) -> Result<usize, Error> {
+    match parse::<String>(ZTUNNEL_WORKER_THREADS)? {
+        Some(value) => {
+            if let Some(percent_str) = value.strip_suffix('%') {
+                // Parse as percentage
+                let percent: f64 = percent_str.parse().map_err(|e| {
+                    Error::EnvVar(
+                        ZTUNNEL_WORKER_THREADS.to_string(),
+                        value.clone(),
+                        format!("invalid percentage: {e}"),
+                    )
+                })?;
+
+                if percent <= 0.0 || percent > 100.0 {
+                    return Err(Error::EnvVar(
+                        ZTUNNEL_WORKER_THREADS.to_string(),
+                        value,
+                        "percentage must be between 0 and 100".to_string(),
+                    ));
+                }
+
+                let cpu_count = get_cpu_count()?;
+                // Round up, minimum of 1
+                let threads = ((cpu_count as f64 * percent / 100.0).ceil() as usize).max(1);
+                Ok(threads)
+            } else {
+                // Parse as fixed number
+                value.parse::<usize>().map_err(|e| {
+                    Error::EnvVar(
+                        ZTUNNEL_WORKER_THREADS.to_string(),
+                        value,
+                        format!("invalid number: {e}"),
+                    )
+                })
+            }
+        }
+        None => Ok(default),
+    }
+}
+
 pub fn parse_config() -> Result<Config, Error> {
     let pc = parse_proxy_config()?;
     construct_config(pc)
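Note: the percentage path rounds up and floors at one thread, so small percentages on small CPU limits still yield a usable pool. A mirror of that arithmetic, matching the values exercised by test_parse_worker_threads further down:

fn main() {
    // Mirror of parse_worker_threads' percentage math: ceil, minimum of 1.
    fn threads(cpus: usize, percent: f64) -> usize {
        ((cpus as f64 * percent / 100.0).ceil() as usize).max(1)
    }
    assert_eq!(threads(8, 50.0), 4); // ZTUNNEL_CPU_LIMIT=8, ZTUNNEL_WORKER_THREADS=50%
    assert_eq!(threads(16, 30.0), 5); // 4.8 rounds up to 5
    assert_eq!(threads(4, 10.0), 1); // 0.4 rounds up to the floor of 1
}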
@@ -365,6 +510,14 @@ pub fn construct_config(pc: ProxyConfig) -> Result<Config, Error> {
             .or_else(|| Some(default_istiod_address.clone())),
     ))?;

+    let prefered_service_namespace = match parse::<String>(PREFERED_SERVICE_NAMESPACE) {
+        Ok(ns) => ns,
+        Err(e) => {
+            warn!(err=?e, "failed to parse {PREFERED_SERVICE_NAMESPACE}, continuing with default behavior");
+            None
+        }
+    };
+
     let istio_meta_cluster_id = ISTIO_META_PREFIX.to_owned() + CLUSTER_ID;
     let cluster_id: String = match parse::<String>(&istio_meta_cluster_id)? {
         Some(id) => id,
@@ -446,7 +599,7 @@
     // on a pod-by-pod basis.
     let dns_proxy_addr: Address = match pc.proxy_metadata.get(DNS_PROXY_ADDR_METADATA) {
         Some(dns_addr) => Address::new(ipv6_localhost_enabled, dns_addr)
-            .unwrap_or_else(|_| panic!("failed to parse DNS_PROXY_ADDR: {}", dns_addr)),
+            .unwrap_or_else(|_| panic!("failed to parse DNS_PROXY_ADDR: {dns_addr}")),
         None => Address::Localhost(ipv6_localhost_enabled, DEFAULT_DNS_PORT),
     };

@@ -484,7 +637,7 @@
                     format!(
                         "PROXY_MODE must be one of {PROXY_MODE_DEDICATED}, {PROXY_MODE_SHARED}"
                     ),
-                ))
+                ));
             }
         },
         None => ProxyMode::Shared,

@@ -518,7 +671,7 @@
         (Some(_), Some(_)) => {
             return Err(Error::InvalidState(format!(
                 "only one of {LOCAL_XDS_PATH} or {LOCAL_XDS} may be set"
-            )))
+            )));
         }
         (Some(f), _) => Some(ConfigSource::File(f)),
         (_, Some(d)) => Some(ConfigSource::Static(Bytes::from(d))),
@@ -527,6 +680,29 @@

     let socket_config_defaults = SocketConfig::default();

+    // Read ztunnel identity and workload info from Downward API if available
+    let (ztunnel_identity, ztunnel_workload) = match (
+        parse::<String>("POD_NAMESPACE")?,
+        parse::<String>("SERVICE_ACCOUNT")?,
+        parse::<String>("POD_NAME")?,
+    ) {
+        (Some(namespace), Some(service_account), Some(pod_name)) => {
+            let trust_domain = std::env::var("TRUST_DOMAIN")
+                .unwrap_or_else(|_| crate::identity::manager::DEFAULT_TRUST_DOMAIN.to_string());
+
+            let identity = identity::Identity::from_parts(
+                trust_domain.into(),
+                namespace.clone().into(),
+                service_account.clone().into(),
+            );
+
+            let workload = state::WorkloadInfo::new(pod_name, namespace, service_account);
+
+            (Some(identity), Some(workload))
+        }
+        _ => (None, None),
+    };
+
     validate_config(Config {
         proxy: parse_default(ENABLE_PROXY, true)?,
         // Enable by default; running the server is not an issue, clients still need to opt-in to sending their
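Note: with POD_NAMESPACE, SERVICE_ACCOUNT and POD_NAME injected through the Kubernetes Downward API, the block above gives ztunnel its own SPIFFE identity. A sketch of the resulting URI, assuming the Istio default trust domain cluster.local when TRUST_DOMAIN is unset (the URI shape matches the identities in the admin tests above):

fn main() {
    // Illustrative: the SPIFFE identity derived from the Downward API fields.
    let (trust_domain, ns, sa) = ("cluster.local", "istio-system", "ztunnel");
    let id = format!("spiffe://{trust_domain}/ns/{ns}/sa/{sa}");
    assert_eq!(id, "spiffe://cluster.local/ns/istio-system/sa/ztunnel");
}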
@@ -534,7 +710,7 @@
         dns_proxy: pc
             .proxy_metadata
            .get(DNS_CAPTURE_METADATA)
-            .map_or(true, |value| value.to_lowercase() == "true"),
+            .is_none_or(|value| value.to_lowercase() == "true"),

         pool_max_streams_per_conn: parse_default(
             POOL_MAX_STREAMS_PER_CONNECTION,

@@ -546,9 +722,15 @@
             DEFAULT_POOL_UNUSED_RELEASE_TIMEOUT,
         )?,

-        window_size: 4 * 1024 * 1024,
-        connection_window_size: 4 * 1024 * 1024,
-        frame_size: 1024 * 1024,
+        // window size: per-stream limit
+        window_size: parse_default(HTTP2_STREAM_WINDOW_SIZE, 4 * 1024 * 1024)?,
+        // connection window size: per connection.
+        // Setting this to the same value as window_size can introduce deadlocks in some applications
+        // where clients do not read data on streamA until they receive data on streamB.
+        // If streamA consumes the entire connection window, we enter a deadlock.
+        // A 4x limit should be appropriate without introducing too much potential buffering.
+        connection_window_size: parse_default(HTTP2_CONNECTION_WINDOW_SIZE, 16 * 1024 * 1024)?,
+        frame_size: parse_default(HTTP2_FRAME_SIZE, 1024 * 1024)?,

         self_termination_deadline: match parse_duration(CONNECTION_TERMINATION_DEADLINE)? {
             Some(period) => period,
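Note: the sizing rule in the comment is the important part: the connection-level window must comfortably exceed the per-stream window, or a single stalled stream can starve every other stream on the connection. The new defaults keep a 4x ratio, and both knobs are now overridable via the HTTP2_* env vars:

fn main() {
    // Defaults from the hunk above: 4 MiB per stream, 16 MiB per connection.
    let stream_window: u32 = 4 * 1024 * 1024;
    let connection_window: u32 = 16 * 1024 * 1024;
    // At least four fully stalled streams are needed to exhaust the
    // connection window, which is what the 4x comment is aiming for.
    assert_eq!(connection_window / stream_window, 4);
}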
@@ -602,6 +784,7 @@

         xds_address,
         xds_root_cert,
+        prefered_service_namespace,
         ca_address,
         ca_root_cert,
         alt_xds_hostname: parse(ALT_XDS_HOSTNAME)?,

@@ -615,8 +798,7 @@
         fake_ca,
         auth,

-        num_worker_threads: parse_default(
-            ZTUNNEL_WORKER_THREADS,
+        num_worker_threads: parse_worker_threads(
             pc.concurrency.unwrap_or(DEFAULT_WORKER_THREADS).into(),
         )?,
@@ -676,6 +858,13 @@
             }
         }),
         fake_self_inbound: false,
+        xds_headers: parse_headers(ISTIO_XDS_HEADER_PREFIX)?,
+        ca_headers: parse_headers(ISTIO_CA_HEADER_PREFIX)?,
+
+        localhost_app_tunnel: parse_default(LOCALHOST_APP_TUNNEL, true)?,
+        ztunnel_identity,
+        ztunnel_workload,
+        ipv6_enabled,
     })
 }
@@ -909,9 +1098,14 @@ pub mod tests {
         }"#,
         );

-        env::set_var("ISTIO_META_INCLUDE_THIS", "foobar-env");
-        env::set_var("NOT_INCLUDE", "not-include");
-        env::set_var("ISTIO_META_CLUSTER_ID", "test-cluster");
+        unsafe {
+            env::set_var("ISTIO_META_INCLUDE_THIS", "foobar-env");
+            env::set_var("NOT_INCLUDE", "not-include");
+            env::set_var("ISTIO_META_CLUSTER_ID", "test-cluster");
+            env::set_var("XDS_HEADER_HEADER_FOO", "foo");
+            env::set_var("XDS_HEADER_HEADER_BAR", "bar");
+            env::set_var("CA_HEADER_HEADER_BAZ", "baz");
+        }

         let pc = construct_proxy_config("", pc_env).unwrap();
         let cfg = construct_config(pc).unwrap();

@@ -925,14 +1119,32 @@ pub mod tests {
         );
         assert_eq!(cfg.proxy_metadata["BAR"], "bar");
         assert_eq!(cfg.proxy_metadata["FOOBAR"], "foobar-overwritten");
+        assert_eq!(cfg.proxy_metadata["NO_PREFIX"], "no-prefix");
+        assert_eq!(cfg.proxy_metadata["INCLUDE_THIS"], "foobar-env");
+        assert_eq!(cfg.proxy_metadata.get("NOT_INCLUDE"), None);
+        assert_eq!(cfg.proxy_metadata["CLUSTER_ID"], "test-cluster");
         assert_eq!(cfg.cluster_id, "test-cluster");
+
+        let mut expected_xds_headers = HashMap::new();
+        expected_xds_headers.insert("HEADER_FOO".to_string(), "foo".to_string());
+        expected_xds_headers.insert("HEADER_BAR".to_string(), "bar".to_string());
+
+        let mut expected_ca_headers = HashMap::new();
+        expected_ca_headers.insert("HEADER_BAZ".to_string(), "baz".to_string());
+
+        validate_metadata_vector(&cfg.xds_headers, expected_xds_headers.clone());
+
+        validate_metadata_vector(&cfg.ca_headers, expected_ca_headers.clone());
+
         // both (with a field override and metadata override)
         let pc = construct_proxy_config(mesh_config_path, pc_env).unwrap();
         let cfg = construct_config(pc).unwrap();

-        env::remove_var("ISTIO_META_INCLUDE_THIS");
-        env::remove_var("NOT_INCLUDE");
+        unsafe {
+            env::remove_var("ISTIO_META_INCLUDE_THIS");
+            env::remove_var("NOT_INCLUDE");
+        }

         assert_eq!(cfg.stats_addr.port(), 15888);
         assert_eq!(cfg.admin_addr.port(), 15999);
         assert_eq!(cfg.proxy_metadata["FOO"], "foo");
@@ -940,5 +1152,60 @@
         assert_eq!(cfg.proxy_metadata["FOOBAR"], "foobar-overwritten");
         assert_eq!(cfg.proxy_metadata["NO_PREFIX"], "no-prefix");
         assert_eq!(cfg.proxy_metadata["INCLUDE_THIS"], "foobar-env");
+        assert_eq!(cfg.proxy_metadata["CLUSTER_ID"], "test-cluster");
+        assert_eq!(cfg.cluster_id, "test-cluster");
+
+        validate_metadata_vector(&cfg.xds_headers, expected_xds_headers.clone());
+
+        validate_metadata_vector(&cfg.ca_headers, expected_ca_headers.clone());
+    }
+
+    fn validate_metadata_vector(metadata: &MetadataVector, header_map: HashMap<String, String>) {
+        for (k, v) in header_map {
+            let key: AsciiMetadataKey = AsciiMetadataKey::from_str(&k).unwrap();
+            let value: AsciiMetadataValue = AsciiMetadataValue::from_str(&v).unwrap();
+            assert!(metadata.vec.contains(&(key, value)));
+        }
+    }
+
+    #[test]
+    fn test_parse_worker_threads() {
+        unsafe {
+            // Test fixed number
+            env::set_var(ZTUNNEL_WORKER_THREADS, "4");
+            assert_eq!(parse_worker_threads(2).unwrap(), 4);
+
+            // Test percentage with CPU limit
+            env::set_var(ZTUNNEL_CPU_LIMIT, "8");
+            env::set_var(ZTUNNEL_WORKER_THREADS, "50%");
+            assert_eq!(parse_worker_threads(2).unwrap(), 4); // 50% of 8 CPUs = 4 threads
+
+            // Test percentage with CPU limit
+            env::set_var(ZTUNNEL_CPU_LIMIT, "16");
+            env::set_var(ZTUNNEL_WORKER_THREADS, "30%");
+            assert_eq!(parse_worker_threads(2).unwrap(), 5); // Round up to 5
+
+            // Test low percentage that rounds up to 1
+            env::set_var(ZTUNNEL_CPU_LIMIT, "4");
+            env::set_var(ZTUNNEL_WORKER_THREADS, "10%");
+            assert_eq!(parse_worker_threads(2).unwrap(), 1); // 10% of 4 CPUs = 0.4, rounds up to 1
+
+            // Test default when no env var is set
+            env::remove_var(ZTUNNEL_WORKER_THREADS);
+            assert_eq!(parse_worker_threads(2).unwrap(), 2);
+
+            // Test without CPU limit (should use system CPU count)
+            env::remove_var(ZTUNNEL_CPU_LIMIT);
+            let system_cpus = num_cpus::get();
+            assert_eq!(get_cpu_count().unwrap(), system_cpus);
+
+            // Test with CPU limit
+            env::set_var(ZTUNNEL_CPU_LIMIT, "12");
+            assert_eq!(get_cpu_count().unwrap(), 12);
+
+            // Clean up
+            env::remove_var(ZTUNNEL_WORKER_THREADS);
+            env::remove_var(ZTUNNEL_CPU_LIMIT);
+        }
     }
 }
src/copy.rs (48 changed lines)

@@ -18,14 +18,14 @@ use crate::proxy::Error::{BackendDisconnected, ClientDisconnected, ReceiveError,
 use bytes::{Buf, Bytes, BytesMut};
 use pin_project_lite::pin_project;
 use std::future::Future;
-use std::io::{Error, IoSlice};
+use std::io::Error;
 use std::marker::PhantomPinned;
 use std::pin::Pin;
-use std::task::{ready, Context, Poll};
+use std::task::{Context, Poll, ready};
 use tokio::io;
 use tokio::io::{AsyncRead, AsyncWrite};
-use tokio::net::tcp::{OwnedReadHalf, OwnedWriteHalf};
 use tokio::net::TcpStream;
+use tokio::net::tcp::{OwnedReadHalf, OwnedWriteHalf};
 use tracing::trace;

 // BufferedSplitter is a trait to expose splitting an IO object into a buffered reader and a writer
@ -340,14 +340,6 @@ impl<R: AsyncRead> BufReader<R> {
|
||||||
buffer_size: INITIAL_BUFFER_SIZE,
|
buffer_size: INITIAL_BUFFER_SIZE,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn get_ref(&self) -> &R {
|
|
||||||
&self.inner
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut R> {
|
|
||||||
self.project().inner
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<R: AsyncRead> ResizeBufRead for BufReader<R> {
|
impl<R: AsyncRead> ResizeBufRead for BufReader<R> {
|
||||||
|
@ -366,36 +358,6 @@ impl<R: AsyncRead> ResizeBufRead for BufReader<R> {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<R: AsyncRead + AsyncWrite> AsyncWrite for BufReader<R> {
|
|
||||||
fn poll_write(
|
|
||||||
self: Pin<&mut Self>,
|
|
||||||
cx: &mut Context<'_>,
|
|
||||||
buf: &[u8],
|
|
||||||
) -> Poll<io::Result<usize>> {
|
|
||||||
self.get_pin_mut().poll_write(cx, buf)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
|
||||||
self.get_pin_mut().poll_flush(cx)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
|
||||||
self.get_pin_mut().poll_shutdown(cx)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn poll_write_vectored(
|
|
||||||
self: Pin<&mut Self>,
|
|
||||||
cx: &mut Context<'_>,
|
|
||||||
bufs: &[IoSlice<'_>],
|
|
||||||
) -> Poll<io::Result<usize>> {
|
|
||||||
self.get_pin_mut().poll_write_vectored(cx, bufs)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn is_write_vectored(&self) -> bool {
|
|
||||||
self.get_ref().is_write_vectored()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pin_project! {
|
pin_project! {
|
||||||
/// A future used to shutdown an I/O object.
|
/// A future used to shutdown an I/O object.
|
||||||
///
|
///
|
||||||
|
@ -547,8 +509,8 @@ mod tests {
|
||||||
if buf.is_empty() {
|
if buf.is_empty() {
|
||||||
return Poll::Ready(Ok(0));
|
return Poll::Ready(Ok(0));
|
||||||
}
|
}
|
||||||
let mut rng = rand::thread_rng();
|
let mut rng = rand::rng();
|
||||||
let end = rng.gen_range(1..=buf.len()); // Ensure at least 1 byte is written
|
let end = rng.random_range(1..=buf.len()); // Ensure at least 1 byte is written
|
||||||
Pin::new(&mut self.0).poll_write(cx, &buf[0..end])
|
Pin::new(&mut self.0).poll_write(cx, &buf[0..end])
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
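The only behavioral change in the test helper tracks the rand 0.9 API: `rand::thread_rng()` is now `rand::rng()`, and `Rng::gen_range` is `Rng::random_range`, with the same inclusive-range semantics. A minimal sketch of the new names (rand 0.9 assumed as the dependency):

    use rand::Rng;

    fn main() {
        let mut rng = rand::rng(); // was rand::thread_rng() in rand 0.8
        let end = rng.random_range(1..=16); // was rng.gen_range(1..=16)
        assert!((1..=16).contains(&end));
    }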
@@ -14,21 +14,23 @@
 use crate::dns::resolver::{Answer, Resolver};
 use crate::proxy::SocketFactory;
-use hickory_proto::iocompat::AsyncIoTokioAsStd;
+use hickory_proto::runtime::RuntimeProvider;
+use hickory_proto::runtime::iocompat::AsyncIoTokioAsStd;
+use hickory_resolver::ResolveError;
 use hickory_resolver::config::{ResolverConfig, ResolverOpts};
-use hickory_resolver::error::ResolveError;
-use hickory_resolver::name_server;
-use hickory_resolver::name_server::{GenericConnector, RuntimeProvider};
+use hickory_resolver::name_server::GenericConnector;
 use hickory_server::authority::LookupError;
 use hickory_server::server::Request;
 use std::future::Future;
+use std::io;
 use std::net::SocketAddr;
 use std::pin::Pin;
 use std::sync::Arc;
+use std::time::Duration;
 use tokio::net::{TcpStream, UdpSocket};

 /// A forwarding [Resolver] that delegates requests to an upstream [TokioAsyncResolver].
-pub struct Forwarder(hickory_resolver::AsyncResolver<GenericConnector<RuntimeProviderAdaptor>>);
+pub struct Forwarder(hickory_resolver::Resolver<GenericConnector<RuntimeProviderAdaptor>>);

 impl Forwarder {
     /// Creates a new [Forwarder] from the provided resolver configuration.

@@ -41,20 +43,21 @@ impl Forwarder {
             socket_factory,
             handle: Default::default(),
         });
-        let resolver = hickory_resolver::AsyncResolver::new(cfg, opts, provider);
-        Ok(Self(resolver))
+        let mut resolver = hickory_resolver::Resolver::builder_with_config(cfg, provider);
+        *resolver.options_mut() = opts;
+        Ok(Self(resolver.build()))
     }
 }

 #[derive(Clone)]
 struct RuntimeProviderAdaptor {
     socket_factory: Arc<dyn SocketFactory + Send + Sync>,
-    handle: name_server::TokioHandle,
+    handle: hickory_proto::runtime::TokioHandle,
 }

+const CONNECT_TIMEOUT: Duration = Duration::from_secs(5);
+
 impl RuntimeProvider for RuntimeProviderAdaptor {
-    type Handle = name_server::TokioHandle;
-    type Timer = hickory_proto::TokioTime;
+    type Handle = hickory_proto::runtime::TokioHandle;
+    type Timer = hickory_proto::runtime::TokioTime;
     type Udp = UdpSocket;
     type Tcp = AsyncIoTokioAsStd<TcpStream>;

@@ -65,6 +68,8 @@ impl RuntimeProvider for RuntimeProviderAdaptor {
     fn connect_tcp(
         &self,
         server_addr: SocketAddr,
+        bind_addr: Option<SocketAddr>,
+        wait_for: Option<Duration>,
     ) -> Pin<Box<dyn Send + Future<Output = std::io::Result<Self::Tcp>>>> {
         let sf = self.socket_factory.clone();
         Box::pin(async move {

@@ -73,7 +78,20 @@ impl RuntimeProvider for RuntimeProviderAdaptor {
             } else {
                 sf.new_tcp_v6()
             }?;
-            socket.connect(server_addr).await.map(AsyncIoTokioAsStd)
+            if let Some(bind_addr) = bind_addr {
+                socket.bind(bind_addr)?;
+            }
+            let future = socket.connect(server_addr);
+            let wait_for = wait_for.unwrap_or(CONNECT_TIMEOUT);
+            match tokio::time::timeout(wait_for, future).await {
+                Ok(Ok(socket)) => Ok(AsyncIoTokioAsStd(socket)),
+                Ok(Err(e)) => Err(e),
+                Err(_) => Err(io::Error::new(
+                    io::ErrorKind::TimedOut,
+                    format!("connection to {server_addr:?} timed out after {wait_for:?}"),
+                )),
+            }
         })
     }

@@ -91,8 +109,9 @@ impl RuntimeProvider for RuntimeProviderAdaptor {
 impl Resolver for Forwarder {
     async fn lookup(&self, request: &Request) -> Result<Answer, LookupError> {
         // TODO(nmittler): Should we allow requests to the upstream resolver to be authoritative?
-        let name = request.query().name();
-        let rr_type = request.query().query_type();
+        let query = request.request_info()?.query;
+        let name = query.name();
+        let rr_type = query.query_type();
         self.0
             .lookup(name, rr_type)
             .await

@@ -107,10 +126,11 @@ mod tests {
     use crate::dns::resolver::Resolver;
     use crate::test_helpers::dns::{a_request, ip, n, run_dns, socket_addr};
     use crate::test_helpers::helpers::initialize_telemetry;
+    use hickory_proto::ProtoErrorKind;
     use hickory_proto::op::ResponseCode;
     use hickory_proto::rr::RecordType;
-    use hickory_resolver::error::ResolveErrorKind;
-    use hickory_server::server::Protocol;
+    use hickory_proto::xfer::Protocol;
+    use hickory_resolver::ResolveErrorKind;
     use std::collections::HashMap;

     #[tokio::test]

@@ -160,12 +180,13 @@ mod tests {
             .expect("expected resolve error");

         // Expect NoRecordsFound with a NXDomain response code.
-        let kind = err.kind();
-        match kind {
-            ResolveErrorKind::NoRecordsFound { response_code, .. } => {
+        if let ResolveErrorKind::Proto(proto) = err.kind() {
+            if let ProtoErrorKind::NoRecordsFound { response_code, .. } = proto.kind() {
+                // Respond with the error code.
                 assert_eq!(&ResponseCode::NXDomain, response_code);
+                return;
             }
-            _ => panic!("unexpected error kind {kind}"),
         }
+        panic!("unexpected error kind {}", err.kind())
     }
 }
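The reworked `connect_tcp` honors hickory's new `bind_addr`/`wait_for` parameters: it optionally binds the socket, then caps the connect with the caller's deadline, falling back to the fixed `CONNECT_TIMEOUT`. The timeout pattern in isolation, as a sketch against plain `TcpStream` rather than ztunnel's `SocketFactory` (tokio assumed as the runtime):

    use std::io;
    use std::net::SocketAddr;
    use std::time::Duration;
    use tokio::net::TcpStream;

    const CONNECT_TIMEOUT: Duration = Duration::from_secs(5);

    // Cap the connect future with the caller-provided deadline, falling
    // back to a fixed default, and surface expiry as io::ErrorKind::TimedOut.
    async fn connect_with_deadline(
        addr: SocketAddr,
        wait_for: Option<Duration>,
    ) -> io::Result<TcpStream> {
        let wait_for = wait_for.unwrap_or(CONNECT_TIMEOUT);
        match tokio::time::timeout(wait_for, TcpStream::connect(addr)).await {
            Ok(res) => res,
            Err(_) => Err(io::Error::new(
                io::ErrorKind::TimedOut,
                format!("connection to {addr:?} timed out after {wait_for:?}"),
            )),
        }
    }

    #[tokio::main]
    async fn main() -> io::Result<()> {
        let stream = connect_with_deadline("127.0.0.1:8080".parse().unwrap(), None).await?;
        println!("connected to {}", stream.peer_addr()?);
        Ok(())
    }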
@@ -13,9 +13,10 @@
 // limitations under the License.

 use crate::dns::resolver::{Answer, Resolver};
+use hickory_proto::ProtoErrorKind;
 use hickory_proto::op::{Edns, Header, MessageType, OpCode, ResponseCode};
 use hickory_proto::rr::Record;
-use hickory_resolver::error::ResolveErrorKind;
+use hickory_resolver::ResolveErrorKind;
 use hickory_server::authority::{LookupError, MessageResponse, MessageResponseBuilder};
 use hickory_server::server::{Request, RequestHandler, ResponseHandler, ResponseInfo};
 use std::sync::Arc;

@@ -117,16 +118,14 @@ async fn send_lookup_error<R: ResponseHandler>(
         }
         LookupError::ResponseCode(code) => send_error(request, response_handle, code).await,
         LookupError::ResolveError(e) => {
-            match e.kind() {
-                ResolveErrorKind::NoRecordsFound { response_code, .. } => {
+            if let ResolveErrorKind::Proto(proto) = e.kind() {
+                if let ProtoErrorKind::NoRecordsFound { response_code, .. } = proto.kind() {
                     // Respond with the error code.
-                    send_error(request, response_handle, *response_code).await
-                }
-                _ => {
-                    // TODO(nmittler): log?
-                    send_error(request, response_handle, ResponseCode::ServFail).await
-                }
+                    return send_error(request, response_handle, *response_code).await;
                 }
             }
+            // TODO(nmittler): log?
+            send_error(request, response_handle, ResponseCode::ServFail).await
         }
         LookupError::Io(_) => {
             // TODO(nmittler): log?

@@ -189,7 +188,7 @@ fn response_edns(request: &Request) -> Option<Edns> {
         let mut resp_edns: Edns = Edns::new();
         resp_edns.set_max_payload(req_edns.max_payload().max(512));
         resp_edns.set_version(req_edns.version());
-        resp_edns.set_dnssec_ok(req_edns.dnssec_ok());
+        resp_edns.set_dnssec_ok(req_edns.flags().dnssec_ok);

         Some(resp_edns)
     } else {

@@ -207,11 +206,10 @@ mod tests {
     use hickory_proto::op::{Message, MessageType, OpCode, ResponseCode};
     use hickory_proto::rr::{Name, Record, RecordType};
     use hickory_proto::serialize::binary::BinEncoder;
+    use hickory_proto::xfer::Protocol;
     use hickory_server::authority::LookupError;
     use hickory_server::authority::MessageResponse;
-    use hickory_server::server::{
-        Protocol, Request, RequestHandler, ResponseHandler, ResponseInfo,
-    };
+    use hickory_server::server::{Request, RequestHandler, ResponseHandler, ResponseInfo};
     use std::net::Ipv4Addr;
     use std::sync::Arc;
     use tokio::sync::mpsc;

@@ -262,7 +260,7 @@ mod tests {
     #[async_trait::async_trait]
     impl Resolver for FakeResolver {
         async fn lookup(&self, request: &Request) -> Result<Answer, LookupError> {
-            let name = Name::from(request.query().name().clone());
+            let name = Name::from(request.request_info()?.query.name().clone());
             let records = vec![a(name, Ipv4Addr::new(127, 0, 0, 1))];
             Ok(Answer::new(records, false))
         }
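Several hunks above repeat one migration: hickory-dns 0.25 nests the `NoRecordsFound` detail under `ProtoErrorKind`, reachable through `ResolveErrorKind::Proto`, so both the handler and the tests now match two layers deep. The extraction in isolation — a sketch against the hickory 0.25 types imported above, not a verbatim excerpt:

    use hickory_proto::ProtoErrorKind;
    use hickory_proto::op::ResponseCode;
    use hickory_resolver::{ResolveError, ResolveErrorKind};

    // Pull the response code out of the nested error shape, defaulting to
    // SERVFAIL when the failure is anything other than NoRecordsFound.
    fn response_code_for(err: &ResolveError) -> ResponseCode {
        if let ResolveErrorKind::Proto(proto) = err.kind() {
            if let ProtoErrorKind::NoRecordsFound { response_code, .. } = proto.kind() {
                return *response_code;
            }
        }
        ResponseCode::ServFail
    }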
@@ -23,6 +23,7 @@ use std::time::Duration;
 use crate::metrics::{DefaultedUnknown, DeferRecorder, Recorder};

 use crate::state::workload::Workload;
+use crate::strng;
 use crate::strng::RichStrng;

 pub struct Metrics {

@@ -56,7 +57,7 @@ impl Metrics {
         );

         let forwarded_duration = Family::<DnsLabels, Histogram>::new_with_constructor(|| {
-            Histogram::new(vec![0.005f64, 0.001, 0.01, 0.1, 1.0, 5.0].into_iter())
+            Histogram::new(vec![0.005f64, 0.001, 0.01, 0.1, 1.0, 5.0])
        });
        registry.register_with_unit(
            "dns_upstream_request_duration",

@@ -78,7 +79,7 @@ impl DeferRecorder for Metrics {}

 #[derive(Clone, Hash, Debug, PartialEq, Eq, EncodeLabelSet)]
 pub struct DnsLabels {
-    request_query_type: RichStrng,
+    request_query_type: DefaultedUnknown<RichStrng>,
     request_protocol: RichStrng,

     // Source workload.

@@ -89,7 +90,12 @@ pub struct DnsLabels {
 impl DnsLabels {
     pub fn new(r: &Request) -> Self {
         Self {
-            request_query_type: r.query().query_type().to_string().to_lowercase().into(),
+            request_query_type: r
+                .request_info()
+                .map(|q| q.query.query_type().to_string().to_lowercase())
+                .ok()
+                .map(|s| RichStrng::from(strng::new(s)))
+                .into(),
             request_protocol: r.protocol().to_string().to_lowercase().into(),
             source_canonical_service: Default::default(),
             source_canonical_revision: Default::default(),
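`request_query_type` changes to `DefaultedUnknown<RichStrng>` because `Request::request_info()` is fallible in hickory 0.25: when the query cannot be read, the label should degrade to a default rather than panic. `DefaultedUnknown` is ztunnel's own wrapper; the following is a hypothetical stand-in showing only the Option-to-default shape, not the real type:

    // Illustrative stand-in for ztunnel's DefaultedUnknown<T>.
    struct Defaulted<T>(Option<T>);

    impl<T> From<Option<T>> for Defaulted<T> {
        fn from(v: Option<T>) -> Self {
            Defaulted(v)
        }
    }

    impl<T: AsRef<str>> Defaulted<T> {
        // Render a fixed fallback when the value could not be extracted.
        fn as_label(&self) -> &str {
            self.0.as_ref().map(|v| v.as_ref()).unwrap_or("unknown")
        }
    }

    fn main() {
        let present: Defaulted<String> = Some("a".to_string()).into();
        let missing: Defaulted<String> = None.into();
        assert_eq!(present.as_label(), "a");
        assert_eq!(missing.as_label(), "unknown");
    }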
@@ -39,7 +39,9 @@ pub fn trim_domain(name: &Name, domain: &Name) -> Option<Name> {
         // Create a Name from the labels leading up to the domain.
         let iter = name.iter();
         let num_labels = iter.len() - domain.num_labels() as usize;
-        Some(Name::from_labels(iter.take(num_labels)).unwrap())
+        let mut name = Name::from_labels(iter.take(num_labels)).unwrap();
+        name.set_fqdn(false);
+        Some(name)
     } else {
         None
     }
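The `trim_domain` fix is subtle: `Name::from_labels` yields a fully-qualified name, so the trimmed result previously kept a trailing dot; `set_fqdn(false)` makes it relative again. A sketch of the before/after behavior, assuming hickory-proto's `Name` rendering (trailing dot only for FQDNs):

    use hickory_proto::rr::Name;
    use std::str::FromStr;

    fn main() {
        let name = Name::from_str("productpage.ns1.svc.cluster.local.").unwrap();
        let domain = Name::from_str("ns1.svc.cluster.local.").unwrap();

        // Keep only the labels in front of the domain suffix.
        let keep = name.iter().len() - domain.num_labels() as usize;
        let mut trimmed = Name::from_labels(name.iter().take(keep)).unwrap();
        assert_eq!(trimmed.to_string(), "productpage."); // still renders as an FQDN
        trimmed.set_fqdn(false);
        assert_eq!(trimmed.to_string(), "productpage"); // relative, no trailing dot
    }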
@@ -12,19 +12,19 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use hickory_proto::error::ProtoErrorKind;
+use hickory_proto::ProtoErrorKind;
 use hickory_proto::op::ResponseCode;
 use hickory_proto::rr::rdata::{A, AAAA, CNAME};
 use hickory_proto::rr::{Name, RData, Record, RecordType};
 use hickory_resolver::config::{NameServerConfig, ResolverConfig, ResolverOpts};
 use hickory_resolver::system_conf::read_system_conf;
+use hickory_server::ServerFuture;
 use hickory_server::authority::LookupError;
 use hickory_server::server::Request;
-use hickory_server::ServerFuture;
 use itertools::Itertools;
 use once_cell::sync::Lazy;
+use rand::rng;
 use rand::seq::SliceRandom;
-use rand::thread_rng;
 use std::collections::HashSet;
 use std::fmt::{Display, Formatter};
 use std::net::{IpAddr, SocketAddr};

@@ -32,6 +32,7 @@ use std::ops::Deref;
 use std::str::FromStr;
 use std::sync::Arc;
 use std::time::Duration;
+use tracing::event;
 use tracing::{debug, info, instrument, trace, warn};

 use crate::proxy::{LocalWorkloadFetcher, SocketFactory};

@@ -45,10 +46,10 @@ use crate::dns::resolver::{Answer, Resolver};
 use crate::drain::{DrainMode, DrainWatcher};
 use crate::metrics::{DeferRecorder, IncrementRecorder, Recorder};
 use crate::proxy::Error;
-use crate::state::service::IpFamily;
-use crate::state::workload::address::Address;
-use crate::state::workload::Workload;
 use crate::state::DemandProxyState;
+use crate::state::service::{IpFamily, Service};
+use crate::state::workload::Workload;
+use crate::state::workload::address::Address;
 use crate::{config, dns};

 const DEFAULT_TCP_REQUEST_TIMEOUT: u64 = 5;

@@ -84,6 +85,8 @@ impl Server {
         drain: DrainWatcher,
         socket_factory: &(dyn SocketFactory + Send + Sync),
         local_workload_information: Arc<LocalWorkloadFetcher>,
+        prefered_service_namespace: Option<String>,
+        ipv6_enabled: bool,
     ) -> Result<Self, Error> {
         // if the address we got from config is supposed to be v6-enabled,
         // actually check if the local pod context our socketfactory operates in supports V6.

@@ -101,6 +104,8 @@ impl Server {
             forwarder,
             metrics,
             local_workload_information,
+            prefered_service_namespace,
+            ipv6_enabled,
         );
         let store = Arc::new(store);
         let handler = dns::handler::Handler::new(store.clone());

@@ -190,6 +195,8 @@ struct Store {
     svc_domain: Name,
     metrics: Arc<Metrics>,
     local_workload: Arc<LocalWorkloadFetcher>,
+    prefered_service_namespace: Option<String>,
+    ipv6_enabled: bool,
 }

 impl Store {

@@ -199,6 +206,8 @@ impl Store {
         forwarder: Arc<dyn Forwarder>,
         metrics: Arc<Metrics>,
         local_workload_information: Arc<LocalWorkloadFetcher>,
+        prefered_service_namespace: Option<String>,
+        ipv6_enabled: bool,
     ) -> Self {
         let domain = as_name(domain);
         let svc_domain = append_name(as_name("svc"), &domain);

@@ -210,6 +219,8 @@ impl Store {
             svc_domain,
             metrics,
             local_workload: local_workload_information,
+            prefered_service_namespace,
+            ipv6_enabled,
         }
     }
@@ -358,7 +369,7 @@ impl Store {
         let search_name_str = search_name.to_string().into();
         search_name.set_fqdn(true);

-        let service = state
+        let services: Vec<Arc<Service>> = state
             .services
             .get_by_host(&search_name_str)
             .iter()

@@ -381,13 +392,30 @@ impl Store {
             })
             // Get the service matching the client namespace. If no match exists, just
             // return the first service.
-            .find_or_first(|service| service.namespace == client.namespace)
-            .cloned();
+            // .find_or_first(|service| service.namespace == client.namespace)
+            .cloned()
+            .collect();
+
+        // TODO: ideally we'd sort these by creation time so that the oldest would be used if there are no namespace matches
+        // presently service doesn't have creation time in WDS, but we could add it
+        // TODO: if the local namespace doesn't define a service, kube service should be prioritized over se
+        let service = match services
+            .iter()
+            .find(|service| service.namespace == client.namespace)
+        {
+            Some(service) => Some(service),
+            None => match self.prefered_service_namespace.as_ref() {
+                Some(prefered_namespace) => services.iter().find_or_first(|service| {
+                    service.namespace == prefered_namespace.as_str()
+                }),
+                None => services.first(),
+            },
+        };

         // First, lookup the host as a service.
         if let Some(service) = service {
             return Some(ServerMatch {
-                server: Address::Service(Arc::new(service)),
+                server: Address::Service(service.clone()),
                 name: search_name,
                 alias,
             });
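The selection order implemented above is: a service in the client's own namespace wins; otherwise the configured preferred namespace (if any) is tried, falling back to the first candidate; otherwise the first candidate. The same precedence as a standalone function — simplified types for illustration, not ztunnel's actual `Service`:

    #[derive(Clone)]
    struct Service {
        namespace: String,
    }

    // Precedence mirrors the new find_server logic: exact client-namespace
    // match, then the optionally configured preferred namespace, then the
    // first candidate as a last resort.
    fn choose<'a>(
        services: &'a [Service],
        client_ns: &str,
        preferred_ns: Option<&str>,
    ) -> Option<&'a Service> {
        services
            .iter()
            .find(|s| s.namespace == client_ns)
            .or_else(|| match preferred_ns {
                Some(p) => services
                    .iter()
                    .find(|s| s.namespace == p)
                    .or_else(|| services.first()),
                None => services.first(),
            })
    }

    fn main() {
        let svcs = vec![
            Service { namespace: "ns2".into() },
            Service { namespace: "preferred-ns".into() },
        ];
        // Client is in ns1, which defines no service, so the preferred one wins.
        let got = choose(&svcs, "ns1", Some("preferred-ns")).unwrap();
        assert_eq!(got.namespace, "preferred-ns");
    }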
@@ -399,6 +427,13 @@ impl Store {
         None
     }

+    fn record_type_enabled(&self, addr: &IpAddr) -> bool {
+        match addr {
+            IpAddr::V4(_) => true,              // IPv4 always
+            IpAddr::V6(_) => self.ipv6_enabled, // IPv6 must be not be disabled in config
+        }
+    }
+
     /// Gets the list of addresses of the requested record type from the server.
     fn get_addresses(
         &self,

@@ -411,7 +446,7 @@ impl Store {
                 .workload_ips
                 .iter()
                 .filter_map(|addr| {
-                    if is_record_type(addr, record_type) {
+                    if is_record_type(addr, record_type) && self.record_type_enabled(addr) {
                         Some(*addr)
                     } else {
                         None

@@ -430,10 +465,9 @@ impl Store {
                         debug!("failed to fetch workload for {}", ep.workload_uid);
                         return None;
                     };
-                    wl.workload_ips
-                        .iter()
-                        .copied()
-                        .find(|addr| is_record_type(addr, record_type))
+                    wl.workload_ips.iter().copied().find(|addr| {
+                        is_record_type(addr, record_type) && self.record_type_enabled(addr)
+                    })
                 })
                 .collect()
         } else {

@@ -445,6 +479,7 @@ impl Store {
                 .filter_map(|vip| {
                     if is_record_type(&vip.address, record_type)
                         && client.network == vip.network
+                        && self.record_type_enabled(&vip.address)
                     {
                         Some(vip.address)
                     } else {

@@ -457,7 +492,7 @@ impl Store {
         };

         // Randomize the order of the returned addresses.
-        addrs.shuffle(&mut thread_rng());
+        addrs.shuffle(&mut rng());

         addrs
     }
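`record_type_enabled` is consulted at every address source (workload IPs, endpoints, and VIPs), which is what keeps a dual-stack backend from leaking AAAA answers when IPv6 is disabled. The gate in isolation:

    use std::net::IpAddr;

    // Mirror of the new gate: IPv4 always passes; IPv6 only when enabled.
    fn record_type_enabled(addr: &IpAddr, ipv6_enabled: bool) -> bool {
        match addr {
            IpAddr::V4(_) => true,
            IpAddr::V6(_) => ipv6_enabled,
        }
    }

    fn main() {
        let addrs: Vec<IpAddr> = vec![
            "10.0.0.1".parse().unwrap(),
            "2001:db8::1".parse().unwrap(),
        ];
        let visible: Vec<_> = addrs
            .iter()
            .filter(|a| record_type_enabled(a, false))
            .collect();
        assert_eq!(visible.len(), 1); // only the IPv4 address survives
    }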
@@ -506,6 +541,25 @@ impl Store {
         }
     }

+fn access_log(request: &Request, source: Option<&Workload>, result: &str, ep_count: usize) {
+    let src = source.as_ref();
+    let query = request.request_info().ok().map(|info| info.query);
+    event!(
+        target: "dns",
+        parent: None,
+        tracing::Level::DEBUG,
+
+        src.workload = src.map(|w| w.name.as_str()).unwrap_or("unknown"),
+        src.namespace = src.map(|w| w.namespace.as_str()).unwrap_or("unknown"),
+
+        query = query.map(|q| q.query_type().to_string()),
+        domain = query.map(|q| q.name().to_string()),
+
+        result = result,
+        endpoints = ep_count,
+    );
+}
+
 #[async_trait::async_trait]
 impl Resolver for Store {
     #[instrument(

@@ -513,8 +567,8 @@ impl Resolver for Store {
         skip_all,
         fields(
             src=%request.src(),
-            query=%request.query().query_type(),
-            name=%request.query().name(),
+            query=%request.request_info()?.query.query_type(),
+            name=%request.request_info()?.query.name(),
         ),
     )]
     async fn lookup(&self, request: &Request) -> Result<Answer, LookupError> {

@@ -527,20 +581,63 @@ impl Resolver for Store {
                 LookupError::ResponseCode(ResponseCode::ServFail)
             })?;

+        let query = request.request_info()?.query;
         // Make sure the request is for IP records. Anything else, we forward.
-        let record_type = request.query().query_type();
+        let record_type = query.query_type();
         if !is_record_type_supported(record_type) {
             debug!("unknown record type");
-            return self.forward(Some(&client), request).await;
+            let result = self.forward(Some(&client), request).await;
+            match result {
+                Ok(ref answer) => {
+                    access_log(
+                        request,
+                        Some(&client),
+                        "forwarded",
+                        answer.record_iter().count(),
+                    );
+                }
+                Err(e) => {
+                    // Forwarding failed. Just return the error.
+                    access_log(
+                        request,
+                        Some(&client),
+                        &format!("forwarding failed ({e})"),
+                        0,
+                    );
+                    return Err(e);
+                }
+            }
+            return result;
         }

         // Find the service for the requested host.
-        let requested_name = Name::from(request.query().name().clone());
+        let requested_name = Name::from(query.name().clone());
         trace!("incoming request {requested_name:?}");
         let Some(service_match) = self.find_server(&client, &requested_name) else {
             trace!("unknown host, forwarding");
             // Unknown host. Forward to the upstream resolver.
-            return self.forward(Some(&client), request).await;
+            let result = self.forward(Some(&client), request).await;
+            match result {
+                Ok(ref answer) => {
+                    access_log(
+                        request,
+                        Some(&client),
+                        "forwarded",
+                        answer.record_iter().count(),
+                    );
+                }
+                Err(e) => {
+                    // Forwarding failed. Just return the error.
+                    access_log(
+                        request,
+                        Some(&client),
+                        &format!("forwarding failed ({e})"),
+                        0,
+                    );
+                    return Err(e);
+                }
+            }
+            return result;
         };

         // Increment counter for all requests.
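The `access_log` helper above emits one detached, `dns`-targeted event per request outcome. Outside ztunnel the same shape can be reproduced with `tracing` directly — a sketch assuming the `tracing` and `tracing-subscriber` crates, with the target and field names copied from the diff:

    use tracing::event;

    fn access_log(workload: Option<&str>, result: &str, ep_count: usize) {
        // `target: "dns"` lets subscribers filter these events independently
        // of the module path; `parent: None` detaches them from the current span.
        event!(
            target: "dns",
            parent: None,
            tracing::Level::DEBUG,
            src.workload = workload.unwrap_or("unknown"),
            result = result,
            endpoints = ep_count,
        );
    }

    fn main() {
        tracing_subscriber::fmt()
            .with_max_level(tracing::Level::DEBUG)
            .init();
        access_log(Some("productpage-v1"), "success", 2);
    }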
@@ -552,8 +649,13 @@ impl Resolver for Store {
         // From this point on, we are the authority for the response.
         let is_authoritative = true;

-        if !service_family_allowed(&service_match.server, record_type) {
-            debug!(alias=%service_match.alias, %record_type, ans=?Answer::new(Vec::default(), is_authoritative), "service does not support this record type");
+        if !service_family_allowed(&service_match.server, record_type, self.ipv6_enabled) {
+            access_log(
+                request,
+                Some(&client),
+                "service does not support this record type",
+                0,
+            );
             // This is not NXDOMAIN, since we found the host. Just return an empty set of records.
             return Ok(Answer::new(Vec::default(), is_authoritative));
         }

@@ -562,7 +664,7 @@ impl Resolver for Store {
         let addresses = self.get_addresses(&client, &service_match.server, record_type);

         if addresses.is_empty() {
-            debug!(alias=%service_match.alias, name=%service_match.name, "no records");
+            access_log(request, Some(&client), "no records", 0);
             // Lookup succeeded, but no records were returned. This is not NXDOMAIN, since we
             // found the host. Just return an empty set of records.
             return Ok(Answer::new(Vec::default(), is_authoritative));

@@ -574,7 +676,6 @@ impl Resolver for Store {
         // Assume that we'll just use the requested name as the record name.
         let mut ip_record_name = requested_name.clone();

-        debug!(alias=%service_match.alias, name=%service_match.name, "success");
         // If the service was found by stripping off one of the search domains, create a
         // CNAME record to map to the appropriate canonical name.
         if let Some(stripped) = service_match.alias.stripped {

@@ -604,6 +705,7 @@ impl Resolver for Store {
             }
         }

+        access_log(request, Some(&client), "success", records.len());
         // Add the IP records.
         ip_records(ip_record_name, addresses, &mut records);

@@ -616,7 +718,13 @@ impl Resolver for Store {
 /// anyway, so would naturally work.
 /// Headless services, however, do not have VIPs, and the Pods behind them can have dual stack IPs even with
 /// the Service being single-stack. In this case, we are NOT supposed to return both IPs.
-fn service_family_allowed(server: &Address, record_type: RecordType) -> bool {
+/// If IPv6 is globally disabled, AAAA records are not allowed.
+fn service_family_allowed(server: &Address, record_type: RecordType, ipv6_enabled: bool) -> bool {
+    // If IPv6 is globally disabled, don't allow AAAA records
+    if !ipv6_enabled && record_type == RecordType::AAAA {
+        return false;
+    }
+
     match server {
         Address::Service(service) => match service.ip_families {
             Some(IpFamily::IPv4) if record_type == RecordType::AAAA => false,
@@ -863,7 +971,7 @@ mod tests {
     use std::net::{SocketAddrV4, SocketAddrV6};

     use bytes::Bytes;
-    use hickory_server::server::Protocol;
+    use hickory_proto::xfer::Protocol;
     use prometheus_client::registry::Registry;

     use super::*;

@@ -888,6 +996,7 @@ mod tests {
     const NS1: &str = "ns1";
     const NS2: &str = "ns2";
+    const PREFERRED: &str = "preferred-ns";
     const NW1: Strng = strng::literal!("nw1");
     const NW2: Strng = strng::literal!("nw2");

@@ -995,6 +1104,8 @@ mod tests {
             forwarder,
             metrics: test_metrics(),
             local_workload,
+            prefered_service_namespace: None,
+            ipv6_enabled: true,
         };

         let namespaced_domain = n(format!("{}.svc.cluster.local", c.client_namespace));

@@ -1067,45 +1178,49 @@ mod tests {
             name: "success: non k8s host with search namespace yields cname+A record",
             host: "www.google.com.ns1.svc.cluster.local.",
             expect_records: vec![
-                cname(n("www.google.com.ns1.svc.cluster.local."), n("www.google.com.")),
-                a(n("www.google.com."), ipv4("1.1.1.1"))],
+                cname(
+                    n("www.google.com.ns1.svc.cluster.local."),
+                    n("www.google.com."),
+                ),
+                a(n("www.google.com."), ipv4("1.1.1.1")),
+            ],
             ..Default::default()
         },
         Case {
             name: "success: non k8s host not in local cache",
             host: "www.bing.com",
             expect_authoritative: false,
-            expect_records: vec![
-                a(n("www.bing.com."), ipv4("1.1.1.1"))],
+            expect_records: vec![a(n("www.bing.com."), ipv4("1.1.1.1"))],
             ..Default::default()
         },
         Case {
             name: "success: k8s host - fqdn",
             host: "productpage.ns1.svc.cluster.local.",
-            expect_records: vec![
-                a(n("productpage.ns1.svc.cluster.local."), ipv4("9.9.9.9"))],
+            expect_records: vec![a(n("productpage.ns1.svc.cluster.local."), ipv4("9.9.9.9"))],
             ..Default::default()
         },
         Case {
             name: "success: k8s host - name.namespace",
             host: "productpage.ns1.",
-            expect_records: vec![
-                a(n("productpage.ns1."), ipv4("9.9.9.9"))],
+            expect_records: vec![a(n("productpage.ns1."), ipv4("9.9.9.9"))],
             ..Default::default()
         },
         Case {
             name: "success: k8s host - shortname",
             host: "productpage.",
-            expect_records: vec![
-                a(n("productpage."), ipv4("9.9.9.9"))],
+            expect_records: vec![a(n("productpage."), ipv4("9.9.9.9"))],
             ..Default::default()
         },
         Case {
             name: "success: k8s host (name.namespace) with search namespace yields cname+A record",
             host: "productpage.ns1.ns1.svc.cluster.local.",
             expect_records: vec![
-                cname(n("productpage.ns1.ns1.svc.cluster.local."), n("productpage.ns1.")),
-                a(n("productpage.ns1."), ipv4("9.9.9.9"))],
+                cname(
+                    n("productpage.ns1.ns1.svc.cluster.local."),
+                    n("productpage.ns1."),
+                ),
+                a(n("productpage.ns1."), ipv4("9.9.9.9")),
+            ],
             ..Default::default()
         },
         Case {

@@ -1118,22 +1233,19 @@ mod tests {
         Case {
             name: "success: k8s host - non local namespace - name.namespace",
             host: "example.ns2.",
-            expect_records: vec![
-                a(n("example.ns2."), ipv4("10.10.10.10"))],
+            expect_records: vec![a(n("example.ns2."), ipv4("10.10.10.10"))],
             ..Default::default()
         },
         Case {
             name: "success: k8s host - non local namespace - fqdn",
             host: "example.ns2.svc.cluster.local.",
-            expect_records: vec![
-                a(n("example.ns2.svc.cluster.local."), ipv4("10.10.10.10"))],
+            expect_records: vec![a(n("example.ns2.svc.cluster.local."), ipv4("10.10.10.10"))],
             ..Default::default()
         },
         Case {
             name: "success: k8s host - non local namespace - name.namespace.svc",
             host: "example.ns2.svc.",
-            expect_records: vec![
-                a(n("example.ns2.svc."), ipv4("10.10.10.10"))],
+            expect_records: vec![a(n("example.ns2.svc."), ipv4("10.10.10.10"))],
             ..Default::default()
         },
         Case {

@@ -1150,7 +1262,8 @@ mod tests {
             a(n("details.ns2.svc.cluster.remote."), ipv4("11.11.11.11")),
             a(n("details.ns2.svc.cluster.remote."), ipv4("12.12.12.12")),
             a(n("details.ns2.svc.cluster.remote."), ipv4("13.13.13.13")),
-            a(n("details.ns2.svc.cluster.remote."), ipv4("14.14.14.14"))],
+            a(n("details.ns2.svc.cluster.remote."), ipv4("14.14.14.14")),
+        ],
             ..Default::default()
         },
         Case {

@@ -1163,16 +1276,17 @@ mod tests {
         Case {
             name: "success: TypeA query returns A records only",
             host: "dual.localhost.",
-            expect_records: vec![
-                a(n("dual.localhost."), ipv4("2.2.2.2"))],
+            expect_records: vec![a(n("dual.localhost."), ipv4("2.2.2.2"))],
             ..Default::default()
         },
         Case {
             name: "success: TypeAAAA query returns AAAA records only",
             host: "dual.localhost.",
             query_type: RecordType::AAAA,
-            expect_records: vec![
-                aaaa(n("dual.localhost."), ipv6("2001:db8:0:0:0:ff00:42:8329"))],
+            expect_records: vec![aaaa(
+                n("dual.localhost."),
+                ipv6("2001:db8:0:0:0:ff00:42:8329"),
+            )],
             ..Default::default()
         },
         Case {

@@ -1191,37 +1305,40 @@ mod tests {
         Case {
             name: "success: wild card returns A record correctly",
             host: "foo.wildcard.",
-            expect_records: vec![
-                a(n("foo.wildcard."), ipv4("10.10.10.10"))],
+            expect_records: vec![a(n("foo.wildcard."), ipv4("10.10.10.10"))],
             ..Default::default()
         },
         Case {
             name: "success: specific wild card returns A record correctly",
             host: "a.b.wildcard.",
-            expect_records: vec![
-                a(n("a.b.wildcard."), ipv4("11.11.11.11"))],
+            expect_records: vec![a(n("a.b.wildcard."), ipv4("11.11.11.11"))],
             ..Default::default()
         },
         Case {
             name: "success: wild card with domain returns A record correctly",
             host: "foo.svc.mesh.company.net.",
-            expect_records: vec![
-                a(n("foo.svc.mesh.company.net."), ipv4("10.1.2.3"))],
+            expect_records: vec![a(n("foo.svc.mesh.company.net."), ipv4("10.1.2.3"))],
             ..Default::default()
         },
         Case {
             name: "success: wild card with namespace with domain returns A record correctly",
             host: "foo.foons.svc.mesh.company.net.",
-            expect_records: vec![
-                a(n("foo.foons.svc.mesh.company.net."), ipv4("10.1.2.3"))],
+            expect_records: vec![a(n("foo.foons.svc.mesh.company.net."), ipv4("10.1.2.3"))],
             ..Default::default()
         },
         Case {
             name: "success: wild card with search domain returns A record correctly",
             host: "foo.svc.mesh.company.net.ns1.svc.cluster.local.",
             expect_records: vec![
-                cname(n("*.svc.mesh.company.net.ns1.svc.cluster.local."), n("*.svc.mesh.company.net.")),
-                a(n("foo.svc.mesh.company.net.ns1.svc.cluster.local."), ipv4("10.1.2.3"))],
+                cname(
+                    n("*.svc.mesh.company.net.ns1.svc.cluster.local."),
+                    n("*.svc.mesh.company.net."),
+                ),
+                a(
+                    n("foo.svc.mesh.company.net.ns1.svc.cluster.local."),
+                    ipv4("10.1.2.3"),
+                ),
+            ],
             ..Default::default()
         },
         Case {

@@ -1233,8 +1350,10 @@ mod tests {
         Case {
             name: "success: return vip for client network only",
             host: "both-networks.ns1.svc.cluster.local.",
-            expect_records: vec![
-                a(n("both-networks.ns1.svc.cluster.local."), ipv4("21.21.21.21"))],
+            expect_records: vec![a(
+                n("both-networks.ns1.svc.cluster.local."),
+                ipv4("21.21.21.21"),
+            )],
             ..Default::default()
         },
         Case {

@@ -1242,7 +1361,8 @@ mod tests {
             host: "headless.ns1.svc.cluster.local.",
             expect_records: vec![
                 a(n("headless.ns1.svc.cluster.local."), ipv4("30.30.30.30")),
-                a(n("headless.ns1.svc.cluster.local."), ipv4("31.31.31.31"))],
+                a(n("headless.ns1.svc.cluster.local."), ipv4("31.31.31.31")),
+            ],
             ..Default::default()
         },
         Case {

@@ -1251,15 +1371,18 @@ mod tests {
             query_type: RecordType::AAAA,
             expect_records: vec![
                 aaaa(n("headless.ns1.svc.cluster.local."), ipv6("2001:db8::30")),
-                aaaa(n("headless.ns1.svc.cluster.local."), ipv6("2001:db8::31"))],
+                aaaa(n("headless.ns1.svc.cluster.local."), ipv6("2001:db8::31")),
+            ],
             ..Default::default()
         },
         Case {
             name: "success: headless-ipv6 service returns records for AAAA",
             host: "headless-ipv6.ns1.svc.cluster.local.",
             query_type: RecordType::AAAA,
-            expect_records: vec![
-                aaaa(n("headless-ipv6.ns1.svc.cluster.local."), ipv6("2001:db8::33"))],
+            expect_records: vec![aaaa(
+                n("headless-ipv6.ns1.svc.cluster.local."),
+                ipv6("2001:db8::33"),
+            )],
             ..Default::default()
         },
         Case {

@@ -1298,6 +1421,18 @@ mod tests {
             expect_code: ResponseCode::NXDomain,
             ..Default::default()
         },
+        Case {
+            name: "success: preferred namespace is chosen if local namespace is not defined",
+            host: "preferred.io.",
+            expect_records: vec![a(n("preferred.io."), ipv4("10.10.10.211"))],
+            ..Default::default()
+        },
+        Case {
+            name: "success: external service resolves to local namespace's address",
+            host: "everywhere.io.",
+            expect_records: vec![a(n("everywhere.io."), ipv4("10.10.10.112"))],
+            ..Default::default()
+        },
     ];

     // Create and start the proxy.
@@ -1315,6 +1450,8 @@ mod tests {
             drain,
             &factory,
             local_workload,
+            Some(PREFERRED.to_string()),
+            true, // ipv6_enabled for tests
         )
         .await
         .unwrap();

@@ -1334,8 +1471,8 @@ mod tests {
         tasks.push(async move {
             let name = format!("[{protocol}] {}", c.name);
             let resp = send_request(&mut client, n(c.host), c.query_type).await;
-            assert_eq!(c.expect_authoritative, resp.authoritative(), "{}", name);
-            assert_eq!(c.expect_code, resp.response_code(), "{}", name);
+            assert_eq!(c.expect_authoritative, resp.authoritative(), "{name}");
+            assert_eq!(c.expect_code, resp.response_code(), "{name}");

             if c.expect_code == ResponseCode::NoError {
                 let mut actual = resp.answers().to_vec();

@@ -1346,7 +1483,7 @@ mod tests {
                 if c.expect_authoritative {
                     sort_records(&mut actual);
                }
-                assert_eq!(c.expect_records, actual, "{}", name);
+                assert_eq!(c.expect_records, actual, "{name}");
             }
         });
     }

@@ -1401,6 +1538,8 @@ mod tests {
             drain,
             &factory,
             local_workload,
+            None,
+            true, // ipv6_enabled for tests
         )
         .await
         .unwrap();

@@ -1415,7 +1554,7 @@ mod tests {
         for (protocol, client) in [("tcp", &mut tcp_client), ("udp", &mut udp_client)] {
             let name = format!("[{protocol}] {}", c.name);
             let resp = send_request(client, n(c.host), RecordType::A).await;
-            assert_eq!(c.expect_code, resp.response_code(), "{}", name);
+            assert_eq!(c.expect_code, resp.response_code(), "{name}");
             if c.expect_code == ResponseCode::NoError {
                 assert!(!resp.answers().is_empty());
             }

@@ -1450,6 +1589,8 @@ mod tests {
             }),
             state.clone(),
         ),
+        prefered_service_namespace: None,
+        ipv6_enabled: true,
     };

     let ip4n6_client_ip = ip("::ffff:202:202");

@@ -1457,7 +1598,7 @@ mod tests {
     match store.lookup(&req).await {
         Ok(_) => {}
        Err(e) => {
-            panic!("IPv6 encoded IPv4 should work! Error was {:?}", e)
+            panic!("IPv6 encoded IPv4 should work! Error was {e:?}");
        }
    }
 }

@@ -1483,6 +1624,8 @@ mod tests {
         drain,
         &factory,
         local_workload,
+        None,
+        true, // ipv6_enabled for tests
     )
     .await
     .unwrap();

@@ -1500,7 +1643,7 @@ mod tests {
     let resp = send_request(&mut udp_client, n("large.com."), RecordType::A).await;
     // UDP is truncated
     assert!(resp.truncated());
-    assert_eq!(75, resp.answers().len(), "expected UDP to be truncated");
+    assert_eq!(74, resp.answers().len(), "expected UDP to be truncated");
 }

 #[test]

@@ -1599,6 +1742,16 @@ mod tests {
         xds_external_service("www.google.com", &[na(NW1, "1.1.1.1")]),
         xds_service("productpage", NS1, &[na(NW1, "9.9.9.9")]),
         xds_service("example", NS2, &[na(NW1, "10.10.10.10")]),
+        // Service with the same name in another namespace
+        // This should not be used if the preferred service namespace is set
+        xds_namespaced_external_service("everywhere.io", NS2, &[na(NW1, "10.10.10.110")]),
+        xds_namespaced_external_service("preferred.io", NS2, &[na(NW1, "10.10.10.210")]),
+        // Preferred service namespace
+        xds_namespaced_external_service("everywhere.io", PREFERRED, &[na(NW1, "10.10.10.111")]),
+        xds_namespaced_external_service("preferred.io", PREFERRED, &[na(NW1, "10.10.10.211")]),
+        // Service with the same name in the same namespace
+        // Client in NS1 should use this service
+        xds_namespaced_external_service("everywhere.io", NS1, &[na(NW1, "10.10.10.112")]),
         with_fqdn(
             "details.ns2.svc.cluster.remote",
             xds_service(

@@ -1749,9 +1902,17 @@ mod tests {
     }

     fn xds_external_service<S: AsRef<str>>(hostname: S, addrs: &[NetworkAddress]) -> XdsService {
+        xds_namespaced_external_service(hostname, NS1, addrs)
+    }
+
+    fn xds_namespaced_external_service<S1: AsRef<str>, S2: AsRef<str>>(
+        hostname: S1,
+        ns: S2,
+        vips: &[NetworkAddress],
+    ) -> XdsService {
         with_fqdn(
             hostname.as_ref(),
-            xds_service(hostname.as_ref(), NS1, addrs),
+            xds_service(hostname.as_ref(), ns.as_ref(), vips),
         )
     }

@@ -1813,24 +1974,25 @@ mod tests {
         _: Option<&Workload>,
         request: &Request,
     ) -> Result<Answer, LookupError> {
-        let name = request.query().name().into();
+        let query = request.request_info()?.query;
+        let name = query.name().into();
         let Some(ips) = self.ips.get(&name) else {
             // Not found.
             return Err(LookupError::ResponseCode(ResponseCode::NXDomain));
         };

         let mut out = Vec::new();
-        let rtype = request.query().query_type();
+        let rtype = query.query_type();
         for ip in ips {
             match ip {
                 IpAddr::V4(ip) => {
                     if rtype == RecordType::A {
-                        out.push(a(request.query().name().into(), *ip));
+                        out.push(a(query.name().into(), *ip));
                     }
                 }
                 IpAddr::V6(ip) => {
                     if rtype == RecordType::AAAA {
-                        out.push(aaaa(request.query().name().into(), *ip));
+                        out.push(aaaa(query.name().into(), *ip));
                     }
                 }
             }
|
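The new everywhere.io/preferred.io fixtures above exercise a three-way precedence for services that share a hostname. A minimal sketch of the selection rule they appear to test, with hypothetical Svc and pick names (not the crate's actual types):

struct Svc {
    namespace: String,
    vip: String,
}

// Assumed precedence: client's own namespace first, then the configured
// preferred service namespace, then any remaining candidate.
fn pick<'a>(cands: &'a [Svc], client_ns: &str, preferred: Option<&str>) -> Option<&'a Svc> {
    cands
        .iter()
        .find(|s| s.namespace == client_ns)
        .or_else(|| preferred.and_then(|p| cands.iter().find(|s| s.namespace == p)))
        .or_else(|| cands.first())
}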
@@ -11,8 +11,8 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
+use tracing::Instrument;

-use std::future::Future;
 use std::time::Duration;
 use tokio::sync::watch;
 use tracing::{debug, info, warn};

@@ -38,14 +38,13 @@ pub fn new() -> (DrainTrigger, DrainWatcher) {
 /// * force_shutdown: when this is triggered, the future must forcefully shutdown any ongoing work ASAP.
 ///   This means the graceful drain exceeded the hard deadline, and all work must terminate now.
 ///   This is only required for spawned() tasks; otherwise, the future is dropped entirely, canceling all work.
-pub async fn run_with_drain<F, Fut, O>(
+pub async fn run_with_drain<F, O>(
 component: String,
 drain: DrainWatcher,
 deadline: Duration,
 make_future: F,
 ) where
-F: FnOnce(DrainWatcher, watch::Receiver<()>) -> Fut,
-Fut: Future<Output = O>,
+F: AsyncFnOnce(DrainWatcher, watch::Receiver<()>) -> O,
 O: Send + 'static,
 {
 let (sub_drain_signal, sub_drain) = new();

@@ -53,7 +52,7 @@ pub async fn run_with_drain<F, Fut, O>(
 // Stop accepting once we drain.
 // We will then allow connections up to `deadline` to terminate on their own.
 // After that, they will be forcefully terminated.
-let fut = make_future(sub_drain, force_shutdown);
+let fut = make_future(sub_drain, force_shutdown).in_current_span();
 tokio::select! {
 _res = fut => {}
 res = drain.wait_for_drain() => {
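With the AsyncFnOnce bound, callers can hand run_with_drain an async closure directly instead of a closure returning a named future. A sketch of a caller, assuming async-closure syntax (stable since the Rust 2024 edition) and a cloneable DrainWatcher; listener and handle are illustrative:

async fn serve(listener: tokio::net::TcpListener, drain_watcher: DrainWatcher) {
    run_with_drain(
        "inbound".to_string(),
        drain_watcher,
        Duration::from_secs(10),
        async move |drain, mut force_shutdown| {
            loop {
                tokio::select! {
                    // Hard deadline reached: abandon the accept loop immediately.
                    _ = force_shutdown.changed() => break,
                    res = listener.accept() => {
                        let Ok((stream, _peer)) = res else { break };
                        // Each connection holds a DrainWatcher clone so the
                        // graceful drain waits for it to finish.
                        let drain = drain.clone();
                        tokio::spawn(async move {
                            handle(stream).await; // illustrative handler
                            drop(drain);
                        });
                    }
                }
            }
        },
    )
    .await;
}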
@@ -34,7 +34,7 @@ use hyper::{Request, Response};
 use hyper_util::client::legacy::connect::HttpConnector;
 use tokio::net::{TcpListener, TcpStream};
 use tokio_stream::Stream;
-use tracing::{debug, info, warn, Instrument};
+use tracing::{Instrument, debug, info, warn};

 use crate::tls::ServerCertProvider;
@@ -30,7 +30,7 @@ pub use auth::*;
 pub mod mock {
 pub use super::caclient::mock::CaClient;
 pub use super::manager::mock::{
-new_secret_manager, new_secret_manager_cfg, Config as SecretManagerConfig,
+Config as SecretManagerConfig, new_secret_manager, new_secret_manager_cfg,
 };
 }
@@ -58,10 +58,7 @@ async fn load_token(path: &PathBuf) -> io::Result<Vec<u8>> {
 let t = tokio::fs::read(path).await?;

 if t.is_empty() {
-return Err(io::Error::new(
-io::ErrorKind::Other,
-"token file exists, but was empty",
-));
+return Err(io::Error::other("token file exists, but was empty"));
 }
 Ok(t)
 }
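io::Error::other, stable since Rust 1.74, is shorthand for io::Error::new(io::ErrorKind::Other, ...); the two forms produce equivalent errors:

use std::io;

fn check_nonempty(t: Vec<u8>) -> io::Result<Vec<u8>> {
    if t.is_empty() {
        // Same value as io::Error::new(io::ErrorKind::Other, "...")
        return Err(io::Error::other("token file exists, but was empty"));
    }
    Ok(t)
}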
@@ -15,22 +15,24 @@
 use std::collections::BTreeMap;

 use async_trait::async_trait;
-use prost_types::value::Kind;
 use prost_types::Struct;
+use prost_types::value::Kind;
+use tonic::IntoRequest;
+use tonic::metadata::{AsciiMetadataKey, AsciiMetadataValue};
+use tracing::{debug, error, instrument, warn};

-use tracing::{error, instrument, warn};
+use crate::identity::Error;

 use crate::identity::auth::AuthSource;
 use crate::identity::manager::Identity;
-use crate::identity::Error;
 use crate::tls::{self, TlsGrpcChannel};
-use crate::xds::istio::ca::istio_certificate_service_client::IstioCertificateServiceClient;
 use crate::xds::istio::ca::IstioCertificateRequest;
+use crate::xds::istio::ca::istio_certificate_service_client::IstioCertificateServiceClient;

 pub struct CaClient {
 pub client: IstioCertificateServiceClient<TlsGrpcChannel>,
 pub enable_impersonated_identity: bool,
 pub secret_ttl: i64,
+ca_headers: Vec<(AsciiMetadataKey, AsciiMetadataValue)>,
 }

 impl CaClient {

@@ -41,6 +43,7 @@ impl CaClient {
 auth: AuthSource,
 enable_impersonated_identity: bool,
 secret_ttl: i64,
+ca_headers: Vec<(AsciiMetadataKey, AsciiMetadataValue)>,
 ) -> Result<CaClient, Error> {
 let svc =
 tls::grpc_connector(address, auth, cert_provider.fetch_cert(alt_hostname).await?)?;

@@ -49,6 +52,7 @@ impl CaClient {
 client,
 enable_impersonated_identity,
 secret_ttl,
+ca_headers,
 })
 }
 }
@@ -63,7 +67,7 @@ impl CaClient {
 let csr = cs.csr;
 let private_key = cs.private_key;

-let req = IstioCertificateRequest {
+let mut req = tonic::Request::new(IstioCertificateRequest {
 csr,
 validity_duration: self.secret_ttl,
 metadata: {

@@ -80,14 +84,23 @@ impl CaClient {
 None
 }
 },
-};
+});
+self.ca_headers.iter().for_each(|(k, v)| {
+req.metadata_mut().insert(k.clone(), v.clone());
+
+if let Ok(v_str) = v.to_str() {
+debug!("CA header added: {}={}", k, v_str);
+}
+});

 let resp = self
 .client
 .clone()
-.create_certificate(req)
+.create_certificate(req.into_request())
 .await
 .map_err(Box::new)?
 .into_inner();

 let leaf = resp
 .cert_chain
 .first()

@@ -101,12 +114,8 @@ impl CaClient {
 };
 let certs = tls::WorkloadCertificate::new(&private_key, leaf, chain)?;
 // Make the certificate actually matches the identity we requested.
-if self.enable_impersonated_identity && certs.cert.identity().as_ref() != Some(id) {
-error!(
-"expected identity {:?}, got {:?}",
-id,
-certs.cert.identity()
-);
+if self.enable_impersonated_identity && certs.identity().as_ref() != Some(id) {
+error!("expected identity {:?}, got {:?}", id, certs.identity());
 return Err(Error::SanError(id.to_owned()));
 }
 Ok(certs)
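For reference, a self-contained sketch of how ASCII metadata pairs end up on a tonic request, mirroring the ca_headers loop above; the header name and value here are illustrative only:

use tonic::metadata::{AsciiMetadataKey, AsciiMetadataValue};

fn attach_headers<T>(
    req: &mut tonic::Request<T>,
    headers: &[(AsciiMetadataKey, AsciiMetadataValue)],
) {
    for (k, v) in headers {
        // MetadataMap::insert replaces any existing value for the key.
        req.metadata_mut().insert(k.clone(), v.clone());
    }
}

fn example() {
    let key: AsciiMetadataKey = "x-ca-tenant".parse().unwrap(); // illustrative name
    let value: AsciiMetadataValue = "team-a".parse().unwrap();
    let mut req = tonic::Request::new(());
    attach_headers(&mut req, &[(key, value)]);
}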
|
@ -136,7 +145,7 @@ pub mod mock {
|
||||||
struct ClientState {
|
struct ClientState {
|
||||||
fetches: Vec<Identity>,
|
fetches: Vec<Identity>,
|
||||||
error: bool,
|
error: bool,
|
||||||
gen: tls::mock::CertGenerator,
|
cert_gen: tls::mock::CertGenerator,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
|
@ -221,7 +230,7 @@ pub mod mock {
|
||||||
return Err(Error::Spiffe("injected test error".into()));
|
return Err(Error::Spiffe("injected test error".into()));
|
||||||
}
|
}
|
||||||
let certs = state
|
let certs = state
|
||||||
.gen
|
.cert_gen
|
||||||
.new_certs(&id.to_owned().into(), not_before, not_after);
|
.new_certs(&id.to_owned().into(), not_before, not_after);
|
||||||
state.fetches.push(id.to_owned());
|
state.fetches.push(id.to_owned());
|
||||||
Ok(certs)
|
Ok(certs)
|
||||||
|
@ -246,7 +255,7 @@ pub mod mock {
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use std::iter;
|
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
|
|
||||||
use matches::assert_matches;
|
use matches::assert_matches;
|
||||||
|
@ -286,10 +295,7 @@ mod tests {
|
||||||
);
|
);
|
||||||
|
|
||||||
let res = test_ca_client_with_response(IstioCertificateResponse {
|
let res = test_ca_client_with_response(IstioCertificateResponse {
|
||||||
cert_chain: iter::once(certs.cert)
|
cert_chain: certs.full_chain_and_roots(),
|
||||||
.chain(certs.chain)
|
|
||||||
.map(|c| c.as_pem())
|
|
||||||
.collect(),
|
|
||||||
})
|
})
|
||||||
.await;
|
.await;
|
||||||
assert_matches!(res, Err(Error::SanError(_)));
|
assert_matches!(res, Err(Error::SanError(_)));
|
||||||
|
@ -304,10 +310,7 @@ mod tests {
|
||||||
);
|
);
|
||||||
|
|
||||||
let res = test_ca_client_with_response(IstioCertificateResponse {
|
let res = test_ca_client_with_response(IstioCertificateResponse {
|
||||||
cert_chain: iter::once(certs.cert)
|
cert_chain: certs.full_chain_and_roots(),
|
||||||
.chain(certs.chain)
|
|
||||||
.map(|c| c.as_pem())
|
|
||||||
.collect(),
|
|
||||||
})
|
})
|
||||||
.await;
|
.await;
|
||||||
assert_matches!(res, Ok(_));
|
assert_matches!(res, Ok(_));
|
||||||
|
|
|
@@ -24,8 +24,8 @@ use crate::config::ProxyMode;
 use async_trait::async_trait;

 use prometheus_client::encoding::{EncodeLabelValue, LabelValueEncoder};
-use tokio::sync::{mpsc, watch, Mutex};
-use tokio::time::{sleep_until, Duration, Instant};
+use tokio::sync::{Mutex, mpsc, watch};
+use tokio::time::{Duration, Instant, sleep_until};

 use crate::{strng, tls};

@@ -33,11 +33,14 @@ use super::CaClient;
 use super::Error::{self, Spiffe};

 use crate::strng::Strng;
-use backoff::{backoff::Backoff, ExponentialBackoff};
+use backoff::{ExponentialBackoff, backoff::Backoff};
 use keyed_priority_queue::KeyedPriorityQueue;

 const CERT_REFRESH_FAILURE_RETRY_DELAY_MAX_INTERVAL: Duration = Duration::from_secs(150);

+/// Default trust domain to use if not otherwise specified.
+pub const DEFAULT_TRUST_DOMAIN: &str = "cluster.local";
+
 #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Hash)]
 pub enum Identity {
 Spiffe {

@@ -130,11 +133,10 @@ impl Identity {
 #[cfg(any(test, feature = "testing"))]
 impl Default for Identity {
 fn default() -> Self {
-const TRUST_DOMAIN: &str = "cluster.local";
 const SERVICE_ACCOUNT: &str = "ztunnel";
 const NAMESPACE: &str = "istio-system";
 Identity::Spiffe {
-trust_domain: TRUST_DOMAIN.into(),
+trust_domain: DEFAULT_TRUST_DOMAIN.into(),
 namespace: NAMESPACE.into(),
 service_account: SERVICE_ACCOUNT.into(),
 }
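For context, Identity::Spiffe corresponds to a SPIFFE URI; a sketch of the string the default identity above encodes, with the format assumed from the standard Istio SPIFFE layout:

// spiffe://<trust_domain>/ns/<namespace>/sa/<service_account>
fn example() {
    let uri = format!(
        "spiffe://{}/ns/{}/sa/{}",
        "cluster.local", "istio-system", "ztunnel"
    );
    assert_eq!(uri, "spiffe://cluster.local/ns/istio-system/sa/ztunnel");
}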
@@ -248,8 +250,8 @@ impl Worker {
 // Manages certificate updates. Since all the work is done in a single task, the code is
 // lock-free. This is OK as the code is I/O bound so we don't need the extra parallelism.
 async fn run(&self, mut requests: mpsc::Receiver<Request>) {
-use futures::stream::FuturesUnordered;
 use futures::StreamExt;
+use futures::stream::FuturesUnordered;

 #[derive(Eq, PartialEq)]
 enum Fetch {

@@ -473,7 +475,7 @@ fn push_increase<TKey: Hash + Eq, TPriority: Ord>(
 key: TKey,
 priority: TPriority,
 ) {
-if kp.get_priority(&key).map_or(true, |p| priority > *p) {
+if kp.get_priority(&key).is_none_or(|p| priority > *p) {
 kp.push(key, priority);
 }
 }

@@ -507,6 +509,7 @@ impl SecretManager {
 cfg.auth.clone(),
 cfg.proxy_mode == ProxyMode::Shared,
 cfg.secret_ttl.as_secs().try_into().unwrap_or(60 * 60 * 24),
+cfg.ca_headers.vec.clone(),
 )
 .await?;
 Ok(Self::new_with_client(caclient))
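Option::is_none_or (stable since Rust 1.82) has the same truth table as map_or(true, ...), so the push_increase change above is behavior-preserving:

fn example() {
    let existing: Option<&u32> = Some(&3);
    let candidate = 5u32;
    // Old spelling: existing.map_or(true, |p| candidate > *p)
    assert!(existing.is_none_or(|p| candidate > *p));
    // A missing key also permits the push.
    assert!(None::<&u32>.is_none_or(|p| candidate > *p));
}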
@@ -148,7 +148,7 @@ impl crate::proxy::SocketFactory for InPodSocketPortReuseFactory {
 })?;

 if let Err(e) = sock.set_reuseport(true) {
-tracing::warn!("setting set_reuseport failed: {} addr: {}", e, addr);
+tracing::warn!("setting set_reuseport failed: {e} addr: {addr}");
 }

 sock.bind(addr)?;
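The {e}/{addr} forms here (and in the other panic!/write! changes in this set) are Rust 2021 inline format-argument captures; the output is identical to the positional form:

fn example() {
    let e = "EADDRINUSE";
    let addr = "127.0.0.1:15008";
    let positional = format!("setting set_reuseport failed: {} addr: {}", e, addr);
    let inline = format!("setting set_reuseport failed: {e} addr: {addr}");
    assert_eq!(positional, inline);
}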
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use nix::sched::{setns, CloneFlags};
+use nix::sched::{CloneFlags, setns};
 use std::os::fd::OwnedFd;
 use std::os::unix::io::AsRawFd;
 use std::sync::Arc;
@@ -16,8 +16,8 @@
 // It is not implemented in rust, so this provides an implementation for it.

 use nix::sys::socket::{
-bind as nixbind, connect as nixconnect, listen, socket, AddressFamily, SockFlag, SockType,
-UnixAddr,
+AddressFamily, SockFlag, SockType, UnixAddr, bind as nixbind, connect as nixconnect, listen,
+socket,
 };
 use std::cmp;
 use std::os::fd::AsRawFd;
@@ -15,7 +15,7 @@
 use super::istio::zds::{self, Ack, Version, WorkloadRequest, WorkloadResponse, ZdsHello};
 use super::{WorkloadData, WorkloadMessage};
 use crate::drain::DrainWatcher;
-use nix::sys::socket::{recvmsg, sendmsg, ControlMessageOwned, MsgFlags};
+use nix::sys::socket::{ControlMessageOwned, MsgFlags, recvmsg, sendmsg};
 use prost::Message;
 use std::io::{IoSlice, IoSliceMut};
 use std::os::fd::OwnedFd;
@@ -15,17 +15,17 @@
 use crate::drain;
 use crate::drain::DrainTrigger;
 use std::sync::Arc;
-use tracing::{debug, info, Instrument};
+use tracing::{Instrument, debug, info};

-use super::{metrics::Metrics, Error, WorkloadMessage};
+use super::{Error, WorkloadMessage, metrics::Metrics};

 use crate::proxyfactory::ProxyFactory;
 use crate::state::WorkloadInfo;

 use super::config::InPodConfig;

-use super::netns::{InpodNetns, NetnsID};
 use super::WorkloadUid;
+use super::netns::{InpodNetns, NetnsID};

 // Note: we can't drain on drop, as drain is async (it waits for the drain to finish).
 pub(super) struct WorkloadState {

@@ -388,8 +388,8 @@ impl WorkloadProxyManagerState {
 #[cfg(test)]
 mod tests {
 use super::*;
-use crate::inpod::test_helpers::{self, create_proxy_conflict, new_netns, uid};
 use crate::inpod::WorkloadData;
+use crate::inpod::test_helpers::{self, create_proxy_conflict, new_netns, uid};

 use crate::inpod::istio::zds;
 use matches::assert_matches;
@@ -17,7 +17,7 @@ use super::netns::InpodNetns;

 use crate::proxyfactory::ProxyFactory;
 use crate::state::{DemandProxyState, ProxyState};
-use nix::sched::{unshare, CloneFlags};
+use nix::sched::{CloneFlags, unshare};
 use prometheus_client::registry::Registry;

 use std::sync::{Arc, RwLock};
@@ -37,7 +37,7 @@ use std::os::fd::{AsRawFd, OwnedFd};
 use tracing::debug;

 pub fn uid(i: usize) -> crate::inpod::WorkloadUid {
-crate::inpod::WorkloadUid::new(format!("uid{}", i))
+crate::inpod::WorkloadUid::new(format!("uid{i}"))
 }

 pub struct Fixture {

@@ -138,7 +138,7 @@ pub async fn read_msg(s: &mut UnixStream) -> WorkloadResponse {
 debug!("read {} bytes", read_amount);

 let ret = WorkloadResponse::decode(&buf[..read_amount])
-.unwrap_or_else(|_| panic!("failed to decode. read amount: {}", read_amount));
+.unwrap_or_else(|_| panic!("failed to decode. read amount: {read_amount}"));

 debug!("decoded {:?}", ret);
 ret
@@ -14,14 +14,14 @@

 use crate::drain::DrainWatcher;
 use crate::readiness;
-use backoff::{backoff::Backoff, ExponentialBackoff};
+use backoff::{ExponentialBackoff, backoff::Backoff};
 use std::path::PathBuf;
 use std::time::Duration;
 use tokio::net::UnixStream;
 use tracing::{debug, error, info, warn};

-use super::statemanager::WorkloadProxyManagerState;
 use super::Error;
+use super::statemanager::WorkloadProxyManagerState;

 use super::protocol::WorkloadStreamProcessor;

@@ -401,7 +401,7 @@ pub(crate) mod tests {
 assert!(e.contains("EOF"));
 }
 Ok(()) => {}
-Err(e) => panic!("expected error due to EOF {:?}", e),
+Err(e) => panic!("expected error due to EOF {e:?}"),
 }
 }

@@ -602,11 +602,8 @@ pub(crate) mod tests {
 assert_eq!(state.workload_states().len(), 2);
 let key_set: HashSet<crate::inpod::WorkloadUid> =
 state.workload_states().keys().cloned().collect();
-let expected_key_set: HashSet<crate::inpod::WorkloadUid> = [0, 1]
-.into_iter()
-.map(uid)
-.map(crate::inpod::WorkloadUid::from)
-.collect();
+let expected_key_set: HashSet<crate::inpod::WorkloadUid> =
+[0, 1].into_iter().map(uid).collect();
 assert_eq!(key_set, expected_key_set);
 assert_eq!(m.active_proxy_count.get(), 2);
@@ -12,6 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+use once_cell::sync::Lazy;
+use std::env;
+
 pub mod admin;
 pub mod app;
 pub mod assertions;

@@ -42,3 +45,7 @@ pub mod xds;

 #[cfg(any(test, feature = "testing"))]
 pub mod test_helpers;
+
+#[allow(dead_code)]
+static PQC_ENABLED: Lazy<bool> =
+Lazy::new(|| env::var("COMPLIANCE_POLICY").unwrap_or_default() == "pqc");
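Lazy runs its closure once on first access and caches the result; a standalone sketch of the same pattern as PQC_ENABLED above:

use once_cell::sync::Lazy;
use std::env;

static PQC: Lazy<bool> =
    Lazy::new(|| env::var("COMPLIANCE_POLICY").unwrap_or_default() == "pqc");

fn main() {
    // First deref evaluates the closure; later derefs reuse the cached bool.
    if *PQC {
        println!("post-quantum compliance policy requested");
    }
}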
src/main.rs

@@ -14,8 +14,9 @@

 extern crate core;

+use nix::sys::resource::{Resource, getrlimit, setrlimit};
 use std::sync::Arc;
-use tracing::info;
+use tracing::{info, warn};
 use ztunnel::*;

 #[cfg(feature = "jemalloc")]

@@ -25,9 +26,29 @@ static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;

 #[cfg(feature = "jemalloc")]
 #[allow(non_upper_case_globals)]
-#[export_name = "malloc_conf"]
+#[unsafe(export_name = "malloc_conf")]
 pub static malloc_conf: &[u8] = b"prof:true,prof_active:true,lg_prof_sample:19\0";

+// We use this on Unix systems to increase the number of open file descriptors
+// if possible. This is useful for high-load scenarios where the default limit
+// is too low, which can lead to dropped connections and other issues:
+// see: https://github.com/istio/ztunnel/issues/1585
+fn increase_open_files_limit() {
+#[cfg(unix)]
+if let Ok((soft_limit, hard_limit)) = getrlimit(Resource::RLIMIT_NOFILE) {
+if let Err(e) = setrlimit(Resource::RLIMIT_NOFILE, hard_limit, hard_limit) {
+warn!("failed to set file descriptor limits: {e}");
+} else {
+info!(
+"set file descriptor limits from {} to {}",
+soft_limit, hard_limit
+);
+}
+} else {
+warn!("failed to get file descriptor limits");
+}
+}
+
 fn main() -> anyhow::Result<()> {
 let _log_flush = telemetry::setup_logging();

@@ -74,6 +95,7 @@ fn version() -> anyhow::Result<()> {

 async fn proxy(cfg: Arc<config::Config>) -> anyhow::Result<()> {
 info!("version: {}", version::BuildInfo::new());
+increase_open_files_limit();
 info!("running with config: {}", serde_yaml::to_string(&cfg)?);
 app::build(cfg).await?.wait_termination().await
 }
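A quick way to confirm the effect of increase_open_files_limit with the same nix API (actual values depend on the environment):

use nix::sys::resource::{Resource, getrlimit};

fn main() -> nix::Result<()> {
    let (soft, hard) = getrlimit(Resource::RLIMIT_NOFILE)?;
    // After increase_open_files_limit(), soft should equal hard.
    println!("RLIMIT_NOFILE soft={soft} hard={hard}");
    Ok(())
}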
@@ -18,7 +18,7 @@ use std::mem;
 use prometheus_client::encoding::{EncodeLabelValue, LabelValueEncoder};
 use prometheus_client::registry::Registry;
 use tracing::error;
-use tracing::field::{display, DisplayValue};
+use tracing::field::{DisplayValue, display};
 use tracing_core::field::Value;

 use crate::identity::Identity;
src/proxy.rs

@@ -20,14 +20,14 @@ use std::sync::Arc;
 use std::time::Duration;
 use std::{fmt, io};

-use hickory_proto::error::ProtoError;
+use hickory_proto::ProtoError;

 use crate::strng::Strng;
 use rand::Rng;
 use socket2::TcpKeepalive;
 use tokio::net::{TcpListener, TcpSocket, TcpStream};
 use tokio::time::timeout;
-use tracing::{debug, trace, warn, Instrument};
+use tracing::{Instrument, debug, trace, warn};

 use inbound::Inbound;
 pub use metrics::*;

@@ -48,8 +48,9 @@ use crate::state::{DemandProxyState, WorkloadInfo};
 use crate::{config, identity, socket, tls};

 pub mod connection_manager;
+pub mod inbound;

 mod h2;
-mod inbound;
 mod inbound_passthrough;
 #[allow(non_camel_case_types)]
 pub mod metrics;

@@ -259,6 +260,8 @@ pub(super) struct ProxyInputs {
 socket_factory: Arc<dyn SocketFactory + Send + Sync>,
 local_workload_information: Arc<LocalWorkloadInformation>,
 resolver: Option<Arc<dyn Resolver + Send + Sync>>,
+// If true, inbound connections created with these inputs will not attempt to preserve the original source IP.
+pub disable_inbound_freebind: bool,
 }

 #[allow(clippy::too_many_arguments)]

@@ -271,6 +274,7 @@ impl ProxyInputs {
 socket_factory: Arc<dyn SocketFactory + Send + Sync>,
 resolver: Option<Arc<dyn Resolver + Send + Sync>>,
 local_workload_information: Arc<LocalWorkloadInformation>,
+disable_inbound_freebind: bool,
 ) -> Arc<Self> {
 Arc::new(Self {
 cfg,

@@ -280,6 +284,7 @@ impl ProxyInputs {
 socket_factory,
 local_workload_information,
 resolver,
+disable_inbound_freebind,
 })
 }
 }
@@ -301,7 +306,7 @@ impl Proxy {
 old_cfg.inbound_addr = inbound.address();
 let mut new_pi = (*pi).clone();
 new_pi.cfg = Arc::new(old_cfg);
-std::mem::swap(&mut pi, &mut Arc::new(new_pi));
+pi = Arc::new(new_pi);
 warn!("TEST FAKE: new address is {:?}", pi.cfg.inbound_addr);
 }

@@ -368,7 +373,7 @@ impl fmt::Display for AuthorizationRejectionError {
 match self {
 Self::NoWorkload => write!(fmt, "workload not found"),
 Self::WorkloadMismatch => write!(fmt, "workload mismatch"),
-Self::ExplicitlyDenied(a, b) => write!(fmt, "explicitly denied by: {}/{}", a, b),
+Self::ExplicitlyDenied(a, b) => write!(fmt, "explicitly denied by: {a}/{b}"),
 Self::NotAllowed => write!(fmt, "allow policies exist, but none allowed"),
 }
 }
@@ -446,6 +451,24 @@ pub enum Error {
 #[error("unknown waypoint: {0}")]
 UnknownWaypoint(String),

+#[error("unknown network gateway: {0}")]
+UnknownNetworkGateway(String),
+
+#[error("no service or workload for hostname: {0}")]
+NoHostname(String),
+
+#[error("no endpoints for workload: {0}")]
+NoWorkloadEndpoints(String),
+
+#[error("no valid authority pseudo header: {0}")]
+NoValidAuthority(String),
+
+#[error("no valid service port in authority header: {0}")]
+NoValidServicePort(String, u16),
+
+#[error("no valid target port for workload: {0}")]
+NoValidTargetPort(String, u16),
+
 #[error("no valid routing destination for workload: {0}")]
 NoValidDestination(Box<Workload>),

@@ -455,6 +478,15 @@ pub enum Error {
 #[error("no ip addresses were resolved for workload: {0}")]
 NoResolvedAddresses(String),

+#[error("requested service {0}:{1} found, but cannot resolve port")]
+NoPortForServices(String, u16),
+
+#[error("requested service {0} found, but has no IP addresses")]
+NoIPForService(String),
+
+#[error("no service for target address: {0}")]
+NoService(SocketAddr),
+
 #[error(
 "ip addresses were resolved for workload {0}, but valid dns response had no A/AAAA records"
 )]
@@ -485,6 +517,7 @@ pub enum Error {
 DnsEmpty,
 }

+// Custom TLV for proxy protocol for the identity of the source
 const PROXY_PROTOCOL_AUTHORITY_TLV: u8 = 0xD0;

 pub async fn write_proxy_protocol<T>(

@@ -498,6 +531,10 @@ where
 use ppp::v2::{Builder, Command, Protocol, Version};
 use tokio::io::AsyncWriteExt;

+// When the hbone_addr populated from the authority header contains a svc hostname, the address included
+// with respect to the hbone_addr is the SocketAddr <dst svc IP>:<original dst port>.
+// This is done since addresses doesn't support hostnames.
+// See ref https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt
 debug!("writing proxy protocol addresses: {:?}", addresses);
 let mut builder =
 Builder::with_addresses(Version::Two | Command::Proxy, Protocol::Stream, addresses);
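A hedged sketch of producing a PROXY protocol v2 header with the same ppp builder used above; it assumes ppp's Addresses: From<(SocketAddr, SocketAddr)> conversion and a build() returning the encoded bytes:

use ppp::v2::{Addresses, Builder, Command, Protocol, Version};
use std::net::SocketAddr;

fn proxy_header(src: SocketAddr, dst: SocketAddr) -> std::io::Result<Vec<u8>> {
    // Addresses only carries IPs and ports, which is why a hostname-based
    // hbone_addr is written as <dst svc IP>:<original dst port> above.
    let addresses = Addresses::from((src, dst));
    Builder::with_addresses(Version::Two | Command::Proxy, Protocol::Stream, addresses).build()
}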
@@ -529,11 +566,11 @@ impl TraceParent {
 }
 impl TraceParent {
 fn new() -> Self {
-let mut rng = rand::thread_rng();
+let mut rng = rand::rng();
 Self {
 version: 0,
-trace_id: rng.gen(),
-parent_id: rng.gen(),
+trace_id: rng.random(),
+parent_id: rng.random(),
 flags: 0,
 }
 }
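These are the rand 0.9 renames: thread_rng() became rng() and Rng::gen() became Rng::random() (gen is a reserved keyword in Rust 2024). An equivalent standalone sketch:

use rand::Rng;

fn main() {
    let mut rng = rand::rng(); // was rand::thread_rng()
    let trace_id: u128 = rng.random(); // was rng.gen()
    let parent_id: u64 = rng.random();
    println!("{trace_id:032x} {parent_id:016x}");
}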
@@ -709,17 +746,6 @@ async fn check_from_waypoint(
 check_gateway_address(state, upstream.waypoint.as_ref(), is_waypoint).await
 }

-// Checks if the connection's source identity is the identity for the upstream's network
-// gateway
-async fn check_from_network_gateway(
-state: &DemandProxyState,
-upstream: &Workload,
-src_identity: Option<&Identity>,
-) -> bool {
-let is_gateway = |wl: &Workload| Some(wl.identity()).as_ref() == src_identity;
-check_gateway_address(state, upstream.network_gateway.as_ref(), is_gateway).await
-}
-
 // Check if the source's identity matches any workloads that make up the given gateway
 // TODO: This can be made more accurate by also checking addresses.
 async fn check_gateway_address<F>(
@@ -782,73 +808,86 @@ pub fn parse_forwarded_host(input: &str) -> Option<String> {
 .filter(|host| !host.is_empty())
 }

+#[derive(Debug, Clone, PartialEq)]
+pub enum HboneAddress {
+SocketAddr(SocketAddr),
+SvcHostname(Strng, u16),
+}
+
+impl HboneAddress {
+pub fn port(&self) -> u16 {
+match self {
+HboneAddress::SocketAddr(s) => s.port(),
+HboneAddress::SvcHostname(_, p) => *p,
+}
+}
+
+pub fn ip(&self) -> Option<IpAddr> {
+match self {
+HboneAddress::SocketAddr(s) => Some(s.ip()),
+HboneAddress::SvcHostname(_, _) => None,
+}
+}
+
+pub fn svc_hostname(&self) -> Option<Strng> {
+match self {
+HboneAddress::SocketAddr(_) => None,
+HboneAddress::SvcHostname(s, _) => Some(s.into()),
+}
+}
+
+pub fn hostname_addr(&self) -> Option<Strng> {
+match self {
+HboneAddress::SocketAddr(_) => None,
+HboneAddress::SvcHostname(_, _) => Some(Strng::from(self.to_string())),
+}
+}
+}
+
+impl std::fmt::Display for HboneAddress {
+fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+match self {
+HboneAddress::SocketAddr(addr) => write!(f, "{addr}"),
+HboneAddress::SvcHostname(host, port) => write!(f, "{host}:{port}"),
+}
+}
+}
+
+impl From<SocketAddr> for HboneAddress {
+fn from(socket_addr: SocketAddr) -> Self {
+HboneAddress::SocketAddr(socket_addr)
+}
+}
+
+impl From<(Strng, u16)> for HboneAddress {
+fn from(svc_hostname: (Strng, u16)) -> Self {
+HboneAddress::SvcHostname(svc_hostname.0, svc_hostname.1)
+}
+}
+
+impl TryFrom<&http::Uri> for HboneAddress {
+type Error = Error;
+
+fn try_from(value: &http::Uri) -> Result<Self, Self::Error> {
+match value.to_string().parse::<SocketAddr>() {
+Ok(addr) => Ok(HboneAddress::SocketAddr(addr)),
+Err(_) => {
+let hbone_host = value
+.host()
+.ok_or_else(|| Error::NoValidAuthority(value.to_string()))?;
+let hbone_port = value
+.port_u16()
+.ok_or_else(|| Error::NoValidAuthority(value.to_string()))?;
+Ok(HboneAddress::SvcHostname(hbone_host.into(), hbone_port))
+}
+}
+}
+}
+
 #[cfg(test)]
 mod tests {
 use super::*;

-use hickory_resolver::config::{ResolverConfig, ResolverOpts};
-
-use crate::state::service::EndpointSet;
-use crate::state::workload::NetworkAddress;
-use crate::{
-identity::Identity,
-state::{
-self,
-service::{Endpoint, Service},
-workload::gatewayaddress::Destination,
-},
-};
-use prometheus_client::registry::Registry;
-use std::{collections::HashMap, net::Ipv4Addr, sync::RwLock};
-
-#[tokio::test]
-async fn check_gateway() {
-let w = mock_default_gateway_workload();
-let s = mock_default_gateway_service();
-let mut state = state::ProxyState::new(None);
-state.workloads.insert(Arc::new(w));
-state.services.insert(s);
-let mut registry = Registry::default();
-let metrics = Arc::new(crate::proxy::Metrics::new(&mut registry));
-let state = state::DemandProxyState::new(
-Arc::new(RwLock::new(state)),
-None,
-ResolverConfig::default(),
-ResolverOpts::default(),
-metrics,
-);
-
-let gateawy_id = Identity::Spiffe {
-trust_domain: "cluster.local".into(),
-namespace: "gatewayns".into(),
-service_account: "default".into(),
-};
-let from_gw_conn = Some(gateawy_id);
-let not_from_gw_conn = Some(Identity::default());
-
-let upstream_with_address = mock_wokload_with_gateway(Some(mock_default_gateway_address()));
-assert!(
-check_from_network_gateway(&state, &upstream_with_address, from_gw_conn.as_ref(),)
-.await
-);
-assert!(
-!check_from_network_gateway(&state, &upstream_with_address, not_from_gw_conn.as_ref(),)
-.await
-);
-
-// using hostname (will check the service variant of address::Address)
-let upstream_with_hostname =
-mock_wokload_with_gateway(Some(mock_default_gateway_hostname()));
-assert!(
-check_from_network_gateway(&state, &upstream_with_hostname, from_gw_conn.as_ref(),)
-.await
-);
-assert!(
-!check_from_network_gateway(&state, &upstream_with_hostname, not_from_gw_conn.as_ref())
-.await
-);
-}
-
 #[test]
 fn test_parse_forwarded_host() {
 let header = "by=identifier;for=identifier;host=example.com;proto=https";

@@ -868,116 +907,4 @@ mod tests {
 let header = r#"for=for;by=by;host=host;proto="pröto""#;
 assert_eq!(parse_forwarded_host(header), None);
 }
-
-// private helpers
-fn mock_wokload_with_gateway(gw: Option<GatewayAddress>) -> Workload {
-Workload {
-workload_ips: vec![IpAddr::V4(Ipv4Addr::LOCALHOST)],
-waypoint: None,
-network_gateway: gw,
-protocol: Default::default(),
-network_mode: Default::default(),
-uid: "".into(),
-name: "app".into(),
-namespace: "appns".into(),
-trust_domain: "cluster.local".into(),
-service_account: "default".into(),
-network: "".into(),
-workload_name: "app".into(),
-workload_type: "deployment".into(),
-canonical_name: "app".into(),
-canonical_revision: "".into(),
-hostname: "".into(),
-node: "".into(),
-status: Default::default(),
-cluster_id: "Kubernetes".into(),
-
-authorization_policies: Vec::new(),
-native_tunnel: false,
-application_tunnel: None,
-locality: Default::default(),
-services: Default::default(),
-}
-}
-
-fn mock_default_gateway_workload() -> Workload {
-Workload {
-workload_ips: vec![IpAddr::V4(mock_default_gateway_ipaddr())],
-waypoint: None,
-network_gateway: None,
-protocol: Default::default(),
-network_mode: Default::default(),
-uid: "".into(),
-name: "gateway".into(),
-namespace: "gatewayns".into(),
-trust_domain: "cluster.local".into(),
-service_account: "default".into(),
-network: "".into(),
-workload_name: "gateway".into(),
-workload_type: "deployment".into(),
-canonical_name: "".into(),
-canonical_revision: "".into(),
-hostname: "".into(),
-node: "".into(),
-status: Default::default(),
-cluster_id: "Kubernetes".into(),
-
-authorization_policies: Vec::new(),
-native_tunnel: false,
-application_tunnel: None,
-locality: Default::default(),
-services: Default::default(),
-}
-}
-
-fn mock_default_gateway_service() -> Service {
-let vip1 = NetworkAddress {
-address: IpAddr::V4(Ipv4Addr::new(127, 0, 10, 1)),
-network: "".into(),
-};
-let vips = vec![vip1];
-let mut ports = HashMap::new();
-ports.insert(8080, 80);
-let endpoints = EndpointSet::from_list([Endpoint {
-workload_uid: mock_default_gateway_workload().uid,
-port: ports.clone(),
-status: state::workload::HealthStatus::Healthy,
-}]);
-Service {
-name: "gateway".into(),
-namespace: "gatewayns".into(),
-hostname: "gateway".into(),
-vips,
-ports,
-endpoints,
-subject_alt_names: vec![],
-waypoint: None,
-load_balancer: None,
-ip_families: None,
-}
-}
-
-fn mock_default_gateway_address() -> GatewayAddress {
-GatewayAddress {
-destination: Destination::Address(NetworkAddress {
-network: "".into(),
-address: IpAddr::V4(mock_default_gateway_ipaddr()),
-}),
-hbone_mtls_port: 15008,
-}
-}
-
-fn mock_default_gateway_hostname() -> GatewayAddress {
-GatewayAddress {
-destination: Destination::Hostname(state::workload::NamespacedHostname {
-namespace: "gatewayns".into(),
-hostname: "gateway".into(),
-}),
-hbone_mtls_port: 15008,
-}
-}
-
-fn mock_default_gateway_ipaddr() -> Ipv4Addr {
-Ipv4Addr::new(127, 0, 0, 100)
-}
 }
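How the TryFrom<&http::Uri> above splits the two variants; a sketch assuming authority-form URIs like those carried on HBONE CONNECT requests:

use http::Uri;

fn example() {
    let ip: Uri = "10.0.0.1:15008".parse().unwrap();
    let svc: Uri = "echo.default.svc.cluster.local:8080".parse().unwrap();

    // A literal socket address parses into the SocketAddr variant...
    let a = HboneAddress::try_from(&ip).unwrap();
    assert_eq!(a.port(), 15008);
    assert!(a.ip().is_some());

    // ...anything else falls back to host:port as a service hostname.
    let b = HboneAddress::try_from(&svc).unwrap();
    assert!(b.svc_hostname().is_some());
    assert!(b.ip().is_none());
}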
@@ -24,6 +24,7 @@ use std::net::SocketAddr;

 use crate::drain;
 use crate::drain::{DrainTrigger, DrainWatcher};
+use crate::state::workload::{InboundProtocol, OutboundProtocol};
 use std::sync::Arc;
 use std::sync::RwLock;
 use tracing::{debug, error, info, warn};

@@ -133,6 +134,7 @@ pub struct OutboundConnection {
 pub src: SocketAddr,
 pub original_dst: SocketAddr,
 pub actual_dst: SocketAddr,
+pub protocol: OutboundProtocol,
 }

 #[derive(Debug, Clone, Eq, Hash, Ord, PartialEq, PartialOrd, serde::Serialize)]

@@ -141,6 +143,7 @@ pub struct InboundConnectionDump {
 pub src: SocketAddr,
 pub original_dst: Option<String>,
 pub actual_dst: SocketAddr,
+pub protocol: InboundProtocol,
 }

 #[derive(Debug, Clone, Eq, PartialEq, Hash, serde::Serialize)]

@@ -157,11 +160,13 @@ impl ConnectionManager {
 src: SocketAddr,
 original_dst: SocketAddr,
 actual_dst: SocketAddr,
+protocol: OutboundProtocol,
 ) -> OutboundConnectionGuard {
 let c = OutboundConnection {
 src,
 original_dst,
 actual_dst,
+protocol,
 };

 self.outbound_connections

@@ -278,6 +283,11 @@ impl Serialize for ConnectionManager {
 src: c.ctx.conn.src,
 original_dst: c.dest_service,
 actual_dst: c.ctx.conn.dst,
+protocol: if c.ctx.conn.src_identity.is_some() {
+InboundProtocol::HBONE
+} else {
+InboundProtocol::TCP
+},
 })
 .collect();
 let outbound: Vec<_> = self

@@ -345,8 +355,8 @@ mod tests {
 use crate::rbac::Connection;
 use crate::state::{DemandProxyState, ProxyState};
 use crate::test_helpers::test_default_workload;
-use crate::xds::istio::security::{Action, Authorization, Scope};
 use crate::xds::ProxyStateUpdateMutator;
+use crate::xds::istio::security::{Action, Authorization, Scope};

 use super::{ConnectionGuard, ConnectionManager, InboundConnection, PolicyWatcher};
@ -18,8 +18,8 @@ use futures_core::ready;
|
||||||
use h2::Reason;
|
use h2::Reason;
|
||||||
use std::io::Error;
|
use std::io::Error;
|
||||||
use std::pin::Pin;
|
use std::pin::Pin;
|
||||||
use std::sync::atomic::{AtomicBool, AtomicU16, Ordering};
|
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
use std::sync::atomic::{AtomicBool, AtomicU16, Ordering};
|
||||||
use std::task::{Context, Poll};
|
use std::task::{Context, Poll};
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
use tokio::sync::oneshot;
|
use tokio::sync::oneshot;
|
||||||
|
@ -85,6 +85,11 @@ pub struct H2StreamWriteHalf {
|
||||||
_dropped: Option<DropCounter>,
|
_dropped: Option<DropCounter>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub struct TokioH2Stream {
|
||||||
|
stream: H2Stream,
|
||||||
|
buf: Bytes,
|
||||||
|
}
|
||||||
|
|
||||||
struct DropCounter {
|
struct DropCounter {
|
||||||
// Whether the other end of this shared counter has already dropped.
|
// Whether the other end of this shared counter has already dropped.
|
||||||
// We only decrement if they have, so we do not double count
|
// We only decrement if they have, so we do not double count
|
||||||
|
@ -138,6 +143,69 @@ impl Drop for DropCounter {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// We can't directly implement tokio::io::{AsyncRead, AsyncWrite} for H2Stream because
|
||||||
|
// then the specific implementation will conflict with the generic one.
|
||||||
|
impl TokioH2Stream {
|
||||||
|
pub fn new(stream: H2Stream) -> Self {
|
||||||
|
Self {
|
||||||
|
stream,
|
||||||
|
buf: Bytes::new(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl tokio::io::AsyncRead for TokioH2Stream {
|
||||||
|
fn poll_read(
|
||||||
|
mut self: Pin<&mut Self>,
|
||||||
|
cx: &mut Context<'_>,
|
||||||
|
buf: &mut tokio::io::ReadBuf<'_>,
|
||||||
|
) -> Poll<std::io::Result<()>> {
|
||||||
|
// Just return the bytes we have left over and don't poll the stream because
|
||||||
|
// its unclear what to do if there are bytes left over from the previous read, and when we
|
||||||
|
// poll, we get an error.
|
||||||
|
if self.buf.is_empty() {
|
||||||
|
// If we have no unread bytes, we can poll the stream
|
||||||
|
// and fill self.buf with the bytes we read.
|
||||||
|
let pinned = std::pin::Pin::new(&mut self.stream.read);
|
||||||
|
let res = ready!(copy::ResizeBufRead::poll_bytes(pinned, cx))?;
|
||||||
|
self.buf = res;
|
||||||
|
}
|
||||||
|
// Copy as many bytes as we can from self.buf.
|
||||||
|
let cnt = Ord::min(buf.remaining(), self.buf.len());
|
||||||
|
buf.put_slice(&self.buf[..cnt]);
|
||||||
|
self.buf = self.buf.split_off(cnt);
|
||||||
|
Poll::Ready(Ok(()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl tokio::io::AsyncWrite for TokioH2Stream {
|
||||||
|
fn poll_write(
|
||||||
|
mut self: Pin<&mut Self>,
|
||||||
|
cx: &mut Context<'_>,
|
||||||
|
buf: &[u8],
|
||||||
|
) -> Poll<Result<usize, tokio::io::Error>> {
|
||||||
|
let pinned = std::pin::Pin::new(&mut self.stream.write);
|
||||||
|
let buf = Bytes::copy_from_slice(buf);
|
||||||
|
copy::AsyncWriteBuf::poll_write_buf(pinned, cx, buf)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn poll_flush(
|
||||||
|
mut self: Pin<&mut Self>,
|
||||||
|
cx: &mut Context<'_>,
|
||||||
|
) -> Poll<Result<(), std::io::Error>> {
|
||||||
|
let pinned = std::pin::Pin::new(&mut self.stream.write);
|
||||||
|
copy::AsyncWriteBuf::poll_flush(pinned, cx)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn poll_shutdown(
|
||||||
|
mut self: Pin<&mut Self>,
|
||||||
|
cx: &mut Context<'_>,
|
||||||
|
) -> Poll<Result<(), std::io::Error>> {
|
||||||
|
let pinned = std::pin::Pin::new(&mut self.stream.write);
|
||||||
|
copy::AsyncWriteBuf::poll_shutdown(pinned, cx)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl copy::ResizeBufRead for H2StreamReadHalf {
|
impl copy::ResizeBufRead for H2StreamReadHalf {
|
||||||
fn poll_bytes(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<Bytes>> {
|
fn poll_bytes(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<Bytes>> {
|
||||||
let this = self.get_mut();
|
let this = self.get_mut();
|
||||||
|
@ -156,13 +224,13 @@ impl copy::ResizeBufRead for H2StreamReadHalf {
|
||||||
Some(Err(e)) => {
|
Some(Err(e)) => {
|
||||||
return Poll::Ready(match e.reason() {
|
return Poll::Ready(match e.reason() {
|
||||||
Some(Reason::NO_ERROR) | Some(Reason::CANCEL) => {
|
Some(Reason::NO_ERROR) | Some(Reason::CANCEL) => {
|
||||||
return Poll::Ready(Ok(Bytes::new()))
|
return Poll::Ready(Ok(Bytes::new()));
|
||||||
}
|
}
|
||||||
Some(Reason::STREAM_CLOSED) => {
|
Some(Reason::STREAM_CLOSED) => {
|
||||||
Err(Error::new(std::io::ErrorKind::BrokenPipe, e))
|
Err(Error::new(std::io::ErrorKind::BrokenPipe, e))
|
||||||
}
|
}
|
||||||
_ => Err(h2_to_io_error(e)),
|
_ => Err(h2_to_io_error(e)),
|
||||||
})
|
});
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@@ -199,7 +267,7 @@ impl copy::AsyncWriteBuf for H2StreamWriteHalf {
             Poll::Ready(Err(h2_to_io_error(
                 match ready!(self.send_stream.poll_reset(cx)) {
                     Ok(Reason::NO_ERROR) | Ok(Reason::CANCEL) | Ok(Reason::STREAM_CLOSED) => {
-                        return Poll::Ready(Err(std::io::ErrorKind::BrokenPipe.into()))
+                        return Poll::Ready(Err(std::io::ErrorKind::BrokenPipe.into()));
                     }
                     Ok(reason) => reason.into(),
                     Err(e) => e,

@@ -224,7 +292,7 @@ impl copy::AsyncWriteBuf for H2StreamWriteHalf {
             match ready!(self.send_stream.poll_reset(cx)) {
                 Ok(Reason::NO_ERROR) => return Poll::Ready(Ok(())),
                 Ok(Reason::CANCEL) | Ok(Reason::STREAM_CLOSED) => {
-                    return Poll::Ready(Err(std::io::ErrorKind::BrokenPipe.into()))
+                    return Poll::Ready(Err(std::io::ErrorKind::BrokenPipe.into()));
                 }
                 Ok(reason) => reason.into(),
                 Err(e) => e,
@@ -237,6 +305,6 @@ fn h2_to_io_error(e: h2::Error) -> std::io::Error {
     if e.is_io() {
         e.into_io().unwrap()
     } else {
-        std::io::Error::new(std::io::ErrorKind::Other, e)
+        std::io::Error::other(e)
     }
 }
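The last change swaps `std::io::Error::new(ErrorKind::Other, e)` for `std::io::Error::other(e)`. The two forms are equivalent; `Error::other` is a std shorthand (stabilized in Rust 1.74, so this assumes a reasonably recent toolchain). A tiny sketch:

    use std::io;

    fn main() {
        // Both construct an error with ErrorKind::Other wrapping the same payload.
        let long_form = io::Error::new(io::ErrorKind::Other, "h2 stream failed");
        let short_form = io::Error::other("h2 stream failed");
        assert_eq!(long_form.kind(), short_form.kind());
    }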
@@ -13,20 +13,23 @@
 // limitations under the License.

 use crate::config;
+use crate::identity::Identity;
 use crate::proxy::Error;
 use bytes::{Buf, Bytes};
-use h2::client::{Connection, SendRequest};
 use h2::SendStream;
+use h2::client::{Connection, SendRequest};
 use http::Request;
-use std::sync::atomic::{AtomicBool, AtomicU16, Ordering};
+use std::fmt;
+use std::fmt::{Display, Formatter};
+use std::net::IpAddr;
+use std::net::SocketAddr;
 use std::sync::Arc;
+use std::sync::atomic::{AtomicBool, AtomicU16, Ordering};
 use std::task::{Context, Poll};
 use tokio::io::{AsyncRead, AsyncWrite};
-use tokio::net::TcpStream;
 use tokio::sync::oneshot;
 use tokio::sync::watch::Receiver;
-use tokio_rustls::client::TlsStream;
-use tracing::{debug, error, trace, warn, Instrument};
+use tracing::{Instrument, debug, error, trace, warn};

 #[derive(Debug, Clone)]
 // H2ConnectClient is a wrapper abstracting h2
@@ -34,9 +37,42 @@ pub struct H2ConnectClient {
     sender: SendRequest<Bytes>,
     pub max_allowed_streams: u16,
     stream_count: Arc<AtomicU16>,
+    wl_key: WorkloadKey,
+}
+
+#[derive(PartialEq, Eq, Hash, Clone, Debug)]
+pub struct WorkloadKey {
+    pub src_id: Identity,
+    pub dst_id: Vec<Identity>,
+    // In theory we can just use src,dst,node. However, the dst has a check that
+    // the L3 destination IP matches the HBONE IP. This could be loosened to just assert they are the same identity maybe.
+    pub dst: SocketAddr,
+    // Because we spoof the source IP, we need to key on this as well. Note: for in-pod its already per-pod
+    // pools anyways.
+    pub src: IpAddr,
+}
+
+impl Display for WorkloadKey {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        write!(f, "{}({})->{}[", self.src, &self.src_id, self.dst,)?;
+        for i in &self.dst_id {
+            write!(f, "{i}")?;
+        }
+        write!(f, "]")
+    }
+}
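WorkloadKey identifies everything that makes a pooled HBONE connection reusable: the source identity, the allowed destination identities, and the concrete source/destination addresses. The `PartialEq, Eq, Hash` derives exist so it can serve as a map key. A minimal sketch of keyed pooling under that assumption, with simplified stand-ins for Identity and the pooled client (the real pool lives elsewhere in ztunnel):

    use std::collections::HashMap;
    use std::net::{IpAddr, SocketAddr};

    #[derive(PartialEq, Eq, Hash, Clone, Debug)]
    struct WorkloadKey {
        src_id: String, // stand-in for Identity
        dst_id: Vec<String>,
        dst: SocketAddr,
        src: IpAddr,
    }

    struct Pool {
        conns: HashMap<WorkloadKey, u32>, // u32 stands in for H2ConnectClient
    }

    impl Pool {
        // Reuse a connection only when every field of the key matches;
        // otherwise the caller must establish a new one.
        fn get_or_insert(&mut self, key: WorkloadKey, new_conn: u32) -> u32 {
            *self.conns.entry(key).or_insert(new_conn)
        }
    }

    fn main() {
        let key = WorkloadKey {
            src_id: "spiffe://cluster.local/ns/default/sa/client".into(),
            dst_id: vec!["spiffe://cluster.local/ns/default/sa/server".into()],
            dst: "10.0.0.2:15008".parse().unwrap(),
            src: "10.0.0.1".parse().unwrap(),
        };
        let mut pool = Pool { conns: HashMap::new() };
        assert_eq!(pool.get_or_insert(key.clone(), 1), 1);
        assert_eq!(pool.get_or_insert(key, 2), 1); // same key -> connection reused
    }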
 impl H2ConnectClient {
+    pub fn is_for_workload(&self, wl_key: &WorkloadKey) -> Result<(), crate::proxy::Error> {
+        if !(self.wl_key == *wl_key) {
+            Err(crate::proxy::Error::Generic(
+                "connection does not match workload key!".into(),
+            ))
+        } else {
+            Ok(())
+        }
+    }
+
     // will_be_at_max_streamcount checks if a stream will be maxed out if we send one more request on it
     pub fn will_be_at_max_streamcount(&self) -> bool {
         let future_count = self.stream_count.load(Ordering::Relaxed) + 1;

@@ -109,8 +145,9 @@ impl H2ConnectClient {

 pub async fn spawn_connection(
     cfg: Arc<config::Config>,
-    s: TlsStream<TcpStream>,
+    s: impl AsyncRead + AsyncWrite + Unpin + Send + 'static,
     driver_drain: Receiver<bool>,
+    wl_key: WorkloadKey,
 ) -> Result<H2ConnectClient, Error> {
     let mut builder = h2::client::Builder::new();
     builder

@@ -150,6 +187,7 @@ pub async fn spawn_connection(
         sender: send_req,
         stream_count: Arc::new(AtomicU16::new(0)),
         max_allowed_streams,
+        wl_key,
     };
     Ok(c)
 }
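will_be_at_max_streamcount asks whether sending one more request would exhaust the connection's HTTP/2 stream budget, which is what drives opening additional pooled connections. A sketch of the same check, assuming a fixed max_allowed_streams; Relaxed ordering suffices because the count is advisory rather than a synchronization point:

    use std::sync::Arc;
    use std::sync::atomic::{AtomicU16, Ordering};

    struct Client {
        stream_count: Arc<AtomicU16>,
        max_allowed_streams: u16,
    }

    impl Client {
        fn will_be_at_max_streamcount(&self) -> bool {
            // Would one more stream hit or exceed the cap?
            self.stream_count.load(Ordering::Relaxed) + 1 >= self.max_allowed_streams
        }

        fn open_stream(&self) {
            self.stream_count.fetch_add(1, Ordering::Relaxed);
        }
    }

    fn main() {
        let c = Client {
            stream_count: Arc::new(AtomicU16::new(0)),
            max_allowed_streams: 2,
        };
        assert!(!c.will_be_at_max_streamcount());
        c.open_stream();
        assert!(c.will_be_at_max_streamcount()); // next request would hit the cap
    }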
@@ -17,15 +17,15 @@ use crate::drain::DrainWatcher;
 use crate::proxy::Error;
 use bytes::Bytes;
 use futures_util::FutureExt;
-use http::request::Parts;
 use http::Response;
+use http::request::Parts;
 use std::fmt::Debug;
 use std::future::Future;
-use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::Arc;
+use std::sync::atomic::{AtomicBool, Ordering};
 use tokio::net::TcpStream;
 use tokio::sync::{oneshot, watch};
-use tracing::{debug, Instrument};
+use tracing::{Instrument, debug};

 pub struct H2Request {
     request: Parts,

@@ -42,21 +42,6 @@ impl Debug for H2Request {
 }

 impl H2Request {
-    /// The request's method
-    pub fn method(&self) -> &http::Method {
-        &self.request.method
-    }
-
-    /// The request's URI
-    pub fn uri(&self) -> &http::Uri {
-        &self.request.uri
-    }
-
-    /// The request's headers
-    pub fn headers(&self) -> &http::HeaderMap<http::HeaderValue> {
-        &self.request.headers
-    }
-
     pub fn send_error(mut self, resp: Response<()>) -> Result<(), Error> {
         let _ = self.send.send_response(resp, true)?;
         Ok(())

@@ -79,6 +64,34 @@ impl H2Request {
         let h2 = crate::proxy::h2::H2Stream { read, write };
         Ok(h2)
     }
+
+    pub fn get_request(&self) -> &Parts {
+        &self.request
+    }
+
+    pub fn headers(&self) -> &http::HeaderMap<http::HeaderValue> {
+        self.request.headers()
+    }
+}
+
+pub trait RequestParts {
+    fn uri(&self) -> &http::Uri;
+    fn method(&self) -> &http::Method;
+    fn headers(&self) -> &http::HeaderMap<http::HeaderValue>;
+}
+
+impl RequestParts for Parts {
+    fn uri(&self) -> &http::Uri {
+        &self.uri
+    }
+
+    fn method(&self) -> &http::Method {
+        &self.method
+    }
+
+    fn headers(&self) -> &http::HeaderMap<http::HeaderValue> {
+        &self.headers
+    }
 }
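The accessors move off H2Request and into a RequestParts trait so request-handling logic can be written generically and exercised without a live HTTP/2 stream. A sketch of what that buys, with string-based Method/Uri stand-ins rather than the real http types:

    trait RequestParts {
        fn method(&self) -> &str;
        fn uri(&self) -> &str;
    }

    struct MockParts {
        method: String,
        uri: String,
    }

    impl RequestParts for MockParts {
        fn method(&self) -> &str {
            &self.method
        }
        fn uri(&self) -> &str {
            &self.uri
        }
    }

    // Generic over T: RequestParts, mirroring build_inbound_request<T: RequestParts>.
    fn connect_target<T: RequestParts>(req: &T) -> Result<&str, String> {
        if req.method() != "CONNECT" {
            return Err(format!("non-CONNECT method: {}", req.method()));
        }
        Ok(req.uri())
    }

    fn main() {
        let good = MockParts { method: "CONNECT".into(), uri: "10.0.0.2:8080".into() };
        assert_eq!(connect_target(&good).unwrap(), "10.0.0.2:8080");

        let bad = MockParts { method: "GET".into(), uri: "10.0.0.2:8080".into() };
        assert!(connect_target(&bad).is_err());
    }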
 pub async fn serve_connection<F, Fut>(
@@ -12,39 +12,40 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use futures::stream::StreamExt;
 use futures_util::TryFutureExt;
 use http::{Method, Response, StatusCode};
-use std::net::SocketAddr;
+use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
 use std::sync::Arc;
 use std::time::Instant;
+use tls_listener::AsyncTls;
 use tokio::sync::watch;

-use tracing::{debug, info, info_span, trace_span, Instrument};
+use tracing::{Instrument, debug, error, info, info_span, trace_span};

-use super::{ConnectionResult, Error, LocalWorkloadInformation, ResponseFlags};
+use super::{ConnectionResult, Error, HboneAddress, LocalWorkloadInformation, ResponseFlags, util};
 use crate::baggage::parse_baggage_header;
 use crate::identity::Identity;

 use crate::config::Config;
 use crate::drain::DrainWatcher;
-use crate::proxy::h2::server::H2Request;
+use crate::proxy::h2::server::{H2Request, RequestParts};
 use crate::proxy::metrics::{ConnectionOpen, Reporter};
-use crate::proxy::{metrics, ProxyInputs, TraceParent, BAGGAGE_HEADER, TRACEPARENT_HEADER};
+use crate::proxy::{BAGGAGE_HEADER, ProxyInputs, TRACEPARENT_HEADER, TraceParent, metrics};
 use crate::rbac::Connection;
 use crate::socket::to_canonical;
 use crate::state::service::Service;
-use crate::state::workload::application_tunnel::Protocol as AppProtocol;
 use crate::{assertions, copy, handle_connection, proxy, socket, strng, tls};

 use crate::drain::run_with_drain;
 use crate::proxy::h2;
 use crate::state::workload::address::Address;
+use crate::state::workload::application_tunnel::Protocol;
 use crate::state::workload::{self, NetworkAddress, Workload};
 use crate::state::{DemandProxyState, ProxyRbacContext};
+use crate::strng::Strng;
 use crate::tls::TlsError;

-pub(super) struct Inbound {
+pub struct Inbound {
     listener: socket::Listener,
     drain: DrainWatcher,
     pi: Arc<ProxyInputs>,

@@ -52,7 +53,7 @@ pub(super) struct Inbound {
 }

 impl Inbound {
-    pub(super) async fn new(pi: Arc<ProxyInputs>, drain: DrainWatcher) -> Result<Inbound, Error> {
+    pub(crate) async fn new(pi: Arc<ProxyInputs>, drain: DrainWatcher) -> Result<Inbound, Error> {
         let listener = pi
             .socket_factory
             .tcp_bind(pi.cfg.inbound_addr)

@@ -73,11 +74,12 @@ impl Inbound {
         })
     }

-    pub(super) fn address(&self) -> SocketAddr {
+    /// Returns the socket address this proxy is listening on.
+    pub fn address(&self) -> SocketAddr {
         self.listener.local_addr()
     }

-    pub(super) async fn run(self) {
+    pub async fn run(self) {
         let pi = self.pi.clone();
         let acceptor = InboundCertProvider {
             local_workload: self.pi.local_workload_information.clone(),
@@ -85,62 +87,79 @@ impl Inbound {

-        // Safety: we set nodelay directly in tls_server, so it is safe to convert to a normal listener.
-        // Although, that is *after* the TLS handshake; in theory we may get some benefits to setting it earlier.
-        let mut stream = crate::hyper_util::tls_server(acceptor, self.listener.inner());
-
-        let accept = |drain: DrainWatcher, force_shutdown: watch::Receiver<()>| {
-            async move {
-                while let Some(tls) = stream.next().await {
-                    let pi = self.pi.clone();
-                    let (raw_socket, ssl) = tls.get_ref();
-                    let src_identity: Option<Identity> = tls::identity_from_connection(ssl);
-                    let dst = to_canonical(raw_socket.local_addr().expect("local_addr available"));
-                    let src = to_canonical(raw_socket.peer_addr().expect("peer_addr available"));
-                    let drain = drain.clone();
-                    let force_shutdown = force_shutdown.clone();
-                    let network = pi.cfg.network.clone();
-                    let serve_client = async move {
-                        let conn = Connection {
-                            src_identity,
-                            src,
-                            dst_network: strng::new(&network), // inbound request must be on our network
-                            dst,
-                        };
-                        debug!(%conn, "accepted connection");
-                        let cfg = pi.cfg.clone();
-                        let request_handler = move |req| {
-                            let id = Self::extract_traceparent(&req);
-                            let peer = conn.src;
-                            let req_handler = Self::serve_connect(
-                                pi.clone(),
-                                conn.clone(),
-                                self.enable_orig_src,
-                                req,
-                            )
-                            .instrument(info_span!("inbound", %id, %peer));
-                            // This is for each user connection, so most important to keep small
-                            assertions::size_between_ref(1500, 2500, &req_handler);
-                            req_handler
-                        };
-
-                        let serve_conn = h2::server::serve_connection(
-                            cfg,
-                            tls,
-                            drain,
-                            force_shutdown,
-                            request_handler,
-                        );
-                        // This is per HBONE connection, so while would be nice to be small, at least it
-                        // is pooled so typically fewer of these.
-                        let serve = Box::pin(assertions::size_between(6000, 8000, serve_conn));
-                        serve.await
-                    };
-                    // This is small since it only handles the TLS layer -- the HTTP2 layer is boxed
-                    // and measured above.
-                    assertions::size_between_ref(1000, 1500, &serve_client);
-                    tokio::task::spawn(serve_client.in_current_span());
-                }
-            }
-            .in_current_span()
-        };
+        let accept = async move |drain: DrainWatcher, force_shutdown: watch::Receiver<()>| {
+            loop {
+                let (raw_socket, src) = match self.listener.accept().await {
+                    Ok(raw_socket) => raw_socket,
+                    Err(e) => {
+                        if util::is_runtime_shutdown(&e) {
+                            return;
+                        }
+                        error!("Failed TCP handshake {}", e);
+                        continue;
+                    }
+                };
+                let src = to_canonical(src);
+                let start = Instant::now();
+                let drain = drain.clone();
+                let force_shutdown = force_shutdown.clone();
+                let pi = self.pi.clone();
+                let dst = to_canonical(raw_socket.local_addr().expect("local_addr available"));
+                let network = pi.cfg.network.clone();
+                let acceptor = crate::tls::InboundAcceptor::new(acceptor.clone());
+                let serve_client = async move {
+                    let tls = match acceptor.accept(raw_socket).await {
+                        Ok(tls) => tls,
+                        Err(e) => {
+                            metrics::log_early_deny(src, dst, Reporter::destination, e);
+                            return Err::<(), _>(proxy::Error::SelfCall);
+                        }
+                    };
+                    debug!(latency=?start.elapsed(), "accepted TLS connection");
+                    let (_, ssl) = tls.get_ref();
+                    let src_identity: Option<Identity> = tls::identity_from_connection(ssl);
+                    let conn = Connection {
+                        src_identity,
+                        src,
+                        dst_network: network.clone(), // inbound request must be on our network
+                        dst,
+                    };
+                    debug!(%conn, "accepted connection");
+                    let cfg = pi.cfg.clone();
+                    let request_handler = move |req| {
+                        let id = Self::extract_traceparent(&req);
+                        let peer = conn.src;
+                        let req_handler = Self::serve_connect(
+                            pi.clone(),
+                            conn.clone(),
+                            self.enable_orig_src,
+                            req,
+                        )
+                        .instrument(info_span!("inbound", %id, %peer));
+                        // This is for each user connection, so most important to keep small
+                        assertions::size_between_ref(1500, 2500, &req_handler);
+                        req_handler
+                    };
+
+                    let serve_conn = h2::server::serve_connection(
+                        cfg,
+                        tls,
+                        drain,
+                        force_shutdown,
+                        request_handler,
+                    );
+                    // This is per HBONE connection, so while would be nice to be small, at least it
+                    // is pooled so typically fewer of these.
+                    let serve = Box::pin(assertions::size_between(6000, 8000, serve_conn));
+                    serve.await
+                };
+                // This is small since it only handles the TLS layer -- the HTTP2 layer is boxed
+                // and measured above.
+                assertions::size_between_ref(1000, 1500, &serve_client);
+                tokio::task::spawn(serve_client.in_current_span());
+            }
+        };

         run_with_drain(
@@ -160,6 +179,7 @@ impl Inbound {
             .unwrap_or_else(TraceParent::new)
     }

+    /// serve_connect handles a single connection from a client.
     #[allow(clippy::too_many_arguments)]
     async fn serve_connect(
         pi: Arc<ProxyInputs>,

@@ -176,7 +196,7 @@ impl Inbound {
         // phases.

         // Initial phase, build up context about the request.
-        let ri = match Self::build_inbound_request(&pi, conn, &req).await {
+        let ri = match Self::build_inbound_request(&pi, conn, req.get_request()).await {
             Ok(i) => i,
             Err(InboundError(e, code)) => {
                 // At this point in processing, we never built up full context to log a complete access log.

@@ -192,6 +212,7 @@ impl Inbound {
         // Now we have enough context to properly report logs and metrics. Group everything else that
         // can fail before we send the OK response here.
         let rx = async {
+            // Define a connection guard to ensure rbac conditions are maintained for the duration of the connection
             let conn_guard = pi
                 .connection_manager
                 .assert_rbac(&pi.state, &ri.rbac_ctx, ri.for_host)
@@ -201,18 +222,56 @@ impl Inbound {
                     ResponseFlags::AuthorizationPolicyDenied,
                 ))?;

-            let orig_src = enable_original_source.then_some(ri.rbac_ctx.conn.src.ip());
-            let stream =
-                super::freebind_connect(orig_src, ri.upstream_addr, pi.socket_factory.as_ref())
-                    .await
-                    .map_err(Error::ConnectionFailed)
-                    .map_err(InboundFlagError::build(
-                        StatusCode::SERVICE_UNAVAILABLE,
-                        ResponseFlags::ConnectionFailure,
-                    ))?;
+            // app tunnels should only bind to localhost to prevent
+            // being accessed without going through ztunnel
+            let localhost_tunnel = pi.cfg.localhost_app_tunnel
+                && ri
+                    .tunnel_request
+                    .as_ref()
+                    .map(|tr| tr.protocol.supports_localhost_send())
+                    .unwrap_or(false);
+            let (src, dst) = if localhost_tunnel {
+                // guess the family based on the destination address
+                let loopback = match ri.upstream_addr {
+                    SocketAddr::V4(_) => IpAddr::V4(Ipv4Addr::LOCALHOST),
+                    SocketAddr::V6(_) => IpAddr::V6(Ipv6Addr::LOCALHOST),
+                };
+
+                // we must bind the src to be localhost when sending to localhost,
+                // or various components could break traffic (RPF, iptables, ip route)
+                // the original source is preserved within PROXY protocol
+                (
+                    Some(loopback),
+                    SocketAddr::new(loopback, ri.upstream_addr.port()),
+                )
+            } else {
+                // When ztunnel is proxying to its own internal endpoints (metrics server after HBONE termination),
+                // we must not attempt to use the original external client's IP as the source for this internal connection.
+                // Setting `disable_inbound_freebind` to true for such self-proxy scenarios ensures `upstream_src_ip` is `None`,
+                // causing `freebind_connect` to use a local IP for the connection to ztunnel's own service.
+                // For regular inbound traffic to other workloads, `disable_inbound_freebind` is false, and original source
+                // preservation depends on `enable_original_source`.
+                let upstream_src_ip = if pi.disable_inbound_freebind {
+                    None
+                } else {
+                    enable_original_source.then_some(ri.rbac_ctx.conn.src.ip())
+                };
+                (upstream_src_ip, ri.upstream_addr)
+            };
+
+            // Establish upstream connection between original source and destination
+            // We are allowing a bind to the original source address locally even if the ip address isn't on this node.
+            let stream = super::freebind_connect(src, dst, pi.socket_factory.as_ref())
+                .await
+                .map_err(Error::ConnectionFailed)
+                .map_err(InboundFlagError::build(
+                    StatusCode::SERVICE_UNAVAILABLE,
+                    ResponseFlags::ConnectionFailure,
+                ))?;
             debug!("connected to: {}", ri.upstream_addr);
             Ok((conn_guard, stream))
         };
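The localhost-tunnel branch above picks the loopback address matching the upstream's address family and rebinds both the bind source and the destination to it, keeping only the port. A self-contained sketch of that selection:

    use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};

    // When tunneling to a localhost-bound app, pick 127.0.0.1 or ::1 to match
    // the upstream family and keep the original port.
    fn localhost_target(upstream: SocketAddr) -> (IpAddr, SocketAddr) {
        let loopback = match upstream {
            SocketAddr::V4(_) => IpAddr::V4(Ipv4Addr::LOCALHOST),
            SocketAddr::V6(_) => IpAddr::V6(Ipv6Addr::LOCALHOST),
        };
        // Bind the source to loopback too; a non-local source IP toward a
        // loopback destination can be dropped by RPF/iptables/ip-route checks.
        (loopback, SocketAddr::new(loopback, upstream.port()))
    }

    fn main() {
        let (src, dst) = localhost_target("10.0.0.2:8080".parse().unwrap());
        assert_eq!(src, IpAddr::V4(Ipv4Addr::LOCALHOST));
        assert_eq!(dst, "127.0.0.1:8080".parse().unwrap());
    }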
+        // Wait on establishing the upstream connection and connection guard before sending the 200 response to the client
         let (mut conn_guard, mut stream) = match rx.await {
             Ok(res) => res,
             Err(InboundFlagError(err, flag, code)) => {
@@ -227,14 +286,24 @@ impl Inbound {
         // At this point, we established the upstream connection and need to send a 200 back to the client.
         // we may still have failures at this point during the proxying, but we don't need to send these
         // at the HTTP layer.
+        // Send a 200 back to the client and start forwarding traffic.
+        //
+        // If requested, we may start the stream with a PROXY protocol header. This ensures
+        // that the server has all of the necessary information about the connection regardless of the protocol
+        // See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt for more information about the
+        // proxy protocol.
         let send = req
             .send_response(build_response(StatusCode::OK))
             .and_then(|h2_stream| async {
-                if ri.inbound_protocol == AppProtocol::PROXY {
+                if let Some(TunnelRequest {
+                    protocol: Protocol::PROXY,
+                    tunnel_target,
+                }) = ri.tunnel_request
+                {
                     let Connection {
                         src, src_identity, ..
                     } = ri.rbac_ctx.conn;
-                    super::write_proxy_protocol(&mut stream, (src, ri.hbone_addr), src_identity)
+                    super::write_proxy_protocol(&mut stream, (src, tunnel_target), src_identity)
                         .instrument(trace_span!("proxy protocol"))
                         .await?;
                 }
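The PROXY header is now gated on the resolved TunnelRequest rather than a bare protocol flag: it is written only when the workload opted into an application tunnel using PROXY, and the destination written into the header is the tunnel target computed earlier, not the raw HBONE address. A sketch of that gating with simplified stand-in types (not the real TunnelRequest):

    use std::net::SocketAddr;

    #[derive(Debug)]
    enum Protocol {
        None,
        Proxy,
    }

    struct TunnelRequest {
        protocol: Protocol,
        tunnel_target: SocketAddr,
    }

    // Returns the destination to place in the PROXY header, if one should be sent.
    fn maybe_proxy_header(tr: &Option<TunnelRequest>) -> Option<SocketAddr> {
        match tr {
            Some(TunnelRequest { protocol: Protocol::Proxy, tunnel_target }) => Some(*tunnel_target),
            _ => None,
        }
    }

    fn main() {
        let tunneled = Some(TunnelRequest {
            protocol: Protocol::Proxy,
            tunnel_target: "10.10.0.1:80".parse().unwrap(),
        });
        assert_eq!(maybe_proxy_header(&tunneled).unwrap().port(), 80);

        let plain = Some(TunnelRequest {
            protocol: Protocol::None,
            tunnel_target: "10.10.0.1:80".parse().unwrap(),
        });
        assert!(maybe_proxy_header(&plain).is_none());
    }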
@@ -250,10 +319,11 @@ impl Inbound {
         ri.result_tracker.record(res);
     }

-    async fn build_inbound_request(
+    // build_inbound_request builds up the context for an inbound request.
+    async fn build_inbound_request<T: RequestParts>(
         pi: &Arc<ProxyInputs>,
         conn: Connection,
-        req: &H2Request,
+        req: &T,
     ) -> Result<InboundRequest, InboundError> {
         if req.method() != Method::CONNECT {
             let e = Error::NonConnectMethod(req.method().to_string());
@@ -261,18 +331,14 @@ impl Inbound {
         }

         let start = Instant::now();
-        let hbone_addr = req
-            .uri()
-            .to_string()
-            .as_str()
-            .parse::<SocketAddr>()
-            .map_err(|_| {
-                InboundError(
-                    Error::ConnectAddress(req.uri().to_string()),
-                    StatusCode::BAD_REQUEST,
-                )
-            })?;
+
+        // Extract the host or IP from the authority pseudo-header of the URI
+        let hbone_addr: HboneAddress = req
+            .uri()
+            .try_into()
+            .map_err(InboundError::build(StatusCode::BAD_REQUEST))?;
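The strict `parse::<SocketAddr>()` is replaced by a conversion into HboneAddress, which accepts either an `ip:port` authority or a `hostname:port` authority. A sketch of the two-form parse; the real conversion is a TryFrom on the http Uri, so plain string handling here is a simplification:

    use std::net::SocketAddr;

    #[derive(Debug)]
    enum HboneAddress {
        SocketAddr(SocketAddr),
        SvcHostname(String, u16),
    }

    fn parse_authority(authority: &str) -> Result<HboneAddress, String> {
        // First try a literal socket address.
        if let Ok(addr) = authority.parse::<SocketAddr>() {
            return Ok(HboneAddress::SocketAddr(addr));
        }
        // Otherwise treat it as hostname:port.
        let (host, port) = authority
            .rsplit_once(':')
            .ok_or_else(|| format!("bad authority: {authority}"))?;
        let port: u16 = port.parse().map_err(|_| format!("bad port in: {authority}"))?;
        Ok(HboneAddress::SvcHostname(host.to_string(), port))
    }

    fn main() {
        assert!(matches!(
            parse_authority("10.0.0.2:8080"),
            Ok(HboneAddress::SocketAddr(_))
        ));
        assert!(matches!(
            parse_authority("server.default.svc.cluster.local:80"),
            Ok(HboneAddress::SvcHostname(_, 80))
        ));
    }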
+        // Get the destination workload information of the destination pods (wds) workload (not destination ztunnel)
         let destination_workload = pi
             .local_workload_information
             .get_workload()
@@ -280,14 +346,20 @@ impl Inbound {
             // At this point we already fetched the local workload for TLS, so it should be infallible.
             .map_err(InboundError::build(StatusCode::SERVICE_UNAVAILABLE))?;

-        // Check the request is allowed
-        Self::validate_destination(&pi.cfg, &pi.state, &conn, &destination_workload, hbone_addr)
+        // Check the request is allowed by verifying the destination
+        Self::validate_destination(&pi.state, &conn, &destination_workload, &hbone_addr)
             .await
             .map_err(InboundError::build(StatusCode::BAD_REQUEST))?;

         // Determine the next hop.
-        let (upstream_addr, inbound_protocol, upstream_service) =
-            Self::find_inbound_upstream(&pi.state, &conn, &destination_workload, hbone_addr);
+        let (upstream_addr, tunnel_request, upstream_service) = Self::find_inbound_upstream(
+            &pi.cfg,
+            &pi.state,
+            &conn,
+            &destination_workload,
+            &hbone_addr,
+        )
+        .map_err(InboundError::build(StatusCode::SERVICE_UNAVAILABLE))?;

         let original_dst = conn.dst;
         // Connection has 15008, swap with the real port
@@ -305,12 +377,11 @@ impl Inbound {
         let baggage =
             parse_baggage_header(req.headers().get_all(BAGGAGE_HEADER)).unwrap_or_default();

-        let from_gateway = proxy::check_from_network_gateway(
-            &pi.state,
-            &destination_workload,
-            rbac_ctx.conn.src_identity.as_ref(),
-        )
-        .await;
+        // We assume it is from gateway if it's a hostname request.
+        // We may need a more explicit indicator in the future.
+        // Note: previously this attempted to check that the src identity was equal to the Gateway;
+        // this check is broken as the gateway only forwards an HBONE request, it doesn't initiate it itself.
+        let from_gateway = matches!(hbone_addr, HboneAddress::SvcHostname(_, _));
         if from_gateway {
             debug!("request from gateway");
         }
@@ -330,10 +401,12 @@ impl Inbound {
         let derived_source = metrics::DerivedWorkload {
             identity: rbac_ctx.conn.src_identity.clone(),
             cluster_id: baggage.cluster_id,
+            region: baggage.region,
+            zone: baggage.zone,
             namespace: baggage.namespace,
+            app: baggage.service_name,
             workload_name: baggage.workload_name,
             revision: baggage.revision,
-            ..Default::default()
         };
         let ds = proxy::guess_inbound_service(
             &rbac_ctx.conn,
@@ -346,7 +419,7 @@ impl Inbound {
             // For consistency with outbound logs, report the original destination (with 15008 port)
             // as dst.addr, and the target address as dst.hbone_addr
             original_dst,
-            Some(hbone_addr),
+            Some(hbone_addr.clone()),
             start,
             ConnectionOpen {
                 reporter: Reporter::destination,
@@ -363,24 +436,44 @@ impl Inbound {
             rbac_ctx,
             result_tracker,
             upstream_addr,
-            inbound_protocol,
-            hbone_addr,
+            tunnel_request,
         })
     }

+    // Selects a service by hostname without the explicit knowledge of the namespace
+    // There is no explicit mapping from hostname to namespace (e.g. foo.com)
+    fn find_service_by_hostname(
+        state: &DemandProxyState,
+        local_workload: &Workload,
+        hbone_host: &Strng,
+    ) -> Result<Arc<Service>, Error> {
+        // Validate a service exists for the hostname
+        let services = state.read().find_service_by_hostname(hbone_host)?;
+
+        services
+            .iter()
+            .max_by_key(|s| {
+                let is_local_namespace = s.namespace == local_workload.namespace;
+                match is_local_namespace {
+                    true => 1,
+                    false => 0,
+                }
+            })
+            .cloned()
+            .ok_or_else(|| Error::NoHostname(hbone_host.to_string()))
+    }
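Since a bare hostname carries no namespace, find_service_by_hostname breaks ties among same-hostname services by preferring the local workload's namespace via max_by_key. A sketch of that selection, with services simplified to (namespace, name) pairs:

    // Among services sharing a hostname, prefer the one in the local namespace.
    fn find_service_by_hostname<'a>(
        services: &'a [(&'a str, &'a str)],
        local_namespace: &str,
    ) -> Option<&'a (&'a str, &'a str)> {
        services
            .iter()
            .max_by_key(|(ns, _)| if *ns == local_namespace { 1 } else { 0 })
    }

    fn main() {
        let services = [("other", "foo"), ("default", "foo")];
        let picked = find_service_by_hostname(&services, "default").unwrap();
        assert_eq!(picked.0, "default"); // local namespace wins the tie-break
    }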
     /// validate_destination ensures the destination is an allowed request.
     async fn validate_destination(
-        cfg: &Config,
         state: &DemandProxyState,
         conn: &Connection,
         local_workload: &Workload,
-        hbone_addr: SocketAddr,
+        hbone_addr: &HboneAddress,
     ) -> Result<(), Error> {
-        let illegal_call = cfg.illegal_ports.contains(&hbone_addr.port());
-        if illegal_call {
-            return Err(Error::SelfCall);
-        }
+        let HboneAddress::SocketAddr(hbone_addr) = hbone_addr else {
+            // This is a hostname - it is valid. We may not find the hostname, at which point we will fail later
+            return Ok(());
+        };
+
         if conn.dst.ip() == hbone_addr.ip() {
             // Normal case: both are aligned. This is allowed (we really only need the HBONE address for the port.)
             return Ok(());
@@ -450,44 +543,129 @@ impl Inbound {
         Ok(())
     }

-    fn find_inbound_upstream(
+    /// find_inbound_upstream determines the next hop for an inbound request.
+    #[expect(clippy::type_complexity)]
+    pub(super) fn find_inbound_upstream(
+        cfg: &Config,
         state: &DemandProxyState,
         conn: &Connection,
         local_workload: &Workload,
-        hbone_addr: SocketAddr,
-    ) -> (SocketAddr, AppProtocol, Vec<Arc<Service>>) {
-        let upstream_addr = SocketAddr::new(conn.dst.ip(), hbone_addr.port());
+        hbone_addr: &HboneAddress,
+    ) -> Result<(SocketAddr, Option<TunnelRequest>, Vec<Arc<Service>>), Error> {
+        // We always target the local workload IP as the destination. But we need to determine the port to send to.
+        let target_ip = conn.dst.ip();
+
+        // First, fetch the actual target SocketAddr as well as all possible services this could be for.
+        // Given they may request the pod directly, there may be multiple possible services; we will
+        // select a final one (if any) later.
+        let (dest, services) = match hbone_addr {
+            HboneAddress::SvcHostname(hostname, service_port) => {
+                // Request is to a hostname. This must be a service.
+                // We know the destination IP already (since this is inbound, we just need to forward it),
+                // but will need to resolve the port from service port to target port.
+                let svc = Self::find_service_by_hostname(state, local_workload, hostname)?;
+
+                let endpoint_port = svc
+                    .endpoints
+                    .get(&local_workload.uid)
+                    .and_then(|ep| ep.port.get(service_port));
+                // If we can get the port from the endpoint, that is ideal. But we may not, which is fine
+                // if the service has a number target port (rather than named).
+                let port = if let Some(&ep_port) = endpoint_port {
+                    ep_port
+                } else {
+                    let service_target_port =
+                        svc.ports.get(service_port).copied().unwrap_or_default();
+                    if service_target_port == 0 {
+                        return Err(Error::NoPortForServices(
+                            hostname.to_string(),
+                            *service_port,
+                        ));
+                    }
+                    service_target_port
+                };
+                (SocketAddr::new(target_ip, port), vec![svc])
+            }
+            HboneAddress::SocketAddr(hbone_addr) => (
+                SocketAddr::new(target_ip, hbone_addr.port()),
+                state.get_services_by_workload(local_workload),
+            ),
+        };
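The hostname arm above resolves a service port to a concrete target port in two steps: prefer the endpoint's per-pod port map (which covers named target ports), then fall back to the service's numeric target port, treating 0 as unresolvable. A sketch of that resolution with hypothetical maps standing in for the endpoint and service structures:

    use std::collections::HashMap;

    fn resolve_port(
        endpoint_ports: &HashMap<u16, u16>,
        service_ports: &HashMap<u16, u16>,
        service_port: u16,
    ) -> Result<u16, String> {
        // Ideal: the endpoint's per-pod mapping (handles named target ports).
        if let Some(&p) = endpoint_ports.get(&service_port) {
            return Ok(p);
        }
        // Fallback: the service's numeric target port; 0 means "not found".
        match service_ports.get(&service_port).copied().unwrap_or_default() {
            0 => Err(format!("no target port for service port {service_port}")),
            p => Ok(p),
        }
    }

    fn main() {
        let svc = HashMap::from([(80u16, 8080u16)]);
        assert_eq!(resolve_port(&HashMap::new(), &svc, 80), Ok(8080));
        assert!(resolve_port(&HashMap::new(), &svc, 443).is_err());
    }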
+        // Check for illegal calls now that we have resolved to the final destination.
+        // We need to do this here, rather than `validate_destination`, since the former doesn't
+        // have access to the resolved service port.
+        if cfg.illegal_ports.contains(&dest.port()) {
+            return Err(Error::SelfCall);
+        }
+
         // Application tunnel may override the port.
-        let (upstream_addr, inbound_protocol) = match local_workload.application_tunnel.clone() {
+        let (target, tunnel) = match local_workload.application_tunnel.clone() {
             Some(workload::ApplicationTunnel { port, protocol }) => {
                 // We may need to override the target port. For instance, we may send all PROXY
                 // traffic over a dedicated port like 15088.
-                let new_target =
-                    SocketAddr::new(upstream_addr.ip(), port.unwrap_or(upstream_addr.port()));
+                let new_target = SocketAddr::new(dest.ip(), port.unwrap_or(dest.port()));
                 // Note: the logic to decide which destination address to set inside the PROXY headers
                 // is handled outside of this call. This just determines that location we actually send the
-                // connection to
-                (new_target, protocol)
-            }
-            None => (upstream_addr, AppProtocol::NONE),
-        };
-        let services = state.get_services_by_workload(local_workload);
-
-        (upstream_addr, inbound_protocol, services)
+                // connection to.
+
+                // Which address we will send in the tunnel
+                let tunnel_target = match hbone_addr {
+                    HboneAddress::SvcHostname(h, port) => {
+                        // PROXY cannot currently send to hostnames, so we will need to select an IP to
+                        // use instead
+                        // We ensure a service is set above.
+                        let vip = services
+                            .first()
+                            .expect("service must exist")
+                            .vips
+                            .iter()
+                            .max_by_key(|a| match a.network == conn.dst_network {
+                                true => {
+                                    // Defer to IPv4 if present
+                                    match a.address.is_ipv4() {
+                                        true => 2,
+                                        false => 1,
+                                    }
+                                }
+                                false => 0,
+                            })
+                            .ok_or_else(|| Error::NoIPForService(h.to_string()))?;
+                        SocketAddr::new(vip.address, *port)
+                    }
+                    HboneAddress::SocketAddr(s) => *s,
+                };
+                (
+                    new_target,
+                    Some(TunnelRequest {
+                        tunnel_target,
+                        protocol,
+                    }),
+                )
+            }
+            None => (dest, None),
+        };
+        Ok((target, tunnel, services))
     }
 }
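Because PROXY protocol needs a concrete IP rather than a hostname, the tunnel-target selection above ranks a service's VIPs: a VIP on the connection's network beats one off-network, and IPv4 beats IPv6 within the network. A sketch of that tie-break; the (network, address) pairs are simplified stand-ins for the real VIP type:

    use std::net::IpAddr;

    fn pick_vip<'a>(vips: &'a [(&'a str, IpAddr)], conn_network: &str) -> Option<&'a IpAddr> {
        vips.iter()
            .max_by_key(|(network, addr)| match *network == conn_network {
                // Defer to IPv4 if present on the matching network.
                true => {
                    if addr.is_ipv4() {
                        2
                    } else {
                        1
                    }
                }
                false => 0,
            })
            .map(|(_, addr)| addr)
    }

    fn main() {
        let vips: [(&str, IpAddr); 3] = [
            ("", "fd00::1".parse().unwrap()),
            ("", "10.10.0.1".parse().unwrap()),
            ("remote", "10.99.0.1".parse().unwrap()),
        ];
        assert_eq!(pick_vip(&vips, "").unwrap().to_string(), "10.10.0.1");
    }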
+#[derive(Debug)]
+pub(super) struct TunnelRequest {
+    tunnel_target: SocketAddr,
+    protocol: Protocol,
+}
+
+#[derive(Debug)]
 struct InboundRequest {
     for_host: Option<String>,
     rbac_ctx: ProxyRbacContext,
     result_tracker: Box<ConnectionResult>,
     upstream_addr: SocketAddr,
-    hbone_addr: SocketAddr,
-    inbound_protocol: AppProtocol,
+    tunnel_request: Option<TunnelRequest>,
 }

 /// InboundError represents an error with an associated status code.
+#[derive(Debug)]
 struct InboundError(Error, StatusCode);
 impl InboundError {
     pub fn build(code: StatusCode) -> impl Fn(Error) -> Self {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn parse_forwarded_host(req: &H2Request) -> Option<String> {
|
pub fn parse_forwarded_host<T: RequestParts>(req: &T) -> Option<String> {
|
||||||
req.headers()
|
req.headers()
|
||||||
.get(http::header::FORWARDED)
|
.get(http::header::FORWARDED)
|
||||||
.and_then(|rh| rh.to_str().ok())
|
.and_then(|rh| rh.to_str().ok())
|
||||||
|
@ -534,31 +712,36 @@ fn build_response(status: StatusCode) -> Response<()> {
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
|
#[allow(clippy::too_many_arguments)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use super::Inbound;
|
use super::{Inbound, ProxyInputs};
|
||||||
use crate::{config, strng};
|
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
|
config,
|
||||||
|
identity::manager::mock::new_secret_manager,
|
||||||
|
proxy::{
|
||||||
|
ConnectionManager, DefaultSocketFactory, LocalWorkloadInformation,
|
||||||
|
h2::server::RequestParts, inbound::HboneAddress,
|
||||||
|
},
|
||||||
rbac::Connection,
|
rbac::Connection,
|
||||||
state::{
|
state::{
|
||||||
self,
|
self, DemandProxyState, WorkloadInfo,
|
||||||
service::{Endpoint, EndpointSet, Service},
|
service::{Endpoint, EndpointSet, Service},
|
||||||
workload::{
|
workload::{
|
||||||
application_tunnel::Protocol as AppProtocol, gatewayaddress::Destination,
|
ApplicationTunnel, GatewayAddress, HealthStatus, InboundProtocol, NetworkAddress,
|
||||||
ApplicationTunnel, GatewayAddress, NetworkAddress, Protocol, Workload,
|
NetworkMode, Workload, application_tunnel::Protocol as AppProtocol,
|
||||||
|
gatewayaddress::Destination,
|
||||||
},
|
},
|
||||||
DemandProxyState,
|
|
||||||
},
|
},
|
||||||
test_helpers,
|
strng, test_helpers,
|
||||||
};
|
};
|
||||||
|
use hickory_resolver::config::{ResolverConfig, ResolverOpts};
|
||||||
|
use http::{Method, Uri};
|
||||||
|
use prometheus_client::registry::Registry;
|
||||||
use std::{
|
use std::{
|
||||||
net::SocketAddr,
|
net::SocketAddr,
|
||||||
sync::{Arc, RwLock},
|
sync::{Arc, RwLock},
|
||||||
|
time::Duration,
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::state::workload::HealthStatus;
|
|
||||||
use hickory_resolver::config::{ResolverConfig, ResolverOpts};
|
|
||||||
use prometheus_client::registry::Registry;
|
|
||||||
use test_case::test_case;
|
use test_case::test_case;
|
||||||
|
|
||||||
const CLIENT_POD_IP: &str = "10.0.0.1";
|
const CLIENT_POD_IP: &str = "10.0.0.1";
|
||||||
|
@@ -566,9 +749,12 @@ mod tests {
     const SERVER_POD_IP: &str = "10.0.0.2";
     const SERVER_SVC_IP: &str = "10.10.0.1";

+    const SERVER_POD_HOSTNAME: &str = "server.default.svc.cluster.local";
+
     const WAYPOINT_POD_IP: &str = "10.0.0.3";
     const WAYPOINT_SVC_IP: &str = "10.10.0.2";

+    const SERVER_PORT: u16 = 80;
     const TARGET_PORT: u16 = 8080;
     const PROXY_PORT: u16 = 15088;
@@ -577,8 +763,31 @@ mod tests {
         protocol: AppProtocol::PROXY,
     });

+    struct MockParts {
+        method: Method,
+        uri: Uri,
+        headers: http::HeaderMap<http::HeaderValue>,
+    }
+
+    impl RequestParts for MockParts {
+        fn uri(&self) -> &http::Uri {
+            &self.uri
+        }
+
+        fn method(&self) -> &http::Method {
+            &self.method
+        }
+
+        fn headers(&self) -> &http::HeaderMap<http::HeaderValue> {
+            &self.headers
+        }
+    }
+
     // Regular zTunnel workload traffic inbound
     #[test_case(Waypoint::None, SERVER_POD_IP, SERVER_POD_IP, Some((SERVER_POD_IP, TARGET_PORT)); "to workload no waypoint")]
+    // Svc hostname
+    #[test_case(Waypoint::None, SERVER_POD_IP, SERVER_POD_HOSTNAME, Some((SERVER_POD_IP, TARGET_PORT)); "svc hostname to workload no waypoint")]
+    // Sandwiched Waypoint Cases
     // to workload traffic
     #[test_case(Waypoint::Workload(WAYPOINT_POD_IP, None), WAYPOINT_POD_IP, SERVER_POD_IP , Some((WAYPOINT_POD_IP, TARGET_PORT)); "to workload with waypoint referenced by pod")]
     #[test_case(Waypoint::Workload(WAYPOINT_SVC_IP, None), WAYPOINT_POD_IP, SERVER_POD_IP , Some((WAYPOINT_POD_IP, TARGET_PORT)); "to workload with waypoint referenced by vip")]
@@ -615,22 +824,118 @@ mod tests {
         })
         .await
         .unwrap();
-        let hbone_addr = format!("{hbone_dst}:{TARGET_PORT}").parse().unwrap();
-        let res = Inbound::validate_destination(&cfg, &state, &conn, &local_wl, hbone_addr)
-            .await
-            .map(|_| Inbound::find_inbound_upstream(&state, &conn, &local_wl, hbone_addr));
+        let hbone_addr =
+            if let Ok(addr) = format!("{hbone_dst}:{TARGET_PORT}").parse::<SocketAddr>() {
+                HboneAddress::SocketAddr(addr)
+            } else {
+                HboneAddress::SvcHostname(hbone_dst.into(), SERVER_PORT)
+            };
+
+        let validate_destination =
+            Inbound::validate_destination(&state, &conn, &local_wl, &hbone_addr).await;
+        let res = Inbound::find_inbound_upstream(&cfg, &state, &conn, &local_wl, &hbone_addr);
+
         match want {
             Some((ip, port)) => {
-                let got_addr = res.expect("found upstream").0;
-                assert_eq!(got_addr, SocketAddr::new(ip.parse().unwrap(), port))
+                let got_addr = res.expect("no error").0;
+                assert_eq!(got_addr, SocketAddr::new(ip.parse().unwrap(), port));
             }
             None => {
-                res.expect_err("did not find upstream");
+                validate_destination.expect_err("did not find upstream");
             }
         }
     }

+    // Regular zTunnel workload traffic inbound
+    #[test_case(Waypoint::None, SERVER_POD_IP, SERVER_POD_IP, TARGET_PORT, Some((SERVER_POD_IP, TARGET_PORT, None)); "to workload no waypoint")]
+    // Svc hostname
+    #[test_case(Waypoint::None, SERVER_POD_IP, SERVER_POD_HOSTNAME, SERVER_PORT, Some((SERVER_POD_IP, TARGET_PORT, None)); "svc hostname to workload no waypoint")]
+    // Sandwiched Waypoint Cases
+    // to workload traffic
+    #[test_case(Waypoint::Workload(WAYPOINT_POD_IP, None), WAYPOINT_POD_IP, SERVER_POD_IP, TARGET_PORT, Some((WAYPOINT_POD_IP, TARGET_PORT, None)); "to workload with waypoint referenced by pod")]
+    #[test_case(Waypoint::Workload(WAYPOINT_SVC_IP, None), WAYPOINT_POD_IP, SERVER_POD_IP, TARGET_PORT, Some((WAYPOINT_POD_IP, TARGET_PORT, None)); "to workload with waypoint referenced by vip")]
+    #[test_case(Waypoint::Workload(WAYPOINT_SVC_IP, APP_TUNNEL_PROXY), WAYPOINT_POD_IP, SERVER_POD_IP, TARGET_PORT, Some((WAYPOINT_POD_IP, PROXY_PORT, Some(SERVER_POD_IP))); "to workload with app tunnel")]
+    // to service traffic
+    #[test_case(Waypoint::Service(WAYPOINT_POD_IP, None), WAYPOINT_POD_IP, SERVER_SVC_IP, TARGET_PORT, Some((WAYPOINT_POD_IP, TARGET_PORT, None)); "to service with waypoint referenced by pod")]
+    #[test_case(Waypoint::Service(WAYPOINT_SVC_IP, None), WAYPOINT_POD_IP, SERVER_SVC_IP, TARGET_PORT, Some((WAYPOINT_POD_IP, TARGET_PORT, None)); "to service with waypint referenced by vip")]
+    #[test_case(Waypoint::Service(WAYPOINT_SVC_IP, APP_TUNNEL_PROXY), WAYPOINT_POD_IP, SERVER_SVC_IP, TARGET_PORT, Some((WAYPOINT_POD_IP, PROXY_PORT, Some(SERVER_SVC_IP))); "to service with app tunnel")]
+    // Override port via app_protocol
+    // Error cases
+    #[test_case(Waypoint::None, SERVER_POD_IP, CLIENT_POD_IP, TARGET_PORT, None; "to server ip mismatch" )]
+    #[test_case(Waypoint::None, WAYPOINT_POD_IP, CLIENT_POD_IP, TARGET_PORT, None; "to waypoint without attachment" )]
+    #[test_case(Waypoint::Service(WAYPOINT_POD_IP, None), WAYPOINT_POD_IP, SERVER_POD_IP, TARGET_PORT, None; "to workload via waypoint with wrong attachment")]
+    #[test_case(Waypoint::Workload(WAYPOINT_POD_IP, None), WAYPOINT_POD_IP, SERVER_SVC_IP, TARGET_PORT, None; "to service via waypoint with wrong attachment")]
+    #[tokio::test]
+    async fn test_build_inbound_request(
+        target_waypoint: Waypoint<'_>,
+        connection_dst: &str,
+        hbone_dst: &str,
+        hbobe_dst_port: u16,
+        want: Option<(&str, u16, Option<&str>)>,
+    ) {
+        let state = test_state(target_waypoint).expect("state setup");
+        let cfg = config::parse_config().unwrap();
+        let conn = Connection {
+            src_identity: None,
+            src: format!("{CLIENT_POD_IP}:1234").parse().unwrap(),
+            dst_network: "".into(),
+            dst: format!("{connection_dst}:15008").parse().unwrap(),
+        };
+        let request_parts = MockParts {
+            method: Method::CONNECT,
+            uri: format!("{hbone_dst}:{hbobe_dst_port}").parse().unwrap(),
+            headers: http::HeaderMap::new(),
+        };
+        let cm = ConnectionManager::default();
+        let metrics = Arc::new(crate::proxy::Metrics::new(&mut Registry::default()));
+        let sf = Arc::new(DefaultSocketFactory::default());
+        let wl = state
+            .fetch_workload_by_address(&NetworkAddress {
+                network: "".into(),
+                address: conn.dst.ip(),
+            })
+            .await
+            .unwrap();
+        let local_workload = Arc::new(LocalWorkloadInformation::new(
+            Arc::new(WorkloadInfo {
+                name: wl.name.to_string(),
+                namespace: wl.namespace.to_string(),
+                service_account: wl.service_account.to_string(),
+            }),
+            state.clone(),
+            new_secret_manager(Duration::from_secs(10)),
+        ));
+        let pi = Arc::new(ProxyInputs::new(
+            Arc::new(cfg),
+            cm,
+            state.clone(),
+            metrics.clone(),
+            sf,
+            None,
+            local_workload,
+            false,
+        ));
+        let inbound_request = Inbound::build_inbound_request(&pi, conn, &request_parts).await;
+        match want {
+            Some((ip, port, protocol_addr)) => {
+                let ir = inbound_request.unwrap();
+                assert_eq!(ir.upstream_addr, SocketAddr::new(ip.parse().unwrap(), port));
+                match ir.tunnel_request {
+                    Some(addr) => assert_eq!(
+                        addr.tunnel_target,
+                        SocketAddr::new(protocol_addr.unwrap().parse().unwrap(), hbobe_dst_port)
+                    ),
+                    None => assert_eq!(protocol_addr, None),
+                };
+            }
+            None => {
+                inbound_request.expect_err("could not build inbound request");
+            }
+        }
+    }
+
+    // Creates a test state for the `DemandProxyState` with predefined services and workloads.
+    // server_waypoint specifies the waypoint configuration for the server.
     fn test_state(server_waypoint: Waypoint) -> anyhow::Result<state::DemandProxyState> {
         let mut state = state::ProxyState::new(None);
@@ -647,7 +952,7 @@ mod tests {
                 address: vip.parse().unwrap(),
                 network: "".into(),
             }],
-            ports: std::collections::HashMap::new(),
+            ports: std::collections::HashMap::from([(80u16, 8080u16)]),
             endpoints: EndpointSet::from_list([Endpoint {
                 workload_uid: strng::format!("cluster1//v1/Pod/default/{name}"),
                 port: std::collections::HashMap::new(),
@@ -664,7 +969,6 @@ mod tests {
                 "waypoint",
                 WAYPOINT_POD_IP,
                 Waypoint::None,
-                // the waypoint's _workload_ gets the app tunnel field
                 server_waypoint.app_tunnel(),
             ),
             ("client", CLIENT_POD_IP, Waypoint::None, None),
@@ -674,12 +978,13 @@ mod tests {
             .map(|(name, ip, waypoint, app_tunnel)| Workload {
                 workload_ips: vec![ip.parse().unwrap()],
                 waypoint: waypoint.workload_attached(),
-                protocol: Protocol::HBONE,
+                protocol: InboundProtocol::HBONE,
                 uid: strng::format!("cluster1//v1/Pod/default/{name}"),
                 name: strng::format!("workload-{name}"),
                 namespace: "default".into(),
                 service_account: strng::format!("service-account-{name}"),
                 application_tunnel: app_tunnel,
+                network_mode: NetworkMode::Standard,
                 ..test_helpers::test_default_workload()
             });
@@ -19,13 +19,13 @@ use std::time::Instant;
 use tokio::net::TcpStream;
 use tokio::sync::watch;

-use tracing::{debug, error, info, trace, Instrument};
+use tracing::{Instrument, debug, error, info, trace};

-use crate::drain::run_with_drain;
 use crate::drain::DrainWatcher;
-use crate::proxy::metrics::Reporter;
+use crate::drain::run_with_drain;
 use crate::proxy::Error;
-use crate::proxy::{metrics, util, ProxyInputs};
+use crate::proxy::metrics::Reporter;
+use crate::proxy::{ProxyInputs, metrics, util};
 use crate::state::workload::NetworkAddress;
 use crate::{assertions, copy, handle_connection, rbac, strng};
 use crate::{proxy, socket};
@ -65,45 +65,41 @@ impl InboundPassthrough {
|
||||||
|
|
||||||
pub(super) async fn run(self) {
|
pub(super) async fn run(self) {
|
||||||
let pi = self.pi.clone();
|
let pi = self.pi.clone();
|
||||||
let accept = |drain: DrainWatcher, force_shutdown: watch::Receiver<()>| {
|
let accept = async move |drain: DrainWatcher, force_shutdown: watch::Receiver<()>| {
|
||||||
async move {
|
loop {
|
||||||
loop {
|
// Asynchronously wait for an inbound socket.
|
||||||
// Asynchronously wait for an inbound socket.
|
let socket = self.listener.accept().await;
|
||||||
let socket = self.listener.accept().await;
|
let start = Instant::now();
|
||||||
let start = Instant::now();
|
let mut force_shutdown = force_shutdown.clone();
|
||||||
let mut force_shutdown = force_shutdown.clone();
|
let drain = drain.clone();
|
||||||
let drain = drain.clone();
|
let pi = self.pi.clone();
|
||||||
let pi = self.pi.clone();
|
match socket {
|
||||||
match socket {
|
Ok((stream, remote)) => {
|
||||||
Ok((stream, remote)) => {
|
let serve_client = async move {
|
||||||
let serve_client = async move {
|
debug!(component="inbound passthrough", "connection started");
|
||||||
debug!(component="inbound passthrough", "connection started");
|
|
||||||
// Since this task is spawned, make sure we are guaranteed to terminate
|
// Since this task is spawned, make sure we are guaranteed to terminate
|
||||||
tokio::select! {
|
tokio::select! {
|
||||||
_ = force_shutdown.changed() => {
|
_ = force_shutdown.changed() => {
|
||||||
debug!(component="inbound passthrough", "connection forcefully terminated");
|
debug!(component="inbound passthrough", "connection forcefully terminated");
|
||||||
}
|
|
||||||
_ = Self::proxy_inbound_plaintext(pi, socket::to_canonical(remote), stream, self.enable_orig_src) => {
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
// Mark we are done with the connection, so drain can complete
|
_ = Self::proxy_inbound_plaintext(pi, socket::to_canonical(remote), stream, self.enable_orig_src) => {}
|
||||||
drop(drain);
|
|
||||||
debug!(component="inbound passthrough", dur=?start.elapsed(), "connection completed");
|
|
||||||
}
|
}
|
||||||
.in_current_span();
|
// Mark we are done with the connection, so drain can complete
|
||||||
|
drop(drain);
|
||||||
|
debug!(component="inbound passthrough", dur=?start.elapsed(), "connection completed");
|
||||||
|
}.in_current_span();
|
||||||
|
|
||||||
assertions::size_between_ref(1500, 3000, &serve_client);
|
assertions::size_between_ref(1500, 3000, &serve_client);
|
||||||
tokio::spawn(serve_client);
|
tokio::spawn(serve_client);
|
||||||
}
|
}
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
if util::is_runtime_shutdown(&e) {
|
if util::is_runtime_shutdown(&e) {
|
||||||
return;
|
return;
|
||||||
}
|
|
||||||
error!("Failed TCP handshake {}", e);
|
|
||||||
}
|
}
|
||||||
|
error!("Failed TCP handshake {}", e);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}.in_current_span()
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
run_with_drain(
|
run_with_drain(
|
||||||
|
|
|
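The hunk above (and the matching one in the SOCKS5 listener later in this diff) migrates from a closure that returns an `async move` block to a Rust 2024 `async move` closure, which removes one level of nesting. A minimal, dependency-free sketch of the two shapes; `Watcher` is a hypothetical stand-in for the real `DrainWatcher`/receiver arguments, and the edition 2024 toolchain (Rust 1.85+) is assumed:

    // Sketch only: contrasts the closure styles this refactor migrates between.
    #[derive(Clone)]
    struct Watcher;

    fn main() {
        // Before: a plain closure returning an `async move` block; the loop body
        // sits one indentation level deeper inside the inner block.
        let accept_old = |w: Watcher| {
            async move {
                drop(w); // the accept loop would live here
            }
        };

        // After (Rust 2024 async closures): the closure itself is `async move`,
        // so its body is the future directly and the inner block disappears.
        let accept_new = async move |w: Watcher| {
            drop(w); // the accept loop would live here
        };

        // Neither future is polled; this sketch only shows the two shapes compile.
        let _f1 = accept_old(Watcher);
        let _f2 = accept_new(Watcher);
    }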
@@ -15,10 +15,12 @@
 use std::fmt::Write;
 use std::net::SocketAddr;
 use std::sync::atomic::{AtomicU64, Ordering};
-use std::sync::{atomic, Arc};
+use std::sync::{Arc, atomic};
 use std::time::Instant;

-use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue, LabelValueEncoder};
+use prometheus_client::encoding::{
+    EncodeLabelSet, EncodeLabelValue, LabelSetEncoder, LabelValueEncoder,
+};
 use prometheus_client::metrics::counter::{Atomic, Counter};
 use prometheus_client::metrics::family::Family;
 use prometheus_client::registry::Registry;

@@ -28,12 +30,13 @@ use tracing_core::field::Value;

 use crate::identity::Identity;
 use crate::metrics::DefaultedUnknown;
-use crate::proxy;
+use crate::proxy::{self, HboneAddress};

 use crate::state::service::ServiceDescription;
 use crate::state::workload::Workload;
 use crate::strng::{RichStrng, Strng};

+#[derive(Debug)]
 pub struct Metrics {
     pub connection_opens: Family<CommonTrafficLabels, Counter>,
     pub connection_close: Family<CommonTrafficLabels, Counter>,

@@ -95,6 +98,8 @@ pub struct DerivedWorkload {
     pub namespace: Option<Strng>,
     pub identity: Option<Identity>,
     pub cluster_id: Option<Strng>,
+    pub region: Option<Strng>,
+    pub zone: Option<Strng>,
 }

 #[derive(Clone)]

@@ -123,6 +128,12 @@ impl CommonTrafficLabels {
         self.source_app = w.canonical_name.clone().into();
         self.source_version = w.canonical_revision.clone().into();
         self.source_cluster = w.cluster_id.to_string().into();

+        let mut local = self.locality.0.unwrap_or_default();
+        local.source_region = w.locality.region.clone().into();
+        local.source_zone = w.locality.zone.clone().into();
+        self.locality = OptionallyEncode(Some(local));
+
         self
     }

@@ -137,6 +148,12 @@ impl CommonTrafficLabels {
         self.source_cluster = w.cluster_id.clone().into();
         // This is the identity from the TLS handshake; this is the most trustworthy source so use it
         self.source_principal = w.identity.clone().into();

+        let mut local = self.locality.0.unwrap_or_default();
+        local.source_region = w.region.clone().into();
+        local.source_zone = w.zone.clone().into();
+        self.locality = OptionallyEncode(Some(local));
+
         self
     }

@@ -150,6 +167,12 @@ impl CommonTrafficLabels {
         self.destination_app = w.canonical_name.clone().into();
         self.destination_version = w.canonical_revision.clone().into();
         self.destination_cluster = w.cluster_id.to_string().into();

+        let mut local = self.locality.0.unwrap_or_default();
+        local.destination_region = w.locality.region.clone().into();
+        local.destination_zone = w.locality.zone.clone().into();
+        self.locality = OptionallyEncode(Some(local));
+
         self
     }

@@ -208,6 +231,30 @@ pub struct CommonTrafficLabels {
     request_protocol: RequestProtocol,
     response_flags: ResponseFlags,
     connection_security_policy: SecurityPolicy,

+    #[prometheus(flatten)]
+    locality: OptionallyEncode<LocalityLabels>,
+}
+
+/// OptionallyEncode is a wrapper that will optionally encode the entire label set.
+/// This differs from something like DefaultedUnknown which handles only the value - this makes the
+/// entire label not show up.
+#[derive(Clone, Hash, Default, Debug, PartialEq, Eq)]
+struct OptionallyEncode<T>(Option<T>);
+impl<T: EncodeLabelSet> EncodeLabelSet for OptionallyEncode<T> {
+    fn encode(&self, encoder: LabelSetEncoder) -> Result<(), std::fmt::Error> {
+        match &self.0 {
+            None => Ok(()),
+            Some(ll) => ll.encode(encoder),
+        }
+    }
+}
+#[derive(Clone, Hash, Default, Debug, PartialEq, Eq, EncodeLabelSet)]
+struct LocalityLabels {
+    source_region: DefaultedUnknown<RichStrng>,
+    source_zone: DefaultedUnknown<RichStrng>,
+    destination_region: DefaultedUnknown<RichStrng>,
+    destination_zone: DefaultedUnknown<RichStrng>,
 }

 #[derive(Clone, Hash, Default, Debug, PartialEq, Eq, EncodeLabelSet)]
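The `OptionallyEncode` wrapper added above omits an entire label set from the encoded metric when it is `None`, unlike `DefaultedUnknown`, which still emits the label with a placeholder value. A rough usage sketch of the same pattern; it assumes a prometheus_client version whose `EncodeLabelSet::encode` takes the encoder by value and whose derive supports `#[prometheus(flatten)]`, as the hunk itself does:

    use prometheus_client::encoding::{EncodeLabelSet, LabelSetEncoder};

    // Hypothetical inner label set, standing in for LocalityLabels above.
    #[derive(Clone, Hash, Default, Debug, PartialEq, Eq, EncodeLabelSet)]
    struct Locality {
        region: String,
        zone: String,
    }

    // When the inner Option is None, encode() writes nothing at all, so the
    // labels are absent from the series instead of present-but-empty.
    #[derive(Clone, Hash, Default, Debug, PartialEq, Eq)]
    struct OptionallyEncode<T>(Option<T>);

    impl<T: EncodeLabelSet> EncodeLabelSet for OptionallyEncode<T> {
        fn encode(&self, encoder: LabelSetEncoder) -> Result<(), std::fmt::Error> {
            match &self.0 {
                None => Ok(()),
                Some(inner) => inner.encode(encoder),
            }
        }
    }

    // flatten splices the inner set's labels into the outer one, which is what
    // lets the wrapper drop them wholesale when unset.
    #[derive(Clone, Hash, Default, Debug, PartialEq, Eq, EncodeLabelSet)]
    struct TrafficLabels {
        reporter: String,
        #[prometheus(flatten)]
        locality: OptionallyEncode<Locality>,
    }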
@@ -293,13 +340,14 @@ impl Metrics {
     }
 }

+#[derive(Debug)]
 /// ConnectionResult abstracts recording a metric and emitting an access log upon a connection completion
 pub struct ConnectionResult {
     // Src address and name
     src: (SocketAddr, Option<RichStrng>),
     // Dst address and name
     dst: (SocketAddr, Option<RichStrng>),
-    hbone_target: Option<SocketAddr>,
+    hbone_target: Option<HboneAddress>,
     start: Instant,

     // TODO: storing CommonTrafficLabels adds ~600 bytes retained throughout a connection life time.

@@ -343,7 +391,7 @@ pub fn log_early_deny<E: std::error::Error>(
             "inbound"
         },

-        error = format!("{}", err),
+        error = format!("{err}"),

         "connection failed"
     );

@@ -381,7 +429,7 @@ impl ConnectionResult {
         dst: SocketAddr,
         // If using hbone, the inner HBONE address
         // That is, dst is the L4 address, while is the :authority.
-        hbone_target: Option<SocketAddr>,
+        hbone_target: Option<HboneAddress>,
         start: Instant,
         conn: ConnectionOpen,
         metrics: Arc<Metrics>,

@@ -410,7 +458,7 @@ impl ConnectionResult {
         src.identity = tl.source_principal.as_ref().filter(|_| mtls).map(to_value_owned),

         dst.addr = %dst.0,
-        dst.hbone_addr = hbone_target.map(display),
+        dst.hbone_addr = hbone_target.as_ref().map(display),
         dst.service = tl.destination_service.to_value(),
         dst.workload = dst.1.as_deref().map(to_value),
         dst.namespace = tl.destination_workload_namespace.to_value(),

@@ -504,7 +552,7 @@ impl ConnectionResult {
         src.identity = tl.source_principal.as_ref().filter(|_| mtls).map(to_value_owned),

         dst.addr = %self.dst.0,
-        dst.hbone_addr = self.hbone_target.map(display),
+        dst.hbone_addr = self.hbone_target.as_ref().map(display),
         dst.service = tl.destination_service.to_value(),
         dst.workload = self.dst.1.as_deref().map(to_value),
         dst.namespace = tl.destination_workload_namespace.to_value(),
@@ -13,34 +13,28 @@
 // limitations under the License.

 #![warn(clippy::cast_lossless)]
-use super::{h2, LocalWorkloadInformation};
 use super::{Error, SocketFactory};
+use super::{LocalWorkloadInformation, h2};
 use std::time::Duration;

 use std::collections::hash_map::DefaultHasher;
-use std::fmt;
-use std::fmt::{Display, Formatter};

 use std::hash::{Hash, Hasher};

-use std::net::IpAddr;
-use std::net::SocketAddr;

-use std::sync::atomic::{AtomicI32, Ordering};
 use std::sync::Arc;
+use std::sync::atomic::{AtomicI32, Ordering};

 use tokio::sync::watch;

 use tokio::sync::Mutex;
-use tracing::{debug, trace, Instrument};
+use tracing::{Instrument, debug, trace};

 use crate::config;
-use crate::identity::Identity;

 use flurry;

-use crate::proxy::h2::client::H2ConnectClient;
 use crate::proxy::h2::H2Stream;
+use crate::proxy::h2::client::{H2ConnectClient, WorkloadKey};
 use pingora_pool;
 use tokio::io;

@@ -64,7 +58,7 @@ struct PoolState {
     timeout_tx: watch::Sender<bool>, // This is already impl clone? rustc complains that it isn't, tho
     // this is effectively just a convenience data type - a rwlocked hashmap with keying and LRU drops
     // and has no actual hyper/http/connection logic.
-    connected_pool: Arc<pingora_pool::ConnectionPool<ConnClient>>,
+    connected_pool: Arc<pingora_pool::ConnectionPool<H2ConnectClient>>,
     // this must be an atomic/concurrent-safe list-of-locks, so we can lock per-key, not globally, and avoid holding up all conn attempts
     established_conn_writelock: flurry::HashMap<u64, Option<Arc<Mutex<()>>>>,
     pool_unused_release_timeout: Duration,

@@ -83,7 +77,7 @@ struct ConnSpawner {

 // Does nothing but spawn new conns when asked
 impl ConnSpawner {
-    async fn new_pool_conn(&self, key: WorkloadKey) -> Result<ConnClient, Error> {
+    async fn new_pool_conn(&self, key: WorkloadKey) -> Result<H2ConnectClient, Error> {
         debug!("spawning new pool conn for {}", key);

         let cert = self.local_workload.fetch_certificate().await?;

@@ -97,14 +91,14 @@ impl ConnSpawner {

         let tls_stream = connector.connect(tcp_stream).await?;
         trace!("connector connected, handshaking");
-        let sender =
-            h2::client::spawn_connection(self.cfg.clone(), tls_stream, self.timeout_rx.clone())
-                .await?;
-        let client = ConnClient {
-            sender,
-            wl_key: key,
-        };
-        Ok(client)
+        let sender = h2::client::spawn_connection(
+            self.cfg.clone(),
+            tls_stream,
+            self.timeout_rx.clone(),
+            key,
+        )
+        .await?;
+        Ok(sender)
     }
 }

@@ -129,8 +123,8 @@ impl PoolState {
     //
     // Note that this simply removes the client ref from this pool - if other things hold client/streamrefs refs,
     // they must also drop those before the underlying connection is fully closed.
-    fn maybe_checkin_conn(&self, conn: ConnClient, pool_key: pingora_pool::ConnectionMeta) {
-        if conn.sender.will_be_at_max_streamcount() {
+    fn maybe_checkin_conn(&self, conn: H2ConnectClient, pool_key: pingora_pool::ConnectionMeta) {
+        if conn.will_be_at_max_streamcount() {
             debug!(
                 "checked out connection for {:?} is now at max streamcount; removing from pool",
                 pool_key

@@ -164,7 +158,7 @@ impl PoolState {
         &self,
         hash_key: &u64,
         workload_key: &WorkloadKey,
-    ) -> Result<Option<ConnClient>, Error> {
+    ) -> Result<Option<H2ConnectClient>, Error> {
         match self.connected_pool.get(hash_key) {
             None => Ok(None),
             Some(conn) => match Self::enforce_key_integrity(conn, workload_key) {

@@ -180,9 +174,9 @@ impl PoolState {
     //
     // this is a final safety check for collisions, we will throw up our hands and refuse to return the conn
     fn enforce_key_integrity(
-        conn: ConnClient,
+        conn: H2ConnectClient,
         expected_key: &WorkloadKey,
-    ) -> Result<ConnClient, Error> {
+    ) -> Result<H2ConnectClient, Error> {
         match conn.is_for_workload(expected_key) {
             Ok(()) => Ok(conn),
             Err(e) => Err(e),

@@ -211,7 +205,7 @@ impl PoolState {
         &self,
         workload_key: &WorkloadKey,
         pool_key: &pingora_pool::ConnectionMeta,
-    ) -> Result<Option<ConnClient>, Error> {
+    ) -> Result<Option<H2ConnectClient>, Error> {
         let inner_conn_lock = {
             trace!("getting keyed lock out of lockmap");
             let guard = self.established_conn_writelock.guard();

@@ -273,7 +267,7 @@ impl PoolState {
         &self,
         workload_key: &WorkloadKey,
         pool_key: &pingora_pool::ConnectionMeta,
-    ) -> Result<Option<ConnClient>, Error> {
+    ) -> Result<Option<H2ConnectClient>, Error> {
         let found_conn = {
             trace!("pool connect outer map - take guard");
             let guard = self.established_conn_writelock.guard();

@@ -293,13 +287,12 @@ impl PoolState {

         trace!(
             "checkout - got writelock for conn with key {} and hash {:?}",
-            workload_key,
-            pool_key.key
+            workload_key, pool_key.key
         );
         let returned_connection = loop {
             match self.guarded_get(&pool_key.key, workload_key)? {
                 Some(mut existing) => {
-                    if !existing.sender.ready_to_use() {
+                    if !existing.ready_to_use() {
                         // We checked this out, and will not check it back in
                         // Loop again to find another/make a new one
                         debug!(

@@ -329,7 +322,9 @@ impl PoolState {
 // which will terminate all connection driver spawns, as well as cancel all outstanding eviction timeout spawns
 impl Drop for PoolState {
     fn drop(&mut self) {
-        debug!("poolstate dropping, stopping all connection drivers and cancelling all outstanding eviction timeout spawns");
+        debug!(
+            "poolstate dropping, stopping all connection drivers and cancelling all outstanding eviction timeout spawns"
+        );
         let _ = self.timeout_tx.send(true);
     }
 }

@@ -378,7 +373,7 @@ impl WorkloadHBONEPool {
     ) -> Result<H2Stream, Error> {
         let mut connection = self.connect(workload_key).await?;

-        connection.sender.send_request(request).await
+        connection.send_request(request).await
     }

     // Obtain a pooled connection. Will prefer to retrieve an existing conn from the pool, but

@@ -387,7 +382,7 @@ impl WorkloadHBONEPool {
     //
     // If many `connects` request a connection to the same dest at once, all will wait until exactly
     // one connection is created, before deciding if they should create more or just use that one.
-    async fn connect(&mut self, workload_key: &WorkloadKey) -> Result<ConnClient, Error> {
+    async fn connect(&mut self, workload_key: &WorkloadKey) -> Result<H2ConnectClient, Error> {
         trace!("pool connect START");
         // TODO BML this may not be collision resistant, or a fast hash. It should be resistant enough for workloads tho.
         // We are doing a deep-equals check at the end to mitigate any collisions, will see about bumping Pingora

@@ -488,7 +483,10 @@ impl WorkloadHBONEPool {
             .await?;
         match existing_conn {
             None => {
-                trace!("woke up on pool notification, but didn't find a conn for {:?} yet", hash_key);
+                trace!(
+                    "woke up on pool notification, but didn't find a conn for {:?} yet",
+                    hash_key
+                );
                 continue;
             }
             Some(e_conn) => {
@@ -508,84 +506,40 @@ impl WorkloadHBONEPool {
     }
 }

-#[derive(Debug, Clone)]
-// A sort of faux-client, that represents a single checked-out 'request sender' which might
-// send requests over some underlying stream using some underlying http/2 client
-struct ConnClient {
-    sender: H2ConnectClient,
-    // A WL key may have many clients, but every client has no more than one WL key
-    wl_key: WorkloadKey, // the WL key associated with this client.
-}
-
-impl ConnClient {
-    pub fn is_for_workload(&self, wl_key: &WorkloadKey) -> Result<(), crate::proxy::Error> {
-        if !(self.wl_key == *wl_key) {
-            Err(crate::proxy::Error::Generic(
-                "fetched connection does not match workload key!".into(),
-            ))
-        } else {
-            Ok(())
-        }
-    }
-}
-
-// This is currently only for debugging
-impl Drop for ConnClient {
-    fn drop(&mut self) {
-        trace!("dropping ConnClient for key {}", self.wl_key,)
-    }
-}
-
-#[derive(PartialEq, Eq, Hash, Clone, Debug)]
-pub struct WorkloadKey {
-    pub src_id: Identity,
-    pub dst_id: Vec<Identity>,
-    // In theory we can just use src,dst,node. However, the dst has a check that
-    // the L3 destination IP matches the HBONE IP. This could be loosened to just assert they are the same identity maybe.
-    pub dst: SocketAddr,
-    // Because we spoof the source IP, we need to key on this as well. Note: for in-pod its already per-pod
-    // pools anyways.
-    pub src: IpAddr,
-}
-
-impl Display for WorkloadKey {
-    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
-        write!(f, "{}({})->{}[", self.src, &self.src_id, self.dst,)?;
-        for i in &self.dst_id {
-            write!(f, "{i}")?;
-        }
-        write!(f, "]")
-    }
-}
-
 #[cfg(test)]
 mod test {
     use std::convert::Infallible;
+    use std::net::IpAddr;
     use std::net::SocketAddr;
     use std::time::Instant;

     use crate::{drain, identity, proxy};

-    use futures_util::{future, StreamExt};
+    use futures_util::{StreamExt, future};
     use hyper::body::Incoming;

     use hickory_resolver::config::{ResolverConfig, ResolverOpts};
     use hyper::service::service_fn;
     use hyper::{Request, Response};
     use prometheus_client::registry::Registry;
-    use std::sync::atomic::AtomicU32;
     use std::sync::RwLock;
+    use std::sync::atomic::AtomicU32;
     use std::time::Duration;
+    use tokio::io::AsyncReadExt;
     use tokio::io::AsyncWriteExt;
     use tokio::net::TcpListener;

     use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender};
     use tokio::sync::oneshot;

-    use tracing::{error, Instrument};
+    use tracing::{Instrument, error};

     use crate::test_helpers::helpers::initialize_telemetry;

+    use crate::identity::Identity;
+
+    use self::h2::TokioH2Stream;
+
     use super::*;
     use crate::drain::DrainWatcher;
     use crate::state::workload;

@@ -594,7 +548,7 @@ mod test {
     use ztunnel::test_helpers::*;

     macro_rules! assert_opens_drops {
-        ($srv:expr, $open:expr, $drops:expr) => {
+        ($srv:expr_2021, $open:expr_2021, $drops:expr_2021) => {
             assert_eq!(
                 $srv.conn_counter.load(Ordering::Relaxed),
                 $open,
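For context on the pool refactor above: connections are keyed by hashing a `WorkloadKey` (now defined in `h2::client`), and the removed `ConnClient` wrapper's deep-equality guard survives as `is_for_workload` on `H2ConnectClient`. A simplified, self-contained sketch of that keying idea, with plain strings standing in for the real `Identity` type:

    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};
    use std::net::{IpAddr, SocketAddr};

    // Illustrative stand-in for the real WorkloadKey: source identity/IP and
    // destination identities/address all participate in pool keying, so a
    // connection is only reused for an identical (src, dst) pairing.
    #[derive(PartialEq, Eq, Hash, Clone, Debug)]
    struct WorkloadKey {
        src_id: String,
        dst_id: Vec<String>,
        src: IpAddr,
        dst: SocketAddr,
    }

    fn pool_hash(key: &WorkloadKey) -> u64 {
        // The code itself notes this hash may not be collision resistant, which
        // is why a deep-equality check still guards every checkout.
        let mut h = DefaultHasher::new();
        key.hash(&mut h);
        h.finish()
    }

    fn main() {
        let key = WorkloadKey {
            src_id: "spiffe://cluster.local/ns/default/sa/client".into(),
            dst_id: vec!["spiffe://cluster.local/ns/default/sa/server".into()],
            src: "10.0.0.1".parse().unwrap(),
            dst: "10.0.0.2:15008".parse().unwrap(),
        };
        println!("pool key hash: {}", pool_hash(&key));
    }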
@@ -639,6 +593,50 @@ mod test {
         assert_opens_drops!(srv, 1, 1);
     }

+    /// This is really a test for TokioH2Stream, but its nicer here because we have access to
+    /// streams.
+    /// Most important, we make sure there are no panics.
+    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+    async fn read_buffering() {
+        let (mut pool, srv) = setup_test(3).await;
+
+        let key = key(&srv, 2);
+        let req = || {
+            http::Request::builder()
+                .uri(srv.addr.to_string())
+                .method(http::Method::CONNECT)
+                .version(http::Version::HTTP_2)
+                .body(())
+                .unwrap()
+        };
+
+        let c = pool.send_request_pooled(&key.clone(), req()).await.unwrap();
+        let mut c = TokioH2Stream::new(c);
+        c.write_all(b"abcde").await.unwrap();
+        let mut b = [0u8; 100];
+        // Properly buffer reads and don't error
+        assert_eq!(c.read(&mut b).await.unwrap(), 8);
+        assert_eq!(&b[..8], b"poolsrv\n"); // this is added by itself
+        assert_eq!(c.read(&mut b[..1]).await.unwrap(), 1);
+        assert_eq!(&b[..1], b"a");
+        assert_eq!(c.read(&mut b[..1]).await.unwrap(), 1);
+        assert_eq!(&b[..1], b"b");
+        assert_eq!(c.read(&mut b[..1]).await.unwrap(), 1);
+        assert_eq!(&b[..1], b"c");
+        assert_eq!(c.read(&mut b).await.unwrap(), 2); // there are only two bytes left
+        assert_eq!(&b[..2], b"de");
+
+        // Once we drop the pool, we should still retained the buffered data,
+        // but then we should error.
+        c.write_all(b"abcde").await.unwrap();
+        assert_eq!(c.read(&mut b[..3]).await.unwrap(), 3);
+        assert_eq!(&b[..3], b"abc");
+        drop(pool);
+        assert_eq!(c.read(&mut b[..2]).await.unwrap(), 2);
+        assert_eq!(&b[..2], b"de");
+        assert!(c.read(&mut b).await.is_err());
+    }
+
     #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
     async fn unique_keys_have_unique_connections() {
         let (pool, mut srv) = setup_test(3).await;
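The `read_buffering` test above pins down the `TokioH2Stream` contract: an HTTP/2 DATA frame can be larger than the caller's buffer, so unread bytes must be retained across `read` calls, and already-buffered bytes must remain readable even after the pool is dropped. A generic sketch of that buffering pattern using blocking `std::io::Read` (not the actual TokioH2Stream implementation):

    use std::collections::VecDeque;
    use std::io::Read;

    // Frame-oriented source: each popped entry is a whole message, like an
    // h2 DATA frame. A byte-oriented reader may ask for less than one frame.
    struct BufferedStream {
        frames: VecDeque<Vec<u8>>,
        leftover: Vec<u8>, // unconsumed tail of the last frame
    }

    impl Read for BufferedStream {
        fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
            if self.leftover.is_empty() {
                match self.frames.pop_front() {
                    Some(frame) => self.leftover = frame,
                    None => return Ok(0), // EOF once frames and buffer are empty
                }
            }
            let n = buf.len().min(self.leftover.len());
            buf[..n].copy_from_slice(&self.leftover[..n]);
            self.leftover.drain(..n); // keep the unread tail for the next call
            Ok(n)
        }
    }

    fn main() {
        let mut s = BufferedStream {
            frames: VecDeque::from(vec![b"abcde".to_vec()]),
            leftover: Vec::new(),
        };
        let mut one = [0u8; 1];
        assert_eq!(s.read(&mut one).unwrap(), 1);
        assert_eq!(&one, b"a"); // later reads return the rest in order
        let mut rest = [0u8; 8];
        let n = s.read(&mut rest).unwrap();
        assert_eq!(&rest[..n], b"bcde");
    }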
@@ -19,8 +19,9 @@ use crate::dns::resolver::Resolver;
 use hickory_proto::op::{Message, MessageType, Query};
 use hickory_proto::rr::{Name, RecordType};
 use hickory_proto::serialize::binary::BinDecodable;
+use hickory_proto::xfer::Protocol;
 use hickory_server::authority::MessageRequest;
-use hickory_server::server::{Protocol, Request};
+use hickory_server::server::Request;
 use std::net::{IpAddr, Ipv4Addr, SocketAddr};
 use std::sync::Arc;
 use std::time::Instant;

@@ -28,12 +29,12 @@ use tokio::io::AsyncReadExt;
 use tokio::io::AsyncWriteExt;
 use tokio::net::TcpStream;
 use tokio::sync::watch;
-use tracing::{debug, error, info, info_span, warn, Instrument};
+use tracing::{Instrument, debug, error, info, info_span, warn};

-use crate::drain::run_with_drain;
 use crate::drain::DrainWatcher;
+use crate::drain::run_with_drain;
 use crate::proxy::outbound::OutboundConnection;
-use crate::proxy::{util, Error, ProxyInputs, TraceParent};
+use crate::proxy::{Error, ProxyInputs, TraceParent, util};
 use crate::{assertions, socket};

 pub(super) struct Socks5 {

@@ -76,46 +77,44 @@ impl Socks5 {
             self.pi.socket_factory.clone(),
             self.pi.local_workload_information.clone(),
         );
-        let accept = |drain: DrainWatcher, force_shutdown: watch::Receiver<()>| {
-            async move {
-                loop {
+        let accept = async move |drain: DrainWatcher, force_shutdown: watch::Receiver<()>| {
+            loop {
                 // Asynchronously wait for an inbound socket.
                 let socket = self.listener.accept().await;
                 let start = Instant::now();
                 let drain = drain.clone();
                 let mut force_shutdown = force_shutdown.clone();
                 match socket {
                     Ok((stream, _remote)) => {
                         let oc = OutboundConnection {
                             pi: self.pi.clone(),
                             id: TraceParent::new(),
                             pool: pool.clone(),
                             hbone_port: self.pi.cfg.inbound_addr.port(),
                         };
                         let span = info_span!("socks5", id=%oc.id);
                         let serve = (async move {
                             debug!(component="socks5", "connection started");
                             // Since this task is spawned, make sure we are guaranteed to terminate
                             tokio::select! {
                                 _ = force_shutdown.changed() => {
                                     debug!(component="socks5", "connection forcefully terminated");
                                 }
                                 _ = handle_socks_connection(oc, stream) => {}
                             }
                             // Mark we are done with the connection, so drain can complete
                             drop(drain);
                             debug!(component="socks5", dur=?start.elapsed(), "connection completed");
                         }).instrument(span);

                         assertions::size_between_ref(1000, 2000, &serve);
                         tokio::spawn(serve);
                     }
                     Err(e) => {
                         if util::is_runtime_shutdown(&e) {
                             return;
                         }
                         error!("Failed TCP handshake {}", e);
                     }
                 }
             }
-            }
         };
@@ -204,8 +203,7 @@ async fn negotiate_socks_connection(

     if version != 0x05 {
         return Err(SocksError::invalid_protocol(format!(
-            "unsupported version {}",
-            version
+            "unsupported version {version}",
         )));
     }

@@ -317,7 +315,7 @@ async fn dns_lookup(
     let answer = resolver.lookup(&req).await?;
     let response = answer
         .record_iter()
-        .filter_map(|rec| rec.data().and_then(|d| d.ip_addr()))
+        .filter_map(|rec| rec.data().ip_addr())
         .next() // TODO: do not always use the first result
         .ok_or_else(|| Error::DnsEmpty)?;

@@ -350,7 +348,7 @@ async fn send_response(
     // https://www.rfc-editor.org/rfc/rfc1928#section-6
     let mut buf: Vec<u8> = Vec::with_capacity(10);
     buf.push(0x05); // version
     // Status
     buf.push(match err {
         None => 0,
         Some(SocksError::General(_)) => 1,
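For reference, the reply assembled in `send_response` follows RFC 1928 §6: VER, REP, RSV, ATYP, BND.ADDR, BND.PORT. A standalone sketch of building a success reply for an IPv4 bind address; the helper name is ours, not from the codebase:

    use std::net::{Ipv4Addr, SocketAddrV4};

    // Build an RFC 1928 section 6 reply: VER REP RSV ATYP BND.ADDR BND.PORT.
    fn socks5_success_reply(bound: SocketAddrV4) -> Vec<u8> {
        let mut buf = Vec::with_capacity(10);
        buf.push(0x05); // VER: SOCKS5
        buf.push(0x00); // REP: 0 = succeeded (1 = general failure, etc.)
        buf.push(0x00); // RSV: reserved, must be zero
        buf.push(0x01); // ATYP: 1 = IPv4
        buf.extend_from_slice(&bound.ip().octets()); // BND.ADDR, 4 bytes
        buf.extend_from_slice(&bound.port().to_be_bytes()); // BND.PORT, network order
        buf
    }

    fn main() {
        let reply = socks5_success_reply(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, 0));
        assert_eq!(reply, vec![0x05, 0x00, 0x00, 0x01, 0, 0, 0, 0, 0, 0]);
    }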
@@ -22,10 +22,9 @@ use crate::dns;
 use crate::drain::DrainWatcher;

 use crate::proxy::connection_manager::ConnectionManager;
+use crate::proxy::{DefaultSocketFactory, Proxy, inbound::Inbound};
 use crate::proxy::{Error, LocalWorkloadInformation, Metrics};

-use crate::proxy::Proxy;
-
 // Proxy factory creates ztunnel proxies using a socket factory.
 // this allows us to create our proxies the same way in regular mode and in inpod mode.
 pub struct ProxyFactory {

@@ -113,6 +112,8 @@ impl ProxyFactory {
                 drain.clone(),
                 socket_factory.as_ref(),
                 local_workload_information.as_fetcher(),
+                self.config.prefered_service_namespace.clone(),
+                self.config.ipv6_enabled,
             )
             .await?;
             resolver = Some(server.resolver());

@@ -130,6 +131,7 @@ impl ProxyFactory {
             socket_factory.clone(),
             resolver,
             local_workload_information,
+            false,
         );
         result.connection_manager = Some(cm);
         result.proxy = Some(Proxy::from_inputs(pi, drain).await?);

@@ -137,6 +139,52 @@ impl ProxyFactory {

         Ok(result)
     }

+    /// Creates an inbound listener specifically for ztunnel's own internal endpoints (metrics).
+    /// This allows ztunnel to act as its own workload, enforcing policies on traffic directed to itself.
+    /// This is distinct from the main inbound listener which handles traffic for other workloads proxied by ztunnel.
+    pub async fn create_ztunnel_self_proxy_listener(
+        &self,
+    ) -> Result<Option<crate::proxy::inbound::Inbound>, Error> {
+        if self.config.proxy_mode != config::ProxyMode::Shared {
+            return Ok(None);
+        }
+
+        if let (Some(ztunnel_identity), Some(ztunnel_workload)) =
+            (&self.config.ztunnel_identity, &self.config.ztunnel_workload)
+        {
+            tracing::info!(
+                "creating ztunnel self-proxy listener with identity: {:?}",
+                ztunnel_identity
+            );
+
+            let local_workload_information = Arc::new(LocalWorkloadInformation::new(
+                Arc::new(ztunnel_workload.clone()),
+                self.state.clone(),
+                self.cert_manager.clone(),
+            ));
+
+            let socket_factory = Arc::new(DefaultSocketFactory(self.config.socket_config));
+
+            let cm = ConnectionManager::default();
+
+            let pi = crate::proxy::ProxyInputs::new(
+                self.config.clone(),
+                cm.clone(),
+                self.state.clone(),
+                self.proxy_metrics.clone(),
+                socket_factory,
+                None,
+                local_workload_information,
+                true,
+            );
+
+            let inbound = Inbound::new(pi, self.drain.clone()).await?;
+            Ok(Some(inbound))
+        } else {
+            Ok(None)
+        }
+    }
 }

 #[derive(Default)]
src/rbac.rs (20 lines changed)

@@ -18,16 +18,16 @@ use std::fmt;
 use std::fmt::{Display, Formatter};
 use std::net::SocketAddr;
 use tracing::{instrument, trace};
-use xds::istio::security::string_match::MatchType;
 use xds::istio::security::Address as XdsAddress;
 use xds::istio::security::Authorization as XdsRbac;
 use xds::istio::security::Match;
 use xds::istio::security::ServiceAccountMatch as XdsServiceAccountMatch;
 use xds::istio::security::StringMatch as XdsStringMatch;
+use xds::istio::security::string_match::MatchType;

 use crate::identity::Identity;

-use crate::state::workload::{byte_to_ip, WorkloadError};
+use crate::state::workload::{WorkloadError, byte_to_ip};
 use crate::strng::Strng;
 use crate::{strng, xds};

@@ -523,13 +523,15 @@ mod tests {

     #[test]
     fn rbac_empty_policy() {
-        assert!(!allow_policy(
-            "empty",
-            vec![vec![vec![RbacMatch {
-                ..Default::default()
-            }]]]
-        )
-        .matches(&plaintext_conn()));
+        assert!(
+            !allow_policy(
+                "empty",
+                vec![vec![vec![RbacMatch {
+                    ..Default::default()
+                }]]]
+            )
+            .matches(&plaintext_conn())
+        );
         assert!(allow_policy("empty", vec![vec![vec![]]]).matches(&plaintext_conn()));
         assert!(allow_policy("empty", vec![vec![]]).matches(&plaintext_conn()));
         assert!(!allow_policy("empty", vec![]).matches(&plaintext_conn()));
@@ -66,7 +66,7 @@ impl ShutdownTrigger {
 #[cfg(unix)]
 mod imp {
     use std::process;
-    use tokio::signal::unix::{signal, SignalKind};
+    use tokio::signal::unix::{SignalKind, signal};
     use tokio::sync::mpsc::Receiver;
     use tracing::info;
src/state.rs (250 lines changed)
@@ -21,8 +21,8 @@ use crate::state::service::{
 };
 use crate::state::service::{Service, ServiceDescription};
 use crate::state::workload::{
-    address::Address, gatewayaddress::Destination, network_addr, GatewayAddress,
-    NamespacedHostname, NetworkAddress, Workload, WorkloadStore,
+    GatewayAddress, NamespacedHostname, NetworkAddress, Workload, WorkloadStore, address::Address,
+    gatewayaddress::Destination, network_addr,
 };
 use crate::strng::Strng;
 use crate::tls;

@@ -33,11 +33,12 @@ use crate::{cert_fetcher, config, rbac, xds};
 use crate::{proxy, strng};
 use educe::Educe;
 use futures_util::FutureExt;
+use hickory_resolver::TokioResolver;
 use hickory_resolver::config::*;
 use hickory_resolver::name_server::TokioConnectionProvider;
-use hickory_resolver::TokioAsyncResolver;
 use itertools::Itertools;
 use rand::prelude::IteratorRandom;
+use rand::seq::IndexedRandom;
 use serde::Serializer;
 use std::collections::HashMap;
 use std::convert::Into;

@@ -61,7 +62,8 @@ pub struct Upstream {
     pub workload: Arc<Workload>,
     /// selected_workload_ip defines the IP address we should actually use to connect to this workload
     /// This handles multiple IPs (dual stack) or Hostname destinations (DNS resolution)
-    pub selected_workload_ip: IpAddr,
+    /// The workload IP might be empty if we have to go through a network gateway.
+    pub selected_workload_ip: Option<IpAddr>,
     /// Port is the port we should connect to
     pub port: u16,
     /// Service SANs defines SANs defined at the service level *only*. A complete view of things requires

@@ -72,8 +74,9 @@ pub struct Upstream {
 }

 impl Upstream {
-    pub fn workload_socket_addr(&self) -> SocketAddr {
-        SocketAddr::new(self.selected_workload_ip, self.port)
+    pub fn workload_socket_addr(&self) -> Option<SocketAddr> {
+        self.selected_workload_ip
+            .map(|ip| SocketAddr::new(ip, self.port))
     }
     pub fn workload_and_services_san(&self) -> Vec<Identity> {
         self.service_sans

@@ -88,6 +91,19 @@ impl Upstream {
             .chain(std::iter::once(self.workload.identity()))
             .collect()
     }

+    pub fn service_sans(&self) -> Vec<Identity> {
+        self.service_sans
+            .iter()
+            .flat_map(|san| match Identity::from_str(san) {
+                Ok(id) => Some(id),
+                Err(err) => {
+                    warn!("ignoring invalid SAN {}: {}", san, err);
+                    None
+                }
+            })
+            .collect()
+    }
 }

 // Workload information that a specific proxy instance represents. This is used to cross check
@@ -258,6 +274,14 @@ impl ProxyState {
         })
     }

+    /// Find services by hostname.
+    pub fn find_service_by_hostname(&self, hostname: &Strng) -> Result<Vec<Arc<Service>>, Error> {
+        // Hostnames for services are more common, so lookup service first and fallback to workload.
+        self.services
+            .get_by_host(hostname)
+            .ok_or_else(|| Error::NoHostname(hostname.to_string()))
+    }
+
     fn find_upstream(
         &self,
         network: Strng,

@@ -342,14 +366,26 @@ impl ProxyState {
             debug!("failed to fetch workload for {}", ep.workload_uid);
             return None;
         };

+        let in_network = wl.network == src.network;
+        let has_network_gateway = wl.network_gateway.is_some();
+        let has_address = !wl.workload_ips.is_empty() || !wl.hostname.is_empty();
+        if !has_address {
+            // Workload has no IP. We can only reach it via a network gateway
+            // WDS is client-agnostic, so we will get a network gateway for a workload
+            // even if it's in the same network; we should never use it.
+            if in_network || !has_network_gateway {
+                return None;
+            }
+        }
+
         match resolution_mode {
             ServiceResolutionMode::Standard => {
                 if target_port.unwrap_or_default() == 0 && !ep.port.contains_key(&svc_port) {
                     // Filter workload out, it doesn't have a matching port
                     trace!(
                         "filter endpoint {}, it does not have service port {}",
-                        ep.workload_uid,
-                        svc_port
+                        ep.workload_uid, svc_port
                     );
                     return None;
                 }

@@ -371,7 +407,7 @@ impl ProxyState {
             Some((ep, wl))
         });

-        match svc.load_balancer {
+        let options = match svc.load_balancer {
             Some(ref lb) if lb.mode != LoadBalancerMode::Standard => {
                 let ranks = endpoints
                     .filter_map(|(ep, wl)| {

@@ -410,14 +446,21 @@ impl ProxyState {
                     })
                     .collect::<Vec<_>>();
                 let max = *ranks.iter().map(|(rank, _ep, _wl)| rank).max()?;
-                ranks
+                let options: Vec<_> = ranks
                     .into_iter()
                     .filter(|(rank, _ep, _wl)| *rank == max)
                     .map(|(_, ep, wl)| (ep, wl))
-                    .choose(&mut rand::thread_rng())
+                    .collect();
+                options
             }
-            _ => endpoints.choose(&mut rand::thread_rng()),
-        }
+            _ => endpoints.collect(),
+        };
+        options
+            .choose_weighted(&mut rand::rng(), |(_, wl)| wl.capacity as u64)
+            // This can fail if there are no weights, the sum is zero (not possible in our API), or if it overflows
+            // The API has u32 but we sum into an u64, so it would take ~4 billion entries of max weight to overflow
+            .ok()
+            .cloned()
     }
 }
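The rewritten selection above first narrows the candidates (preferred-locality ranks, or all endpoints) and then draws one pick weighted by workload `capacity`, using `choose_weighted` from rand's `IndexedRandom` slice trait (rand 0.9 also renames `thread_rng()` to `rng()`). A small sketch of the same weighted pick over plain data, assuming rand 0.9:

    use rand::seq::IndexedRandom; // rand 0.9: slice method choose_weighted

    struct Endpoint {
        name: &'static str,
        capacity: u32,
    }

    fn main() {
        let endpoints = [
            Endpoint { name: "pod-a", capacity: 1 },
            Endpoint { name: "pod-b", capacity: 3 }, // picked ~3x as often
        ];
        // choose_weighted errors if the list is empty or the weights sum to
        // zero, which is why the hunk above converts the result with .ok().
        let picked = endpoints
            .choose_weighted(&mut rand::rng(), |ep| ep.capacity as u64)
            .map(|ep| ep.name)
            .ok();
        println!("picked: {picked:?}");
    }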
@@ -436,7 +479,7 @@ pub struct DemandProxyState {
     metrics: Arc<proxy::Metrics>,

     #[serde(skip_serializing)]
-    dns_resolver: TokioAsyncResolver,
+    dns_resolver: TokioResolver,
 }

 impl DemandProxyState {

@@ -457,11 +500,12 @@ impl DemandProxyState {
         dns_resolver_opts: ResolverOpts,
         metrics: Arc<proxy::Metrics>,
     ) -> Self {
-        let dns_resolver = TokioAsyncResolver::new(
-            dns_resolver_cfg.to_owned(),
-            dns_resolver_opts.clone(),
+        let mut rb = hickory_resolver::Resolver::builder_with_config(
+            dns_resolver_cfg,
             TokioConnectionProvider::default(),
         );
+        *rb.options_mut() = dns_resolver_opts;
+        let dns_resolver = rb.build();
         Self {
             state,
             demand,
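hickory-resolver 0.25 replaces the `TokioAsyncResolver::new(cfg, opts, provider)` constructor with a builder: construct with a config and connection provider, mutate the options in place, then `build()`. A sketch of the migration, assuming hickory-resolver 0.25 with its tokio feature; the config values are placeholders:

    use hickory_resolver::TokioResolver;
    use hickory_resolver::config::{ResolverConfig, ResolverOpts};
    use hickory_resolver::name_server::TokioConnectionProvider;

    fn build_resolver(cfg: ResolverConfig, opts: ResolverOpts) -> TokioResolver {
        // 0.25 builder style, as in the hunk above: options are set on the
        // builder rather than passed to a constructor.
        let mut rb = hickory_resolver::Resolver::builder_with_config(
            cfg,
            TokioConnectionProvider::default(),
        );
        *rb.options_mut() = opts;
        rb.build()
    }

    #[tokio::main]
    async fn main() {
        let resolver = build_resolver(ResolverConfig::google(), ResolverOpts::default());
        // Example lookup; requires network access.
        if let Ok(ips) = resolver.lookup_ip("example.com.").await {
            for ip in ips.iter() {
                println!("{ip}");
            }
        }
    }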
@ -480,7 +524,7 @@ impl DemandProxyState {
|
||||||
) -> Result<(), proxy::AuthorizationRejectionError> {
|
) -> Result<(), proxy::AuthorizationRejectionError> {
|
||||||
let wl = &ctx.dest_workload;
|
let wl = &ctx.dest_workload;
|
||||||
let conn = &ctx.conn;
|
let conn = &ctx.conn;
|
||||||
let state = self.state.read().unwrap();
|
let state = self.read();
|
||||||
|
|
||||||
// We can get policies from namespace, global, and workload...
|
// We can get policies from namespace, global, and workload...
|
||||||
let ns = state.policies.get_by_namespace(&wl.namespace);
|
let ns = state.policies.get_by_namespace(&wl.namespace);
|
||||||
|
@ -552,13 +596,13 @@ impl DemandProxyState {
|
||||||
src_workload: &Workload,
|
src_workload: &Workload,
|
||||||
original_target_address: SocketAddr,
|
original_target_address: SocketAddr,
|
||||||
ip_family_restriction: Option<IpFamily>,
|
ip_family_restriction: Option<IpFamily>,
|
||||||
) -> Result<IpAddr, Error> {
|
) -> Result<Option<IpAddr>, Error> {
|
||||||
// If the user requested the pod by a specific IP, use that directly.
|
// If the user requested the pod by a specific IP, use that directly.
|
||||||
if dst_workload
|
if dst_workload
|
||||||
.workload_ips
|
.workload_ips
|
||||||
.contains(&original_target_address.ip())
|
.contains(&original_target_address.ip())
|
||||||
{
|
{
|
||||||
return Ok(original_target_address.ip());
|
return Ok(Some(original_target_address.ip()));
|
||||||
}
|
}
|
||||||
// They may have 1 or 2 IPs (single/dual stack)
|
// They may have 1 or 2 IPs (single/dual stack)
|
||||||
// Ensure we are meeting the Service family restriction (if any is defined).
|
// Ensure we are meeting the Service family restriction (if any is defined).
|
||||||
|
@ -573,14 +617,19 @@ impl DemandProxyState {
|
||||||
})
|
})
|
||||||
.find_or_first(|ip| ip.is_ipv6() == original_target_address.is_ipv6())
|
.find_or_first(|ip| ip.is_ipv6() == original_target_address.is_ipv6())
|
||||||
{
|
{
|
||||||
return Ok(*ip);
|
return Ok(Some(*ip));
|
||||||
}
|
}
|
||||||
if dst_workload.hostname.is_empty() {
|
if dst_workload.hostname.is_empty() {
|
||||||
debug!(
|
if dst_workload.network_gateway.is_none() {
|
||||||
"workload {} has no suitable workload IPs for routing",
|
debug!(
|
||||||
dst_workload.name
|
"workload {} has no suitable workload IPs for routing",
|
||||||
);
|
dst_workload.name
|
||||||
return Err(Error::NoValidDestination(Box::new(dst_workload.clone())));
|
);
|
||||||
|
return Err(Error::NoValidDestination(Box::new(dst_workload.clone())));
|
||||||
|
} else {
|
||||||
|
// We can route through network gateway
|
||||||
|
return Ok(None);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
let ip = Box::pin(self.resolve_workload_address(
|
let ip = Box::pin(self.resolve_workload_address(
|
||||||
dst_workload,
|
dst_workload,
|
||||||
|
@ -588,7 +637,7 @@ impl DemandProxyState {
|
||||||
original_target_address,
|
original_target_address,
|
||||||
))
|
))
|
||||||
.await?;
|
.await?;
|
||||||
Ok(ip)
|
Ok(Some(ip))
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn resolve_workload_address(
|
async fn resolve_workload_address(
|
||||||
|
@ -630,14 +679,14 @@ impl DemandProxyState {
|
||||||
let (matching, unmatching): (Vec<_>, Vec<_>) = resp
|
let (matching, unmatching): (Vec<_>, Vec<_>) = resp
|
||||||
.as_lookup()
|
.as_lookup()
|
||||||
.record_iter()
|
.record_iter()
|
||||||
.filter_map(|record| record.data().and_then(|d| d.ip_addr()))
|
.filter_map(|record| record.data().ip_addr())
|
||||||
.partition(|record| record.is_ipv6() == original_target_address.is_ipv6());
|
.partition(|record| record.is_ipv6() == original_target_address.is_ipv6());
|
||||||
// Randomly pick an IP, prefer to match the IP family of the downstream request.
|
// Randomly pick an IP, prefer to match the IP family of the downstream request.
|
||||||
// Without this, we run into trouble in pure v4 or pure v6 environments.
|
// Without this, we run into trouble in pure v4 or pure v6 environments.
|
||||||
matching
|
matching
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.choose(&mut rand::thread_rng())
|
.choose(&mut rand::rng())
|
||||||
.or_else(|| unmatching.into_iter().choose(&mut rand::thread_rng()))
|
.or_else(|| unmatching.into_iter().choose(&mut rand::rng()))
|
||||||
.ok_or_else(|| Error::EmptyResolvedAddresses(workload_uid.to_string()))
|
.ok_or_else(|| Error::EmptyResolvedAddresses(workload_uid.to_string()))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -652,7 +701,7 @@ impl DemandProxyState {
        debug!(%wl, "wait for workload");

        // Take a watch listener *before* checking state (so we don't miss anything)
-        let mut wl_sub = self.state.read().unwrap().workloads.new_subscriber();
+        let mut wl_sub = self.read().workloads.new_subscriber();

        debug!(%wl, "got sub, waiting for workload");

@@ -683,7 +732,7 @@ impl DemandProxyState {
    /// Finds the workload by workload information, as an arc.
    /// Note: this does not currently support on-demand.
    fn find_by_info(&self, wl: &WorkloadInfo) -> Option<Arc<Workload>> {
-        self.state.read().unwrap().workloads.find_by_info(wl)
+        self.read().workloads.find_by_info(wl)
    }

    // fetch_workload_by_address looks up a Workload by address.
@@ -693,21 +742,21 @@ impl DemandProxyState {
    pub async fn fetch_workload_by_address(&self, addr: &NetworkAddress) -> Option<Arc<Workload>> {
        // Wait for it on-demand, *if* needed
        debug!(%addr, "fetch workload");
-        if let Some(wl) = self.state.read().unwrap().workloads.find_address(addr) {
+        if let Some(wl) = self.read().workloads.find_address(addr) {
            return Some(wl);
        }
        if !self.supports_on_demand() {
            return None;
        }
        self.fetch_on_demand(addr.to_string().into()).await;
-        self.state.read().unwrap().workloads.find_address(addr)
+        self.read().workloads.find_address(addr)
    }

    // only support workload
    pub async fn fetch_workload_by_uid(&self, uid: &Strng) -> Option<Arc<Workload>> {
        // Wait for it on-demand, *if* needed
        debug!(%uid, "fetch workload");
-        if let Some(wl) = self.state.read().unwrap().workloads.find_uid(uid) {
+        if let Some(wl) = self.read().workloads.find_uid(uid) {
            return Some(wl);
        }
        if !self.supports_on_demand() {
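The repeated self.state.read().unwrap() to self.read() rewrites funnel every lock acquisition through one helper. A plausible shape for such a helper, sketched over a plain RwLock (the real DemandProxyState may differ):

    use std::sync::{RwLock, RwLockReadGuard};

    struct StateHolder {
        state: RwLock<Vec<String>>, // stand-in for the real ProxyState
    }

    impl StateHolder {
        // One place to take the read lock (and to decide how poisoning is handled).
        fn read(&self) -> RwLockReadGuard<'_, Vec<String>> {
            self.state.read().unwrap()
        }
    }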
@@ -766,6 +815,65 @@ impl DemandProxyState {
        Ok(Some(res))
    }

+    /// Returns destination address, upstream sans, and final sans, for
+    /// connecting to a remote workload through a gateway.
+    /// Would be nice to return this as an Upstream, but gateways don't necessarily
+    /// have workloads. That is, they could just be IPs without a corresponding workload.
+    pub async fn fetch_network_gateway(
+        &self,
+        gw_address: &GatewayAddress,
+        source_workload: &Workload,
+        original_destination_address: SocketAddr,
+    ) -> Result<Upstream, Error> {
+        let (res, target_address) = match &gw_address.destination {
+            Destination::Address(ip) => {
+                let addr = SocketAddr::new(ip.address, gw_address.hbone_mtls_port);
+                let us = self.state.read().unwrap().find_upstream(
+                    ip.network.clone(),
+                    source_workload,
+                    addr,
+                    ServiceResolutionMode::Standard,
+                );
+                // If the workload references a network gateway by IP, use that IP as the destination.
+                // Note this means that an IPv6 call may be translated to IPv4 if the network
+                // gateway is specified as an IPv4 address.
+                // For this reason, the Hostname method is preferred which can adapt to the callers IP family.
+                (us, addr)
+            }
+            Destination::Hostname(host) => {
+                let state = self.read();
+                match state.find_hostname(host) {
+                    Some(Address::Service(s)) => {
+                        let us = state.find_upstream_from_service(
+                            source_workload,
+                            gw_address.hbone_mtls_port,
+                            ServiceResolutionMode::Standard,
+                            s,
+                        );
+                        // For hostname, use the original_destination_address as the target so we can
+                        // adapt to the callers IP family.
+                        (us, original_destination_address)
+                    }
+                    Some(Address::Workload(w)) => {
+                        let us = Some((w, gw_address.hbone_mtls_port, None));
+                        (us, original_destination_address)
+                    }
+                    None => {
+                        return Err(Error::UnknownNetworkGateway(format!(
+                            "network gateway {} not found",
+                            host.hostname
+                        )));
+                    }
+                }
+            }
+        };
+        self.finalize_upstream(source_workload, target_address, res)
+            .await?
+            .ok_or_else(|| {
+                Error::UnknownNetworkGateway(format!("network gateway {gw_address:?} not found"))
+            })
+    }
+
    async fn fetch_waypoint(
        &self,
        gw_address: &GatewayAddress,
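The comments in the new fetch_network_gateway stress that an IP-typed gateway pins the address family, while a hostname-typed gateway can adapt to the caller. The family-matching rule reduces to something like this sketch (pure illustration, not code from the diff):

    use std::net::IpAddr;

    // Prefer a gateway IP in the same family as the caller's original destination,
    // falling back to the first candidate otherwise (the find_or_first pattern).
    fn adapt_family(candidates: &[IpAddr], original: IpAddr) -> Option<IpAddr> {
        candidates
            .iter()
            .copied()
            .find(|ip| ip.is_ipv6() == original.is_ipv6())
            .or_else(|| candidates.first().copied())
    }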
@@ -777,7 +885,7 @@ impl DemandProxyState {
        let (res, target_address) = match &gw_address.destination {
            Destination::Address(ip) => {
                let addr = SocketAddr::new(ip.address, gw_address.hbone_mtls_port);
-                let us = self.state.read().unwrap().find_upstream(
+                let us = self.read().find_upstream(
                    ip.network.clone(),
                    source_workload,
                    addr,
@@ -811,14 +919,14 @@ impl DemandProxyState {
                        return Err(Error::UnknownWaypoint(format!(
                            "waypoint {} not found",
                            host.hostname
-                        )))
+                        )));
                    }
                }
            }
        };
        self.finalize_upstream(source_workload, target_address, res)
            .await?
-            .ok_or_else(|| Error::UnknownWaypoint(format!("waypoint {:?} not found", gw_address)))
+            .ok_or_else(|| Error::UnknownWaypoint(format!("waypoint {gw_address:?} not found")))
    }

    pub async fn fetch_service_waypoint(
@@ -865,7 +973,7 @@ impl DemandProxyState {
    pub async fn fetch_address(&self, network_addr: &NetworkAddress) -> Option<Address> {
        // Wait for it on-demand, *if* needed
        debug!(%network_addr.address, "fetch address");
-        if let Some(address) = self.state.read().unwrap().find_address(network_addr) {
+        if let Some(address) = self.read().find_address(network_addr) {
            return Some(address);
        }
        if !self.supports_on_demand() {
@@ -873,7 +981,7 @@ impl DemandProxyState {
        }
        // if both cache not found, start on demand fetch
        self.fetch_on_demand(network_addr.to_string().into()).await;
-        self.state.read().unwrap().find_address(network_addr)
+        self.read().find_address(network_addr)
    }

    /// Looks for the given hostname to find either a workload or service by IP. If not found
@@ -881,7 +989,7 @@ impl DemandProxyState {
    async fn fetch_hostname(&self, hostname: &NamespacedHostname) -> Option<Address> {
        // Wait for it on-demand, *if* needed
        debug!(%hostname, "fetch hostname");
-        if let Some(address) = self.state.read().unwrap().find_hostname(hostname) {
+        if let Some(address) = self.read().find_hostname(hostname) {
            return Some(address);
        }
        if !self.supports_on_demand() {
@@ -889,7 +997,7 @@ impl DemandProxyState {
        }
        // if both cache not found, start on demand fetch
        self.fetch_on_demand(hostname.to_string().into()).await;
-        self.state.read().unwrap().find_hostname(hostname)
+        self.read().find_hostname(hostname)
    }

    pub fn supports_on_demand(&self) -> bool {
@@ -996,7 +1104,7 @@ mod tests {
    use rbac::StringMatch;
    use std::{net::Ipv4Addr, net::SocketAddrV4, time::Duration};

-    use self::workload::{application_tunnel::Protocol as AppProtocol, ApplicationTunnel};
+    use self::workload::{ApplicationTunnel, application_tunnel::Protocol as AppProtocol};

    use super::*;
    use crate::test_helpers::helpers::initialize_telemetry;
@@ -1270,17 +1378,17 @@ mod tests {
    fn create_workload(dest_uid: u8) -> Workload {
        Workload {
            name: "test".into(),
-            namespace: format!("ns{}", dest_uid).into(),
+            namespace: format!("ns{dest_uid}").into(),
            trust_domain: "cluster.local".into(),
            service_account: "defaultacct".into(),
            workload_ips: vec![IpAddr::V4(Ipv4Addr::new(192, 168, 0, dest_uid))],
-            uid: format!("{}", dest_uid).into(),
+            uid: format!("{dest_uid}").into(),
            ..test_helpers::test_default_workload()
        }
    }

    fn get_workload(state: &DemandProxyState, dest_uid: u8) -> Arc<Workload> {
-        let key: Strng = format!("{}", dest_uid).into();
+        let key: Strng = format!("{dest_uid}").into();
        state.read().workloads.by_uid[&key].clone()
    }

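The recurring format!("{}", x) to format!("{x}") rewrites throughout these test hunks adopt inline format arguments (stable since Rust 1.58, and the style clippy's uninlined_format_args lint pushes); the output is identical:

    fn label(dest_uid: u8) -> (String, String) {
        let positional = format!("ns{}", dest_uid); // old style
        let inlined = format!("ns{dest_uid}");      // new style, same result
        (positional, inlined)
    }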
@@ -1289,7 +1397,7 @@ mod tests {
        dest_uid: u8,
        src_svc_acct: &str,
    ) -> crate::state::ProxyRbacContext {
-        let key: Strng = format!("{}", dest_uid).into();
+        let key: Strng = format!("{dest_uid}").into();
        let workload = &state.read().workloads.by_uid[&key];
        crate::state::ProxyRbacContext {
            conn: rbac::Connection {
@@ -1379,10 +1487,12 @@ mod tests {

        // test workload in ns2. this should work as ns2 doesn't have any policies. this tests:
        // 3. If there are no ALLOW policies for the workload, allow the request.
-        assert!(mock_proxy_state
-            .assert_rbac(&get_rbac_context(&mock_proxy_state, 2, "not-defaultacct"))
-            .await
-            .is_ok());
+        assert!(
+            mock_proxy_state
+                .assert_rbac(&get_rbac_context(&mock_proxy_state, 2, "not-defaultacct"))
+                .await
+                .is_ok()
+        );

        let ctx = get_rbac_context(&mock_proxy_state, 1, "defaultacct");
        // 4. if any allow policies match, allow
@@ -1474,6 +1584,22 @@ mod tests {
            },
            ..test_helpers::test_default_workload()
        };
+        let wl_empty_ip = Workload {
+            uid: "cluster1//v1/Pod/default/wl_empty_ip".into(),
+            name: "wl_empty_ip".into(),
+            namespace: "default".into(),
+            trust_domain: "cluster.local".into(),
+            service_account: "default".into(),
+            workload_ips: vec![], // none!
+            network: "network".into(),
+            locality: Locality {
+                region: "reg".into(),
+                zone: "zone".into(),
+                subzone: "".into(),
+            },
+            ..test_helpers::test_default_workload()
+        };

        let _ep_almost = Workload {
            uid: "cluster1//v1/Pod/default/ep_almost".into(),
            name: "wl_almost".into(),
@@ -1520,6 +1646,11 @@ mod tests {
                port: HashMap::from([(80u16, 80u16)]),
                status: HealthStatus::Healthy,
            },
+            Endpoint {
+                workload_uid: "cluster1//v1/Pod/default/wl_empty_ip".into(),
+                port: HashMap::from([(80u16, 80u16)]),
+                status: HealthStatus::Healthy,
+            },
        ]);
        let strict_svc = Service {
            endpoints: endpoints.clone(),
@@ -1552,6 +1683,7 @@ mod tests {
        state.workloads.insert(Arc::new(wl_no_locality.clone()));
        state.workloads.insert(Arc::new(wl_match.clone()));
        state.workloads.insert(Arc::new(wl_almost.clone()));
+        state.workloads.insert(Arc::new(wl_empty_ip.clone()));
        state.services.insert(strict_svc.clone());
        state.services.insert(failover_svc.clone());

@@ -1566,6 +1698,15 @@ mod tests {
                assert!(want.contains(&got.unwrap()), "{}", desc);
            }
        };
+        let assert_not_endpoint =
+            |src: &Workload, svc: &Service, uid: &str, tries: usize, desc: &str| {
+                for _ in 0..tries {
+                    let got = state
+                        .load_balance(src, svc, 80, ServiceResolutionMode::Standard)
+                        .map(|(ep, _)| ep.workload_uid.as_str());
+                    assert!(got != Some(uid), "{}", desc);
+                }
+            };

        assert_endpoint(
            &wl_no_locality,
@@ -1611,5 +1752,12 @@ mod tests {
            vec!["cluster1//v1/Pod/default/wl_match"],
            "failover full match selects closest match",
        );
+        assert_not_endpoint(
+            &wl_no_locality,
+            &failover_svc,
+            "cluster1//v1/Pod/default/wl_empty_ip",
+            10,
+            "failover no match can select any endpoint",
+        );
    }
}

@@ -17,17 +17,16 @@ use itertools::Itertools;
 use serde::{Deserializer, Serializer};
 use std::collections::{HashMap, HashSet};
 use std::net::IpAddr;
-use std::ops::Deref;
 use std::sync::Arc;
 use tracing::trace;

 use xds::istio::workload::Service as XdsService;

 use crate::state::workload::{
-    byte_to_ip, network_addr, GatewayAddress, NamespacedHostname, NetworkAddress, Workload,
-    WorkloadError,
+    GatewayAddress, NamespacedHostname, NetworkAddress, Workload, WorkloadError, byte_to_ip,
+    network_addr,
 };
-use crate::state::workload::{is_default, HealthStatus};
+use crate::state::workload::{HealthStatus, is_default};
 use crate::strng::Strng;
 use crate::xds::istio::workload::load_balancing::Scope as XdsScope;
 use crate::xds::istio::workload::{IpFamilies, PortList};
@@ -358,13 +357,8 @@ impl ServiceStore {
    /// # Arguments
    ///
    /// * `hostname` - the hostname of the service.
-    pub fn get_by_host(&self, hostname: &Strng) -> Option<Vec<Service>> {
-        self.by_host.get(hostname).map(|services| {
-            services
-                .iter()
-                .map(|service| service.deref().clone())
-                .collect()
-        })
+    pub fn get_by_host(&self, hostname: &Strng) -> Option<Vec<Arc<Service>>> {
+        self.by_host.get(hostname).map(|v| v.to_vec())
    }

    pub fn get_by_workload(&self, workload: &Workload) -> Vec<Arc<Service>> {
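Returning Vec<Arc<Service>> instead of Vec<Service> lets get_by_host hand out shared references: to_vec() on a slice of Arcs only bumps reference counts rather than deep-cloning every Service. The cost difference in miniature:

    use std::sync::Arc;

    fn cheap_copies(services: &[Arc<String>]) -> Vec<Arc<String>> {
        // Each clone is an atomic increment; the underlying data is never copied.
        services.to_vec()
    }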

@@ -19,11 +19,11 @@ use crate::strng::Strng;
 use crate::xds::istio::workload::{Port, PortList};
 use crate::{strng, xds};
 use bytes::Bytes;
-use serde::de::Visitor;
 use serde::Deserialize;
 use serde::Deserializer;
 use serde::Serialize;
 use serde::Serializer;
+use serde::de::Visitor;
 use std::collections::hash_map::Entry;
 use std::collections::{HashMap, HashSet};
 use std::convert::Into;
@@ -41,20 +41,62 @@ use xds::istio::workload::ApplicationTunnel as XdsApplicationTunnel;
 use xds::istio::workload::GatewayAddress as XdsGatewayAddress;
 use xds::istio::workload::Workload as XdsWorkload;

+// The protocol that the final workload expects
 #[derive(
-    Default, Debug, Hash, Eq, PartialEq, Clone, Copy, serde::Serialize, serde::Deserialize,
+    Default,
+    Debug,
+    Hash,
+    Eq,
+    PartialEq,
+    Ord,
+    PartialOrd,
+    Clone,
+    Copy,
+    serde::Serialize,
+    serde::Deserialize,
 )]
-pub enum Protocol {
+pub enum InboundProtocol {
     #[default]
     TCP,
     HBONE,
 }

-impl From<xds::istio::workload::TunnelProtocol> for Protocol {
+impl From<xds::istio::workload::TunnelProtocol> for InboundProtocol {
     fn from(value: xds::istio::workload::TunnelProtocol) -> Self {
         match value {
-            xds::istio::workload::TunnelProtocol::Hbone => Protocol::HBONE,
-            xds::istio::workload::TunnelProtocol::None => Protocol::TCP,
+            xds::istio::workload::TunnelProtocol::Hbone => InboundProtocol::HBONE,
+            xds::istio::workload::TunnelProtocol::None => InboundProtocol::TCP,
+        }
+    }
+}
+
+// The protocol that the sender should use to send data. Can be different from ServerProtocol when there is a
+// proxy in the middle (e.g. e/w gateway with double hbone).
+#[derive(
+    Default,
+    Debug,
+    Hash,
+    Eq,
+    PartialEq,
+    Ord,
+    PartialOrd,
+    Clone,
+    Copy,
+    serde::Serialize,
+    serde::Deserialize,
+)]
+pub enum OutboundProtocol {
+    #[default]
+    TCP,
+    HBONE,
+    DOUBLEHBONE,
+}
+
+impl From<InboundProtocol> for OutboundProtocol {
+    fn from(value: InboundProtocol) -> Self {
+        match value {
+            InboundProtocol::HBONE => OutboundProtocol::HBONE,
+            InboundProtocol::TCP => OutboundProtocol::TCP,
         }
     }
 }
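Splitting Protocol into InboundProtocol (what the destination accepts: TCP or HBONE) and OutboundProtocol (what the sender emits, adding DOUBLEHBONE for HBONE nested through an east/west gateway) keeps the two decisions from being conflated. A hedged sketch of how the selection might look; via_ew_gateway is a hypothetical flag for illustration, not a field from this diff:

    #[derive(Clone, Copy)]
    enum Inbound { Tcp, Hbone }
    #[derive(Clone, Copy, Debug)]
    enum Outbound { Tcp, Hbone, DoubleHbone }

    fn outbound_for(server: Inbound, via_ew_gateway: bool) -> Outbound {
        match (server, via_ew_gateway) {
            (Inbound::Hbone, true) => Outbound::DoubleHbone, // a tunnel inside a tunnel
            (Inbound::Hbone, false) => Outbound::Hbone,
            (Inbound::Tcp, _) => Outbound::Tcp,
        }
    }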
@@ -145,6 +187,15 @@ pub mod application_tunnel {
        PROXY,
    }

+    impl Protocol {
+        pub fn supports_localhost_send(&self) -> bool {
+            match self {
+                Protocol::NONE => false,
+                Protocol::PROXY => true,
+            }
+        }
+    }
+
    impl From<XdsProtocol> for Protocol {
        fn from(value: XdsProtocol) -> Self {
            match value {
@@ -179,7 +230,7 @@ pub struct Workload {
    pub network_gateway: Option<GatewayAddress>,

    #[serde(default)]
-    pub protocol: Protocol,
+    pub protocol: InboundProtocol,
    #[serde(default)]
    pub network_mode: NetworkMode,

@@ -229,6 +280,13 @@ pub struct Workload {

    #[serde(default, skip_serializing_if = "is_default")]
    pub services: Vec<NamespacedHostname>,
+
+    #[serde(default = "default_capacity")]
+    pub capacity: u32,
+}
+
+fn default_capacity() -> u32 {
+    1
 }

 pub fn is_default<T: Default + PartialEq>(t: &T) -> bool {
@@ -392,7 +450,7 @@ impl TryFrom<XdsWorkload> for (Workload, HashMap<String, PortList>) {
            waypoint: wp,
            network_gateway: network_gw,

-            protocol: Protocol::from(xds::istio::workload::TunnelProtocol::try_from(
+            protocol: InboundProtocol::from(xds::istio::workload::TunnelProtocol::try_from(
                resource.tunnel_protocol,
            )?),
            network_mode: NetworkMode::from(xds::istio::workload::NetworkMode::try_from(
@@ -450,6 +508,7 @@ impl TryFrom<XdsWorkload> for (Workload, HashMap<String, PortList>) {
                }
            },

+            capacity: resource.capacity.unwrap_or(1),
            services,
        };
        // Return back part we did not use (service) so it can be consumed without cloning
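The #[serde(default = "default_capacity")] attribute matters for rollout: state serialized before the field existed deserializes to capacity 1 rather than u32::default() (which is 0 and would mark every workload as having no capacity). A self-contained sketch, assuming serde and serde_json are available:

    use serde::Deserialize;

    fn default_capacity() -> u32 { 1 }

    #[derive(Deserialize)]
    struct W {
        #[serde(default = "default_capacity")]
        capacity: u32,
    }

    fn demo() {
        // Field absent in the input: falls back to 1, not 0.
        let w: W = serde_json::from_str("{}").unwrap();
        assert_eq!(w.capacity, 1);
    }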
@@ -696,7 +755,7 @@ impl WorkloadByAddr {
        let is_pod = w.uid.contains("//Pod/");
        // We fallback to looking for HBONE -- a resource marked as in the mesh is likely
        // to have more useful context than one not in the mesh.
-        let is_hbone = w.protocol == Protocol::HBONE;
+        let is_hbone = w.protocol == InboundProtocol::HBONE;
        match (is_pod, is_hbone) {
            (true, true) => 3,
            (true, false) => 2,
@@ -849,11 +908,11 @@ mod tests {
    use crate::config::ConfigSource;
    use crate::state::{DemandProxyState, ProxyState, ServiceResolutionMode};
    use crate::test_helpers::helpers::initialize_telemetry;
-    use crate::xds::istio::workload::load_balancing::HealthPolicy;
    use crate::xds::istio::workload::PortList as XdsPortList;
    use crate::xds::istio::workload::Service as XdsService;
    use crate::xds::istio::workload::WorkloadStatus as XdsStatus;
    use crate::xds::istio::workload::WorkloadStatus;
+    use crate::xds::istio::workload::load_balancing::HealthPolicy;
    use crate::xds::istio::workload::{LoadBalancing, Port as XdsPort};
    use crate::xds::{LocalClient, ProxyStateUpdateMutator};
    use crate::{cert_fetcher, test_helpers};
@@ -969,8 +1028,8 @@ mod tests {
            },
        )]);

-        let uid1 = format!("cluster1//v1/Pod/default/my-pod/{:?}", ip1);
-        let uid2 = format!("cluster1//v1/Pod/default/my-pod/{:?}", ip2);
+        let uid1 = format!("cluster1//v1/Pod/default/my-pod/{ip1:?}");
+        let uid2 = format!("cluster1//v1/Pod/default/my-pod/{ip2:?}");

        updater
            .insert_workload(
@@ -1675,7 +1734,7 @@ mod tests {

        let xds_ip1 = Bytes::copy_from_slice(&[127, 0, 0, 1]);
        let ip1 = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1));
-        let uid1 = format!("cluster1//v1/Pod/default/my-pod/{:?}", ip1);
+        let uid1 = format!("cluster1//v1/Pod/default/my-pod/{ip1:?}");

        let services = HashMap::from([(
            "ns/svc1.ns.svc.cluster.local".to_string(),

@@ -20,15 +20,15 @@ use std::{env, fmt, io};

 use once_cell::sync::Lazy;
 use once_cell::sync::OnceCell;
-use serde::ser::SerializeMap;
 use serde::Serializer;
+use serde::ser::SerializeMap;

 use thiserror::Error;
-use tracing::{error, field, info, warn, Event, Subscriber};
+use tracing::{Event, Subscriber, error, field, info, warn};
 use tracing_appender::non_blocking::NonBlocking;
+use tracing_core::Field;
 use tracing_core::field::Visit;
 use tracing_core::span::Record;
-use tracing_core::Field;
 use tracing_log::NormalizeEvent;

 use tracing_subscriber::fmt::format::{JsonVisitor, Writer};
@@ -37,7 +37,7 @@ use tracing_subscriber::field::RecordFields;
 use tracing_subscriber::fmt::time::{FormatTime, SystemTime};
 use tracing_subscriber::fmt::{FmtContext, FormatEvent, FormatFields, FormattedFields};
 use tracing_subscriber::registry::LookupSpan;
-use tracing_subscriber::{filter, prelude::*, reload, Layer, Registry};
+use tracing_subscriber::{Layer, Registry, filter, prelude::*, reload};

 pub static APPLICATION_START_TIME: Lazy<Instant> = Lazy::new(Instant::now);
 static LOG_HANDLE: OnceCell<LogHandle> = OnceCell::new();
@@ -170,7 +170,7 @@ impl Visitor<'_> {
        } else {
            " "
        };
-        write!(self.writer, "{}{:?}", padding, value)
+        write!(self.writer, "{padding}{value:?}")
    }
 }

@@ -188,9 +188,9 @@ impl field::Visit for Visitor<'_> {
            // Skip fields that are actually log metadata that have already been handled
            name if name.starts_with("log.") => Ok(()),
            // For the message, write out the message and a tab to separate the future fields
-            "message" => write!(self.writer, "{:?}\t", val),
+            "message" => write!(self.writer, "{val:?}\t"),
            // For the rest, k=v.
-            _ => self.write_padded(&format_args!("{}={:?}", field.name(), val)),
+            _ => self.write_padded(&format_args!("{}={val:?}", field.name())),
        }
    }
 }
@@ -234,7 +234,7 @@ where
        let target = meta.target();
        // No need to prefix everything
        let target = target.strip_prefix("ztunnel::").unwrap_or(target);
-        write!(writer, "{}", target)?;
+        write!(writer, "{target}")?;

        // Write out span fields. Istio logging outside of Rust doesn't really have this concept
        if let Some(scope) = ctx.event_scope() {
@@ -243,7 +243,7 @@ where
                let ext = span.extensions();
                if let Some(fields) = &ext.get::<FormattedFields<N>>() {
                    if !fields.is_empty() {
-                        write!(writer, "{{{}}}", fields)?;
+                        write!(writer, "{{{fields}}}")?;
                    }
                }
            }
@@ -285,7 +285,7 @@ impl<S: SerializeMap> Visit for JsonVisitory<S> {
        if self.state.is_ok() {
            self.state = self
                .serializer
-                .serialize_entry(field.name(), &format_args!("{:?}", value))
+                .serialize_entry(field.name(), &format_args!("{value:?}"))
        }
    }

@@ -326,9 +326,7 @@ impl io::Write for WriteAdaptor<'_> {
        let s =
            std::str::from_utf8(buf).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;

-        self.fmt_write
-            .write_str(s)
-            .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
+        self.fmt_write.write_str(s).map_err(io::Error::other)?;

        Ok(s.len())
    }
@@ -412,7 +410,7 @@ impl<'a> FormatFields<'a> for IstioJsonFormat {
 /// Inspired by https://github.com/dbrgn/tracing-test
 #[cfg(any(test, feature = "testing"))]
 pub mod testing {
-    use crate::telemetry::{fmt_layer, IstioJsonFormat, APPLICATION_START_TIME};
+    use crate::telemetry::{APPLICATION_START_TIME, IstioJsonFormat, fmt_layer};
    use itertools::Itertools;
    use once_cell::sync::Lazy;
    use serde_json::Value;
@@ -507,7 +505,7 @@ pub mod testing {
            .map(|h| {
                h.iter()
                    .sorted_by_key(|(k, _)| *k)
-                    .map(|(k, err)| format!("{}:{}", k, err))
+                    .map(|(k, err)| format!("{k}:{err}"))
                    .join("\n")
            })
            .join("\n\n");
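Besides rustfmt's 2024-style import ordering and more inline format arguments, the telemetry hunks swap io::Error::new(io::ErrorKind::Other, e) for io::Error::other(e) (stable since Rust 1.74), which also slots into map_err point-free:

    use std::fmt::Write as _;
    use std::io;

    fn copy_str(dst: &mut String, s: &str) -> io::Result<()> {
        // Before: .map_err(|e| io::Error::new(io::ErrorKind::Other, e))
        dst.write_str(s).map_err(io::Error::other)
    }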

@@ -15,17 +15,17 @@
 use crate::config::ConfigSource;
 use crate::config::{self, RootCert};
 use crate::state::service::{Endpoint, EndpointSet, Service};
-use crate::state::workload::Protocol::{HBONE, TCP};
+use crate::state::workload::InboundProtocol::{HBONE, TCP};
 use crate::state::workload::{
-    gatewayaddress, GatewayAddress, NamespacedHostname, NetworkAddress, Workload,
+    GatewayAddress, NamespacedHostname, NetworkAddress, Workload, gatewayaddress,
 };
-use crate::state::workload::{HealthStatus, Protocol};
+use crate::state::workload::{HealthStatus, InboundProtocol};
 use crate::state::{DemandProxyState, ProxyState};
 use crate::xds::istio::security::Authorization as XdsAuthorization;
-use crate::xds::istio::workload::address;
 use crate::xds::istio::workload::Address as XdsAddress;
 use crate::xds::istio::workload::Service as XdsService;
 use crate::xds::istio::workload::Workload as XdsWorkload;
+use crate::xds::istio::workload::address;
 use crate::xds::{Handler, LocalConfig, LocalWorkload, ProxyStateUpdater, XdsResource, XdsUpdate};
 use anyhow::anyhow;
 use bytes::{BufMut, Bytes};
@@ -169,10 +169,9 @@ pub fn localhost_error_message() -> String {
        TEST_VIP,
    ];
    format!(
-        "These tests use the following loopback addresses: {:?}. \
+        "These tests use the following loopback addresses: {addrs:?}. \
    Your OS may require an explicit alias for each. If so, you'll need to manually \
    configure your system for each IP (e.g. `sudo ifconfig lo0 alias 127.0.0.2 up`).",
-        addrs
    )
 }

@@ -220,6 +219,7 @@ pub fn test_default_workload() -> Workload {
        node: "".into(),
        status: Default::default(),
        cluster_id: "Kubernetes".into(),
+        capacity: 1,

        authorization_policies: Vec::new(),
        native_tunnel: false,
@@ -232,13 +232,13 @@ pub fn test_default_workload() -> Workload {
 fn test_custom_workload(
    ip_str: &str,
    name: &str,
-    protocol: Protocol,
+    protocol: InboundProtocol,
    echo_port: u16,
    services_vec: Vec<&Service>,
    hostname_only: bool,
 ) -> anyhow::Result<LocalWorkload> {
    let host = match hostname_only {
-        true => format!("{}.reflect.internal.", ip_str),
+        true => format!("{ip_str}.reflect.internal."),
        false => "".to_string(),
    };
    let wips = match hostname_only {
@@ -249,7 +249,7 @@ fn test_custom_workload(
        workload_ips: wips,
        hostname: host.into(),
        protocol,
-        uid: format!("cluster1//v1/Pod/default/{}", name).into(),
+        uid: format!("cluster1//v1/Pod/default/{name}").into(),
        name: name.into(),
        namespace: "default".into(),
        service_account: "default".into(),
@@ -281,7 +281,7 @@ fn test_custom_svc(
        }],
        ports: HashMap::from([(80u16, echo_port)]),
        endpoints: EndpointSet::from_list([Endpoint {
-            workload_uid: format!("cluster1//v1/Pod/default/{}", workload_name).into(),
+            workload_uid: format!("cluster1//v1/Pod/default/{workload_name}").into(),
            port: HashMap::from([(80u16, echo_port)]),
            status: HealthStatus::Healthy,
        }]),

@@ -14,7 +14,6 @@

 use anyhow::anyhow;
 use std::collections::HashMap;
-use std::future::Future;
 use std::io;
 use std::net::{IpAddr, Ipv4Addr, SocketAddr};
 use std::ops::Deref;
@@ -53,6 +52,7 @@ pub struct TestApp {

    pub namespace: Option<super::netns::Namespace>,
    pub shutdown: ShutdownTrigger,
+    pub ztunnel_identity: Option<identity::Identity>,
 }

 impl From<(&Bound, Arc<SecretManager>)> for TestApp {
@@ -67,14 +67,14 @@ impl From<(&Bound, Arc<SecretManager>)> for TestApp {
            cert_manager,
            namespace: None,
            shutdown: app.shutdown.trigger(),
+            ztunnel_identity: None,
        }
    }
 }

-pub async fn with_app<F, Fut, FO>(cfg: config::Config, f: F)
+pub async fn with_app<F, FO>(cfg: config::Config, f: F)
 where
-    F: Fn(TestApp) -> Fut,
-    Fut: Future<Output = FO>,
+    F: AsyncFn(TestApp) -> FO,
 {
    initialize_telemetry();
    let cert_manager = identity::mock::new_secret_manager(Duration::from_secs(10));
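The with_app signature change uses the AsyncFn trait (stable since Rust 1.85) to collapse the old two-parameter Fn(...) -> Fut plus Future bound into a single bound. A standalone sketch of the pattern:

    // Old shape: F: Fn(u32) -> Fut, Fut: Future<Output = O>.
    // New shape: one AsyncFn bound; async closures work directly.
    async fn run_with<F, O>(value: u32, f: F) -> O
    where
        F: AsyncFn(u32) -> O,
    {
        f(value).await
    }

    async fn demo() -> u32 {
        run_with(1, async |x| x + 1).await
    }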
@@ -105,7 +105,7 @@ impl TestApp {
        let get_resp = move || async move {
            let req = Request::builder()
                .method(Method::GET)
-                .uri(format!("http://localhost:{}/{path}", port))
+                .uri(format!("http://localhost:{port}/{path}"))
                .header("content-type", "application/json")
                .body(Empty::<Bytes>::new())
                .unwrap();
@@ -130,7 +130,7 @@ impl TestApp {
        let get_resp = move || async move {
            let req = Request::builder()
                .method(Method::GET)
-                .uri(format!("http://localhost:{}/{path}", port))
+                .uri(format!("http://localhost:{port}/{path}"))
                .header("content-type", "application/json")
                .body(Empty::<Bytes>::new())
                .unwrap();

@@ -58,7 +58,7 @@ impl CaServer {
            Duration::from_secs(0),
            Duration::from_secs(100),
        );
-        let root_cert = RootCert::Static(certs.chain.iter().map(|c| c.as_pem()).join("\n").into());
+        let root_cert = RootCert::Static(certs.roots.iter().map(|c| c.as_pem()).join("\n").into());
        let acceptor = tls::mock::MockServerCertProvider::new(certs);
        let mut tls_stream = crate::hyper_util::tls_server(acceptor, listener);
        let srv = IstioCertificateServiceServer::new(server);
@@ -86,6 +86,7 @@ impl CaServer {
            ),
            true,
            60 * 60 * 24,
+            Vec::new(),
        )
        .await
        .unwrap();

@@ -13,32 +13,34 @@
 // limitations under the License.

 use crate::config::Address;
-use crate::dns::resolver::{Answer, Resolver};
 use crate::dns::Metrics;
+use crate::dns::resolver::{Answer, Resolver};
 use crate::drain::DrainTrigger;
 use crate::proxy::Error;
-use crate::state::workload::Workload;
 use crate::state::WorkloadInfo;
+use crate::state::workload::Workload;
 use crate::test_helpers::new_proxy_state;
 use crate::xds::istio::workload::Workload as XdsWorkload;
 use crate::{dns, drain, metrics};
 use futures_util::ready;
 use futures_util::stream::{Stream, StreamExt};
-use hickory_client::client::{AsyncClient, ClientHandle};
-use hickory_client::error::ClientError;
-use hickory_proto::error::{ProtoError, ProtoErrorKind};
-use hickory_proto::iocompat::AsyncIoTokioAsStd;
+use hickory_client::ClientError;
+use hickory_client::client::{Client, ClientHandle};
+use hickory_proto::DnsHandle;
 use hickory_proto::op::{Edns, Message, MessageType, OpCode, Query, ResponseCode};
 use hickory_proto::rr::rdata::{A, AAAA, CNAME};
 use hickory_proto::rr::{DNSClass, Name, RData, Record, RecordType};
+use hickory_proto::runtime::TokioRuntimeProvider;
+use hickory_proto::runtime::iocompat::AsyncIoTokioAsStd;
 use hickory_proto::serialize::binary::BinDecodable;
 use hickory_proto::tcp::TcpClientStream;
 use hickory_proto::udp::UdpClientStream;
+use hickory_proto::xfer::Protocol;
 use hickory_proto::xfer::{DnsRequest, DnsRequestOptions, DnsResponse};
-use hickory_proto::DnsHandle;
+use hickory_proto::{ProtoError, ProtoErrorKind};
 use hickory_resolver::config::{NameServerConfig, ResolverConfig, ResolverOpts};
 use hickory_server::authority::{LookupError, MessageRequest};
-use hickory_server::server::{Protocol, Request};
+use hickory_server::server::Request;
 use prometheus_client::registry::Registry;
 use std::collections::HashMap;
 use std::future::Future;
@@ -46,7 +48,7 @@ use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
 use std::pin::Pin;
 use std::sync::Arc;
 use std::task::{Context, Poll};
-use tokio::net::{TcpStream, UdpSocket};
+use tokio::net::TcpStream;

 const TTL: u32 = 5;

@@ -72,11 +74,14 @@ pub fn cname(name: Name, canonical_name: Name) -> Record {

 /// Creates a new DNS client that establishes a TCP connection to the nameserver at the given
 /// address.
-pub async fn new_tcp_client(addr: SocketAddr) -> AsyncClient {
-    let (stream, sender) = TcpClientStream::<AsyncIoTokioAsStd<TcpStream>>::new(addr);
-    let (client, bg) = AsyncClient::new(Box::new(stream), sender, None)
-        .await
-        .unwrap();
+pub async fn new_tcp_client(addr: SocketAddr) -> Client {
+    let (stream, sender) = TcpClientStream::<AsyncIoTokioAsStd<TcpStream>>::new(
+        addr,
+        None,
+        None,
+        TokioRuntimeProvider::new(),
+    );
+    let (client, bg) = Client::new(Box::new(stream), sender, None).await.unwrap();

    // Run the client exchange in the background.
    tokio::spawn(bg);
@@ -85,9 +90,10 @@ pub async fn new_tcp_client(addr: SocketAddr) -> AsyncClient {
 }

 /// Creates a new DNS client that establishes a UDP connection to the nameserver at the given address.
-pub async fn new_udp_client(addr: SocketAddr) -> AsyncClient {
-    let stream = UdpClientStream::<UdpSocket>::new(addr);
-    let (client, bg) = AsyncClient::connect(stream).await.unwrap();
+pub async fn new_udp_client(addr: SocketAddr) -> Client {
+    let stream =
+        UdpClientStream::<TokioRuntimeProvider>::builder(addr, TokioRuntimeProvider::new()).build();
+    let (client, bg) = Client::connect(stream).await.unwrap();

    // Run the client exchange in the background.
    tokio::spawn(bg);
@@ -106,7 +112,7 @@ pub async fn send_request<C: ClientHandle>(

 /// Sends a request with the given maximum response payload size.
 pub async fn send_with_max_size(
-    client: &mut AsyncClient,
+    client: &mut Client,
    name: Name,
    rr_type: RecordType,
    max_payload: u16,
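These hunks track the hickory-dns 0.25 API: AsyncClient becomes Client, iocompat moves under hickory_proto::runtime, and stream constructors take an explicit TokioRuntimeProvider. Issuing a query through the rewritten helper looks unchanged; a hedged sketch, assuming the new_udp_client above and a resolver listening at addr:

    use hickory_client::client::ClientHandle;
    use hickory_proto::rr::{DNSClass, Name, RecordType};
    use std::str::FromStr;

    async fn lookup_a(addr: std::net::SocketAddr) {
        let mut client = new_udp_client(addr).await;
        let name = Name::from_str("example.com.").unwrap();
        // ClientHandle::query keeps the same shape across the upgrade.
        let resp = client.query(name, DNSClass::IN, RecordType::A).await.unwrap();
        for record in resp.answers() {
            println!("{record}");
        }
    }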
@@ -230,15 +236,17 @@ fn internal_resolver_config(tcp: SocketAddr, udp: SocketAddr) -> ResolverConfig
    let mut rc = ResolverConfig::new();
    rc.add_name_server(NameServerConfig {
        socket_addr: udp,
-        protocol: hickory_resolver::config::Protocol::Udp,
+        protocol: Protocol::Udp,
        tls_dns_name: None,
+        http_endpoint: None,
        trust_negative_responses: false,
        bind_addr: None,
    });
    rc.add_name_server(NameServerConfig {
        socket_addr: tcp,
-        protocol: hickory_resolver::config::Protocol::Tcp,
+        protocol: Protocol::Tcp,
        tls_dns_name: None,
+        http_endpoint: None,
        trust_negative_responses: false,
        bind_addr: None,
    });
@@ -290,6 +298,8 @@ pub async fn run_dns(responses: HashMap<Name, Vec<IpAddr>>) -> anyhow::Result<Te
            }),
            state.clone(),
        ),
+        Some("prefered-namespace".to_string()),
+        true, // ipv6_enabled for tests
    )
    .await?;

@@ -353,12 +363,13 @@ impl crate::dns::Forwarder for FakeForwarder {
        _: Option<&Workload>,
        request: &Request,
    ) -> Result<Answer, LookupError> {
-        let name: Name = request.query().name().into();
+        let query = request.request_info()?.query;
+        let name: Name = query.name().into();
        let utf = name.to_string();
        if let Some(ip) = utf.strip_suffix(".reflect.internal.") {
            // Magic to allow `ip.reflect.internal` to always return ip (like nip.io)
            return Ok(Answer::new(
-                vec![a(request.query().name().into(), ip.parse().unwrap())],
+                vec![a(query.name().into(), ip.parse().unwrap())],
                false,
            ));
        }
@@ -368,17 +379,18 @@ impl crate::dns::Forwarder for FakeForwarder {
        };

        let mut out = Vec::new();
-        let rtype = request.query().query_type();
+        let rtype = query.query_type();
        for ip in ips {
            match ip {
                IpAddr::V4(ip) => {
                    if rtype == RecordType::A {
-                        out.push(a(request.query().name().into(), *ip));
+                        out.push(a(query.name().into(), *ip));
                    }
                }
                IpAddr::V6(ip) => {
                    if rtype == RecordType::AAAA {
-                        out.push(aaaa(request.query().name().into(), *ip));
+                        out.push(aaaa(query.name().into(), *ip));
                    }
                }
            }

@@ -13,8 +13,9 @@
 // limitations under the License.

 use pin_project_lite::pin_project;
-use tonic::body::BoxBody;
 use tonic::Status;
+use tonic::body::Body;

 use tower::{BoxError, ServiceExt};

 // Copied from https://github.com/hyperium/tonic/blob/34b863b1d2a204ef3dd871ec86860fc92aafb451/examples/src/tls_rustls/server.rs
@@ -25,7 +26,7 @@ use tower::{BoxError, ServiceExt};
 /// and does not support the `poll_ready` method that is used by tower services.
 ///
 /// This is provided here because the equivalent adaptor in hyper-util does not support
-/// tonic::body::BoxBody bodies.
+/// tonic::body::Body bodies.
 #[derive(Debug, Clone)]
 pub struct TowerToHyperService<S> {
    service: S,
@@ -40,20 +41,17 @@ impl<S> TowerToHyperService<S> {

 impl<S> hyper::service::Service<hyper::Request<hyper::body::Incoming>> for TowerToHyperService<S>
 where
-    S: tower::Service<hyper::Request<BoxBody>> + Clone,
+    S: tower::Service<hyper::Request<Body>> + Clone,
    S::Error: Into<BoxError> + 'static,
 {
    type Response = S::Response;
    type Error = BoxError;
-    type Future = TowerToHyperServiceFuture<S, hyper::Request<BoxBody>>;
+    type Future = TowerToHyperServiceFuture<S, hyper::Request<Body>>;

    fn call(&self, req: hyper::Request<hyper::body::Incoming>) -> Self::Future {
        use http_body_util::BodyExt;
-        let req = req.map(|incoming| {
-            incoming
-                .map_err(|err| Status::from_error(err.into()))
-                .boxed_unsync()
-        });
+        let req =
+            req.map(|incoming| Body::new(incoming.map_err(|err| Status::from_error(err.into()))));
        TowerToHyperServiceFuture {
            future: self.service.clone().oneshot(req),
        }
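Newer tonic replaced the BoxBody type alias with a concrete tonic::body::Body, so the adaptor now wraps the incoming body with Body::new(...) instead of BodyExt::boxed_unsync. A hedged sketch of that conversion in isolation (the trait bounds approximate tonic's; verify against the release notes before relying on them):

    use http_body_util::BodyExt; // provides map_err on bodies

    fn to_tonic_body<B>(b: B) -> tonic::body::Body
    where
        B: http_body::Body<Data = bytes::Bytes> + Send + 'static,
        B::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
    {
        tonic::body::Body::new(b.map_err(|e| tonic::Status::from_error(e.into())))
    }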
|
|
|
@ -15,7 +15,8 @@
|
||||||
use crate::config::{ConfigSource, ProxyMode};
|
use crate::config::{ConfigSource, ProxyMode};
|
||||||
use crate::rbac::Authorization;
|
use crate::rbac::Authorization;
|
||||||
use crate::state::service::{Endpoint, Service};
|
use crate::state::service::{Endpoint, Service};
|
||||||
use crate::state::workload::{gatewayaddress, HealthStatus, Workload};
|
use crate::state::workload::{HealthStatus, Workload, gatewayaddress};
|
||||||
|
use crate::strng::Strng;
|
||||||
use crate::test_helpers::app::TestApp;
|
use crate::test_helpers::app::TestApp;
|
||||||
use crate::test_helpers::netns::{Namespace, Resolver};
|
use crate::test_helpers::netns::{Namespace, Resolver};
|
||||||
use crate::test_helpers::*;
|
use crate::test_helpers::*;
|
||||||
|
@ -26,6 +27,7 @@ use crate::inpod::istio::zds::WorkloadInfo;
|
||||||
use crate::signal::ShutdownTrigger;
|
use crate::signal::ShutdownTrigger;
|
||||||
use crate::test_helpers::inpod::start_ztunnel_server;
|
use crate::test_helpers::inpod::start_ztunnel_server;
|
||||||
use crate::test_helpers::linux::TestMode::{Dedicated, Shared};
|
use crate::test_helpers::linux::TestMode::{Dedicated, Shared};
|
||||||
|
use arcstr::ArcStr;
|
||||||
use itertools::Itertools;
|
use itertools::Itertools;
|
||||||
use nix::unistd::mkdtemp;
|
use nix::unistd::mkdtemp;
|
||||||
use std::net::IpAddr;
|
use std::net::IpAddr;
|
||||||
|
@ -122,38 +124,104 @@ impl WorkloadManager {
         wli: Option<state::WorkloadInfo>,
     ) -> anyhow::Result<TestApp> {
         let mut inpod_uds: PathBuf = "/dev/null".into();
-        let ztunnel_server = if self.mode == Shared {
-            inpod_uds = self.tmp_dir.join(node);
-            Some(start_ztunnel_server(inpod_uds.clone()).await)
+        let current_mode = self.mode;
+        let proxy_mode = match current_mode {
+            Shared => ProxyMode::Shared,
+            Dedicated => ProxyMode::Dedicated,
+        };
+        let ztunnel_name = format!("ztunnel-{node}");
+
+        // Define ztunnel's own identity and workload info if it's a Shared proxy.
+        // These are used for registering ztunnel as a workload and for cfg.ztunnel_identity/workload.
+        let ztunnel_shared_identity: Option<identity::Identity> = if proxy_mode == ProxyMode::Shared
+        {
+            Some(identity::Identity::Spiffe {
+                trust_domain: "cluster.local".into(),
+                namespace: "default".into(),
+                service_account: ztunnel_name.clone().into(),
+            })
         } else {
             None
         };
-        let ns = TestWorkloadBuilder::new(&format!("ztunnel-{node}"), self)
-            .on_node(node)
-            .uncaptured()
-            .register()
-            .await?;
+
+        let ztunnel_shared_workload_info: Option<state::WorkloadInfo> =
+            if proxy_mode == ProxyMode::Shared {
+                Some(state::WorkloadInfo::new(
+                    ztunnel_name.clone(),
+                    "default".to_string(),
+                    ztunnel_name.clone(),
+                ))
+            } else {
+                None
+            };
+
+        let ztunnel_server = match current_mode {
+            Shared => {
+                inpod_uds = self.tmp_dir.join(node);
+                Some(start_ztunnel_server(inpod_uds.clone()).await)
+            }
+            Dedicated => None,
+        };
+
+        let ns = match current_mode {
+            Shared => {
+                // Shared mode: Ztunnel has its own identity, registered as HBONE
+                TestWorkloadBuilder::new(&ztunnel_name, self)
+                    .on_node(node)
+                    .identity(
+                        ztunnel_shared_identity
+                            .clone()
+                            .expect("Shared mode must have an identity for ztunnel registration"),
+                    )
+                    .hbone() // Shared ztunnel uses HBONE protocol
+                    .register()
+                    .await?
+            }
+            Dedicated => {
+                TestWorkloadBuilder::new(&ztunnel_name, self)
+                    .on_node(node)
+                    .uncaptured() // Dedicated ztunnel is treated as uncaptured TCP
+                    .register()
+                    .await?
+            }
+        };
+        let _ztunnel_local_workload = self
+            .workloads
+            .last()
+            .cloned()
+            .expect("ztunnel workload should be registered");
+
         let ip = ns.ip();
         let initial_config = LocalConfig {
             workloads: self.workloads.clone(),
             policies: self.policies.clone(),
             services: self.services.values().cloned().collect_vec(),
         };
-        let proxy_mode = if ztunnel_server.is_some() {
-            ProxyMode::Shared
-        } else {
-            ProxyMode::Dedicated
-        };
         let (mut tx_cfg, rx_cfg) = mpsc_ack(1);
         tx_cfg.send(initial_config).await?;
         let local_xds_config = Some(ConfigSource::Dynamic(Arc::new(Mutex::new(rx_cfg))));
+
+        // Config for ztunnel's own identity and workload, primarily for when it acts as a server (metrics endpoint).
+        let cfg_ztunnel_identity = ztunnel_shared_identity.clone();
+        let cfg_ztunnel_workload_info = ztunnel_shared_workload_info.clone();
+
+        // Config for the workload this ztunnel instance is proxying for:
+        // If Shared, ztunnel is effectively proxying for itself
+        // If Dedicated, it's for the application workload `wli`
+        let cfg_proxy_workload_information = match proxy_mode {
+            // Ztunnel's own info for shared mode proxy
+            ProxyMode::Shared => ztunnel_shared_workload_info.clone(),
+            // Application's workload info for dedicated mode
+            ProxyMode::Dedicated => wli,
+        };
+
         let cfg = config::Config {
             xds_address: None,
             dns_proxy: true,
             fake_ca: true,
             local_xds_config,
             local_node: Some(node.to_string()),
-            proxy_workload_information: wli,
+            proxy_workload_information: cfg_proxy_workload_information,
             inpod_uds,
             proxy_mode,
             // We use packet mark even in dedicated to distinguish proxy from application
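The Shared-mode identity constructed above follows the standard Istio SPIFFE layout. A self-contained illustration of the URI it maps to (the helper function is purely illustrative, not ztunnel code):

```rust
/// Illustrative only: the SPIFFE URI shape Istio identities map to.
fn spiffe_uri(trust_domain: &str, namespace: &str, service_account: &str) -> String {
    format!("spiffe://{trust_domain}/ns/{namespace}/sa/{service_account}")
}

fn main() {
    // For a node named "node1", the ztunnel registered above becomes:
    assert_eq!(
        spiffe_uri("cluster.local", "default", "ztunnel-node1"),
        "spiffe://cluster.local/ns/default/sa/ztunnel-node1"
    );
}
```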
@ -163,12 +231,17 @@ impl WorkloadManager {
             } else {
                 Some(true)
             },
+            localhost_app_tunnel: true,
+            ztunnel_identity: cfg_ztunnel_identity,
+            ztunnel_workload: cfg_ztunnel_workload_info,
             ..config::parse_config().unwrap()
         };

         let (tx, rx) = std::sync::mpsc::sync_channel(0);
         // Setup the ztunnel...
         let cloned_ns = ns.clone();
         let cloned_ns2 = ns.clone();
+        let ztunnel_identity = ztunnel_shared_identity.clone();
         // run_ready will spawn a thread and block on it. Run with spawn_blocking so it doesn't block the runtime.
         tokio::task::spawn_blocking(move || {
             ns.run_ready(move |ready| async move {
@ -207,9 +280,9 @@ impl WorkloadManager {
                 ip,
             )),
             cert_manager,
-
             namespace: Some(cloned_ns),
             shutdown,
+            ztunnel_identity: ztunnel_identity.clone(),
         };
         ta.ready().await;
         info!("ready");
@ -350,6 +423,11 @@ impl<'a> TestServiceBuilder<'a> {
         self
     }

+    pub fn subject_alt_names(mut self, mut sans: Vec<ArcStr>) -> Self {
+        self.s.subject_alt_names.append(&mut sans);
+        self
+    }
+
     /// Set the service waypoint
     pub fn waypoint(mut self, waypoint: IpAddr) -> Self {
         self.s.waypoint = Some(GatewayAddress {
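The new `subject_alt_names` appends rather than replaces, so repeated calls accumulate entries. A standalone sketch of that semantic (with `String` standing in for `ArcStr`):

```rust
// Illustrative stand-ins for the real TestServiceBuilder internals.
struct S {
    subject_alt_names: Vec<String>,
}
struct B {
    s: S,
}

impl B {
    // Same shape as the builder method above: append, never replace.
    fn subject_alt_names(mut self, mut sans: Vec<String>) -> Self {
        self.s.subject_alt_names.append(&mut sans);
        self
    }
}

fn main() {
    let b = B { s: S { subject_alt_names: vec![] } }
        .subject_alt_names(vec!["spiffe://td/ns/a/sa/x".into()])
        .subject_alt_names(vec!["spiffe://td/ns/a/sa/y".into()]);
    assert_eq!(b.s.subject_alt_names.len(), 2); // repeated calls accumulate
}
```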
@ -414,6 +492,11 @@ impl<'a> TestWorkloadBuilder<'a> {
         self
     }

+    pub fn network(mut self, network: Strng) -> Self {
+        self.w.workload.network = network;
+        self
+    }
+
     pub fn identity(mut self, identity: identity::Identity) -> Self {
         match identity {
             identity::Identity::Spiffe {
@ -453,12 +536,17 @@ impl<'a> TestWorkloadBuilder<'a> {
         self
     }

-    /// Set a waypoint to the workload
+    /// Mutate the workload
     pub fn mutate_workload(mut self, f: impl FnOnce(&mut Workload)) -> Self {
         f(&mut self.w.workload);
         self
     }

+    pub fn network_gateway(mut self, network_gateway: GatewayAddress) -> Self {
+        self.w.workload.network_gateway = Some(network_gateway);
+        self
+    }
+
     /// Append a service to the workload
     pub fn service(mut self, service: &str, server_port: u16, target_port: u16) -> Self {
         self.w
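The corrected doc comment matters: `mutate_workload` is the catch-all escape hatch, while fields like `network_gateway` get dedicated setters. A self-contained sketch of the escape-hatch shape (the `Workload` stand-in is illustrative):

```rust
// Illustrative stand-in for the real Workload type.
#[derive(Default)]
struct Workload {
    network_gateway: Option<String>,
}
#[derive(Default)]
struct Builder {
    w: Workload,
}

impl Builder {
    // Same shape as the builder method above: apply an arbitrary closure.
    fn mutate_workload(mut self, f: impl FnOnce(&mut Workload)) -> Self {
        f(&mut self.w);
        self
    }
}

fn main() {
    let b = Builder::default().mutate_workload(|w| {
        w.network_gateway = Some("gateway-1".to_string());
    });
    assert!(b.w.network_gateway.is_some());
}
```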
@ -494,18 +582,26 @@ impl<'a> TestWorkloadBuilder<'a> {
     pub async fn register(mut self) -> anyhow::Result<Namespace> {
         let zt = self.manager.ztunnels.get(self.w.workload.node.as_str());
         let node = self.w.workload.node.clone();
-        let network_namespace = if self.manager.mode == Dedicated && zt.is_some() {
-            // This is a bit of hack. For dedicated mode, we run the app and ztunnel in the same namespace
-            // We probably should express this more natively in the framework, but for now we just detect it
-            // and re-use the namespace.
-            tracing::info!("node already has ztunnel and dedicate mode, sharing");
-            zt.as_ref().unwrap().namespace.clone()
-        } else {
-            self.manager
-                .namespaces
-                .child(&self.w.workload.node, &self.w.workload.name)?
+        let network_namespace = match (self.manager.mode, zt.is_some()) {
+            (Dedicated, true) => {
+                // This is a bit of hack. For dedicated mode, we run the app and ztunnel in the same namespace
+                // We probably should express this more natively in the framework, but for now we just detect it
+                // and re-use the namespace.
+                tracing::info!("node already has ztunnel and dedicate mode, sharing");
+                zt.as_ref().unwrap().namespace.clone()
+            }
+            _ => self
+                .manager
+                .namespaces
+                .child(&self.w.workload.node, &self.w.workload.name)?,
         };
-        self.w.workload.workload_ips = vec![network_namespace.ip()];
+        if self.w.workload.network_gateway.is_some() {
+            // This is a little inefficient, because we create the
+            // namespace, but never actually use it.
+            self.w.workload.workload_ips = vec![];
+        } else {
+            self.w.workload.workload_ips = vec![network_namespace.ip()];
+        }
         self.w.workload.uid = format!(
             "cluster1//v1/Pod/{}/{}",
             self.w.workload.namespace, self.w.workload.name,
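The new branch encodes a simple rule: a workload fronted by a network gateway advertises no direct IPs, since peers must reach it through the gateway. Restated minimally:

```rust
use std::net::IpAddr;

// Restates the branch above: gateway-fronted workloads get no direct IPs.
fn workload_ips(has_network_gateway: bool, namespace_ip: IpAddr) -> Vec<IpAddr> {
    if has_network_gateway {
        vec![] // reachable only via the network gateway
    } else {
        vec![namespace_ip]
    }
}

fn main() {
    let ip: IpAddr = "10.0.0.1".parse().unwrap();
    assert!(workload_ips(true, ip).is_empty());
    assert_eq!(workload_ips(false, ip), vec![ip]);
}
```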
@ -545,7 +641,7 @@ impl<'a> TestWorkloadBuilder<'a> {
             let fd = network_namespace.netns().file().as_raw_fd();
             let msg = inpod::Message::Start(inpod::StartZtunnelMessage {
                 uid: uid.to_string(),
-                workload_info: Some(wli),
+                workload_info: Some(wli.clone()),
                 fd,
             });
             zt_info
@ -34,12 +34,13 @@ macro_rules! function {
 /// and automatically setups up a namespace based on the test name (to avoid conflicts).
 #[macro_export]
 macro_rules! setup_netns_test {
-    ($mode:expr) => {{
+    ($mode:expr) => {{ setup_netns_test!($mode, ztunnel::function!()) }};
+    ($mode:expr, $function:expr) => {{
         if unsafe { libc::getuid() } != 0 {
             panic!("CI tests should run as root; this is supposed to happen automatically?");
         }
         ztunnel::test_helpers::helpers::initialize_telemetry();
-        let function_name = ztunnel::function!()
+        let function_name = $function
             .strip_prefix(module_path!())
             .unwrap()
             .strip_prefix("::")
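The added macro arm lets wrapper macros forward the *caller's* function name instead of capturing their own; the one-argument form is unchanged and simply delegates. Hypothetical call sites:

```rust
// Unchanged form: expands to setup_netns_test!(Shared, ztunnel::function!()).
setup_netns_test!(Shared);

// Explicit form: a helper macro can capture ztunnel::function!() at *its*
// call site and pass it through, so namespaces are named after the real test.
setup_netns_test!(Shared, ztunnel::function!());
```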
@ -67,8 +68,8 @@ macro_rules! setup_netns_test {
 /// The special ctor macro ensures this is run *before* any code. In particular, before tokio runtime.
 pub fn initialize_namespace_tests() {
     use libc::getuid;
-    use nix::mount::{mount, MsFlags};
-    use nix::sched::{unshare, CloneFlags};
+    use nix::mount::{MsFlags, mount};
+    use nix::sched::{CloneFlags, unshare};
     use std::io::Write;

     // First, drop into a new user namespace.
@ -23,7 +23,7 @@ use std::{sync, thread};
 use itertools::Itertools;
 use netns_rs::NetNs;
 use tokio::runtime::{Handle, RuntimeFlavor};
-use tracing::{debug, error, warn, Instrument};
+use tracing::{Instrument, debug, error, warn};

 use crate::test_helpers::helpers;
@ -22,9 +22,9 @@ use std::net::{SocketAddr, SocketAddrV4};
 use std::time::Duration;
 use std::{cmp, io};

+use hyper::Response;
 use hyper::server::conn::http2;
 use hyper::service::service_fn;
-use hyper::Response;
 use hyper_util::rt::TokioIo;
 use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
 use tokio::net::{TcpListener, TcpStream};
@ -222,16 +222,19 @@ pub struct HboneTestServer {
     listener: TcpListener,
     mode: Mode,
     name: String,
+    /// Write this message when acting as waypoint to show that waypoint was hit.
+    waypoint_message: Vec<u8>,
 }

 impl HboneTestServer {
-    pub async fn new(mode: Mode, name: &str) -> Self {
+    pub async fn new(mode: Mode, name: &str, waypoint_message: Vec<u8>) -> Self {
         let addr = SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 15008);
         let listener = TcpListener::bind(addr).await.unwrap();
         Self {
             listener,
             mode,
             name: name.to_string(),
+            waypoint_message,
         }
     }
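Callers of `HboneTestServer::new` now pick the marker the fake waypoint writes. A hypothetical call site that keeps the old behavior would pass the previously hard-coded bytes:

```rust
// Hypothetical call site (mode and name as used elsewhere in the tests):
// passing b"waypoint\n" preserves the marker the old hard-coded code emitted.
let server = HboneTestServer::new(mode, "waypoint", b"waypoint\n".to_vec()).await;
```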
@ -254,24 +257,28 @@ impl HboneTestServer {
         let mut tls_stream = crate::hyper_util::tls_server(acceptor, self.listener);
         let mode = self.mode;
         while let Some(socket) = tls_stream.next().await {
+            let waypoint_message = self.waypoint_message.clone();
             if let Err(err) = http2::Builder::new(TokioExecutor)
                 .serve_connection(
                     TokioIo::new(socket),
-                    service_fn(move |req| async move {
-                        info!("waypoint: received request");
-                        tokio::task::spawn(async move {
-                            match hyper::upgrade::on(req).await {
-                                Ok(upgraded) => {
-                                    let mut io = TokioIo::new(upgraded);
-                                    // let (mut ri, mut wi) = tokio::io::split(TokioIo::new(upgraded));
-                                    // Signal we are the waypoint so tests can validate this
-                                    io.write_all(b"waypoint\n").await.unwrap();
-                                    handle_stream(mode, &mut io).await;
+                    service_fn(move |req| {
+                        let waypoint_message = waypoint_message.clone();
+                        async move {
+                            info!("waypoint: received request");
+                            tokio::task::spawn(async move {
+                                match hyper::upgrade::on(req).await {
+                                    Ok(upgraded) => {
+                                        let mut io = TokioIo::new(upgraded);
+                                        // let (mut ri, mut wi) = tokio::io::split(TokioIo::new(upgraded));
+                                        // Signal we are the waypoint so tests can validate this
+                                        io.write_all(&waypoint_message[..]).await.unwrap();
+                                        handle_stream(mode, &mut io).await;
+                                    }
+                                    Err(e) => error!("No upgrade {e}"),
                                 }
-                                Err(e) => error!("No upgrade {e}"),
-                            }
-                        });
-                        Ok::<_, Infallible>(Response::new(Full::<Bytes>::from("streaming...")))
+                            });
+                            Ok::<_, Infallible>(Response::new(Full::<Bytes>::from("streaming...")))
+                        }
                     }),
                 )
                 .await
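The `service_fn` rework is the usual clone-then-move pattern: the closure runs once per request, so it clones the shared buffer and moves the clone into the future it returns, leaving the closure itself reusable. A self-contained sketch of the pattern:

```rust
use std::future::Future;
use std::pin::Pin;

// The handler must stay callable for every request, so it cannot move `msg`
// out of the closure; instead it clones per call and moves the clone into
// the returned future.
fn make_handler(
    msg: Vec<u8>,
) -> impl FnMut(u32) -> Pin<Box<dyn Future<Output = usize> + Send>> {
    move |_req| {
        let msg = msg.clone();
        let fut: Pin<Box<dyn Future<Output = usize> + Send>> =
            Box::pin(async move { msg.len() });
        fut
    }
}

fn main() {
    let mut handler = make_handler(b"waypoint\n".to_vec());
    let _first = handler(1); // each call owns its own clone
    let _second = handler(2); // the handler stays usable for later requests
}
```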
@ -73,7 +73,7 @@ impl AdsServer {
             Duration::from_secs(0),
             Duration::from_secs(100),
         );
-        let root_cert = RootCert::Static(certs.chain.iter().map(|c| c.as_pem()).join("\n").into());
+        let root_cert = RootCert::Static(certs.roots.iter().map(|c| c.as_pem()).join("\n").into());
         let acceptor = tls::mock::MockServerCertProvider::new(certs);
         let listener_addr_string = "https://".to_string() + &server_addr.to_string();
         let mut tls_stream = crate::hyper_util::tls_server(acceptor, listener);
@ -53,8 +53,12 @@ pub enum Error {
     #[cfg(feature = "tls-boring")]
     SslError(#[from] boring::error::ErrorStack),

+    #[error("invalid operation: {0:?}")]
+    #[cfg(feature = "tls-openssl")]
+    SslError(#[from] openssl::error::ErrorStack),
+
     #[error("invalid certificate generation: {0:?}")]
-    #[cfg(feature = "tls-ring")]
+    #[cfg(any(feature = "tls-ring", feature = "tls-aws-lc"))]
     RcgenError(Arc<rcgen::Error>),

     #[error("failed to build server verifier: {0}")]

@ -70,7 +74,7 @@ impl From<InvalidUri> for Error {
     }
 }

-#[cfg(feature = "tls-ring")]
+#[cfg(any(feature = "tls-ring", feature = "tls-aws-lc"))]
 impl From<rcgen::Error> for Error {
     fn from(err: rcgen::Error) -> Self {
         Error::RcgenError(Arc::new(err))
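Broadening the gates to `cfg(any(...))` assumes the TLS backends remain mutually exclusive elsewhere; otherwise the duplicate `SslError` variants would collide. A common companion guard (a sketch, not necessarily present in this crate) fails the build when conflicting features are enabled together:

```rust
// Sketch: compile-time exclusivity check for TLS backend features.
#[cfg(all(feature = "tls-ring", feature = "tls-aws-lc"))]
compile_error!("features `tls-ring` and `tls-aws-lc` are mutually exclusive");

#[cfg(all(feature = "tls-boring", feature = "tls-openssl"))]
compile_error!("features `tls-boring` and `tls-openssl` are mutually exclusive");
```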
@ -1,28 +1,28 @@
 -----BEGIN PRIVATE KEY-----
-MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDHgk8TybbndFMc
-PgmmnXVg+2vkBLkGPgT0yiXZzHo9LATH1DG09eiS5GDRYK49VFRx3KUqCB6HKdrH
-d9kmOG3W6JEHKKfgY7ZOeY0OOii2LCYG9jcL2KNbfBNWnrSahRle65nmWxWHmovu
-qu4jI5WwhgLu/1JyibHRp2iUIDfHbtE9Sg4I6ij8h6nbLvZG6QqohFzEZsOPwRIR
-Rs5iA9qFAygd+nblW3c0hJudUL4FOSC1yIMla3ozeQhbAvlvsY3jlyxM7XxxVKKm
-98FoZAbhMZq/NSdoRlvaVCydsJEIHX1z+4LTJAehMisP8SHmjZi8zyBq19Nc0GCH
-WU7g8DGNAgMBAAECggEADVkf0s5aZSpwfBF/gk0DIzG3InqZaXxBRyJ2PVfzrr+Q
-81C0YavZO2i5vJB4H+TSQIr3ZvRDnJ31hAZ1+hto5+nH+D9lWfs54Y663AKSojua
-cLlM7gc4yMBop9E2+qagj6v6MEIUUu4JYja/94xkMbsJm7Vr7ftd8Q2/DrgIG1jl
-tK8cJ4Cybmnw8IzGaa+bBW0ZAkT4F/qOdVax/3kac4gKQrDTvQawvqyULqisTyQU
-8TfG0yDema9CwoqSR8El7PgdXBRCwO8Z9YlKn1TaWP2rkIVB6sJQmqjTRmjjEFKl
-zzuolc0EJcvlvbDhZz1R1cJSiOftY/yPgjRNRlM1iQKBgQDr20jaTnObQoLG/UxL
-fC7ntCK372RkcnDaz1uMAGXmmxkle+ohRRZDrQ8ExMAy+o9K+oml5Y72lxy+YElm
-eF6uZxe+GL7FKekVw18AAS9fMqMcLGRpVA2/gWEqfE9CgXqy4fCE8zO5NmBHUtEr
-y+CLHq792kQQBxDyJXQ5df01uQKBgQDYjFRSRXSOzN654a61GVcelqr6apr7La5s
-IWeVVdx8lU0k59tiWQf7+EwK7dMtChYAVZm8mokGpqEvf8cV+cttALEYoWmIfL94
-IByi1PRsBbkgUNKOWVWe/ae2DoM3A5hJ/H5mDJFGRN4W9zl/uSlRzkiFC6c7Pxj6
-an0Hmj3EdQKBgC5mVH3GI04vFoVJPaI4Cw5ATPg8m1Z7gSI62vq+9U0ZxCewqg3j
-ho7H1AWPI3SkAF6yzTOa5rYyYlA9pxMGqTHMTEp0mcs2BInohp78nLIwxw/618I0
-7AN504DFNd6SlG3urx+orMtKHETL0SS5ljIoVirQrsUsbrQElEndoXPZAoGAItzc
-ym4CKOdUyEpMLT3KJ9OL7UrybpPZK/Fo0sMC/0+uHs3xJPE+dtNvsqa9Q9gG2MDv
-Qk0C2H5FHveMXr5bgM4GtPdvnRiwXq+UzKZKP4XgWxKIA4DgZvthX88QUvASOX8b
-/mPxk4WM19evex+dRl1WkYzhvIkZBV/Vhz7OyO0CgYEAihR+kOzBBDcRDDitEHVc
-J/pztxlDrkszpvE6iqeYh8Ayc5eebiAevVJiYXtftPK6Jsl9G5vEreG9jxQSYiQQ
-Qc1DEVfmewqURAr1fuiVPHCuw1rEj6ySVtyAIQmsNYshMgK41llE9AAXK0TtuH4O
-3IOig0kNPjzk5LMRuveJSHs=
+MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQC6k7qVMi/o6MKj
+XiwCxJ8lmv1OodoygXNV2AJsw7tN8ShGt62klbEjQG9YO1654dYuW+B+M6ZnMbvp
+Xg3tPMfBjfEUFdIUUOJyP6CJccX7HDV+oeJdlU0B3Cq6mWgEAnAjEZ7Gtzpj0jgX
+TruL3wKslKtf26/+ixtxGj9OtQjnt+VUNWpxSGrflG85Rvn4xZr1qKnVhNUGcxMU
+IFlrievMxdgO5yzLqrCPaaNBmiacaY+oLCh4LEIEYWncUU8M9LB5EzPipsyReItr
+uNsE8PWo7NDTH3fWewJoqBM4CgcJgwLXfpDdz1q6P/4adU5vRIzEGbJOwXs+VKz9
+PHDqzLDTAgMBAAECggEACRmgfZtri8wihi5XLjFh48cp+LZbOqBmvDajc6OeVKBh
+CwX1cbQdZi9D3wVSSvWxaSRxECa7xIe/9xNv0XTn91BRhF3GF9iUGudwkJOciPFo
+yIrjzBW2olMXQG+wC5YE2ykGWgHg1rKPqs9bxWg0rK85ppzD+kHVrlAN62xYCVkw
+1a3L5vsYI9dUnYKRHmxNOHRXNru2h2/15EuLCvbObAgYjz2OUtFgnZ9gQnCDpBVD
+wtzYvKNq0jIvU7OZGnRDk0sFPzKZyZg3n7o2jxrUMDiPwIXSrF0GY+FkVt+XG9l2
+18YSpXTOO9INLRxSIBp8+X4to/+vi2wENUwxmo4diQKBgQD48yOjBzy5bcNznOwn
+Bx7gNoElwJIWiCk3S7h7xiXMafjeAL6BPJbBfNmJrd32HbqEab1c4o7exD7T5zB2
+9hdT0GPNuy/hGoLc7JzOXCMAt5M6zx4aTl4B5kbn+ZDegYpVP2PXcPo0Xnrci7cn
+/atvDJFb9Jx3FUoSAp2WxyjD6QKBgQC/3GTSw7Q6UcieAEodi93HOEgtnxCtBa1r
+zMaVFgvgDLSAshbD9Kcj4+mbdDLfcDxKdnxIM7IDdOmPwoTZXdU7Dr/GFOV7Xc/S
+ntZZrZ5mvkYqu0egY9FWxW4qYcHrWVrFHvBIx5Nh+T1Ng0AdCh2noRiGOvbh38vi
+6ZwY0++FWwKBgB27VA89ZwgyTrx5Ck8i75IRkF0E4a2lN17uQSsvIAaIxXsmFZpz
+tQGIuqZRyIeDR2pXApGcUTnTsDk4jDY479JHuhOw/mfg9a4S6CtRH7EPO5szb0Hv
+CBUr/l3oFWAtkmrZ9j+ds5QbkIrUeNxi1F2f36Be12z1cvIxs8Z1JK45AoGAKSS0
+IlaBgbgkE23wduGVj7FMGSxxQQlkpSdmtnXh2gwfcTG/nAoNBa9MW1/RwQSqiTVb
+dYhQdllN9Nw77PkoTt/c1DeK9qepd3tEcDrBKztGhMdgls6RB9CSzVflY/jndDos
+QQ87VdlDAipRR2jx7w6m2bIvW0TIzkJWjSR1yt0CgYBw3PfOPIHSTnRMPYwobs9O
+Sz59BNc5vULpoFlSbBbb8KR9sNxaE0d3n/XooeH9TZ7ZZf0TfaVqs5W4wyS+znfX
+jrl8haEKqSTjmrgQj+fdZ2CzZp4jV9YxIKiaEfgf/5JSxdM1suXVsP1b8vscSGsZ
+nDmyhZ+sEBocxD5cm9e0Tg==
 -----END PRIVATE KEY-----
@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCLIZGLab2juncQ
+yF3RQPXcJmuktVjdTGtNICS2CKcaToYKgYAmp6VPgTXHHB/fwNMsDnQb50szTgEl
+LPzGT4YapgWIz9JOFyPsSoXBvraVRBxT20dFD2ARK3ilGaoDkItlu4vL9QTNbgXF
+ucYmZkiD2GtLtNcqFNC75tm4IJ09NywzD88IA/8RHSZLy+2yeT6OI1O/3igs66xT
+HQTdmqNnqxeckyxtwxUafayfk9W7xGhxHK8pFRUfvnOl/Qm56RMlQfP7FBjg4bHS
+wL+FfDKBLItvcwO4i8lQpya0ZsqMTtxGT11nRDH5NZMT1w6kCKTyOECJUq2nZZ9b
+VeeoRmdNAgMBAAECggEAE66rx5htpMYp9mSWCxaAwYRo6XvjJqlbK6W+s8bNFvZh
+VYak8bL5OqJZkIGcy7tcVGj2CYWCuK8SD+eJmedhng77DPPzeSsiYJjZS8OWUk74
+n+9PKYiniz5GWrri946g/cMWn4OZypMEO4jQrJl/LDG3WhYq8y/PKKnbhoYMoH5i
+ebv8YLGzzPZm0Vd3JM+wvHkd/CoAvrEWXuhvgxEXyCfpNfStrRbf3Frsk7yRrTx7
+KbSINMvZPemRhaBewr1mU6HWsbu2W5sm2hpe1KmABrUFvDq7ad4LcAuQc54zhdbC
+WkR86+QSDXhCE+ZlR3TyjfGCcsBYzWnRNVmP+liNEQKBgQC/o82IFHv3AGnWKRC3
++rULXHCLqrAiVaqaW442/OjzjesFbouKzL8V8wKw+ag/4wECIm5r6ogtMIhCOVXc
+bQEcGbvhIF5irh/8j0CpaEctiguJTOyy9tShYxJVzOYS44NsDAIyCdQIWYOzeNWP
+l7aaRNs1MFf9eD4I5ATqbF5f3QKBgQC521at9CvQTDoS4MrWuV2XcVlAAjz05voh
+8p7ergCEY5JLKU3k231zVVKaGns2ert6SyCNXD7xGC/Eo/9zEtG/xzoomNRfYixs
+czcNx/yRX/GUOWqG1SDFck5wfbrZ4jTgmhe8B2RG7t8J848dUZRb7eJ0s6gXdCW9
+xHprUdRmMQKBgD5XA7obp8PO357qFuUyagh7FqVobgmNQoUZ+WZL2V+5L9XBgyUw
+u4xhU+PMIv49UwultbPnREsm+XxJeHPPBchlWqe+RtXk/MTEuO0i3dyjhmMwoeMJ
+xluFheZhVAqa9hqEwYYTimT48Y3FZftjB+ShN4nS4xyyK8PqoOq9O+oFAoGAIbjF
+YmyiInoiM1isFQevDpJXYkDFtJ3QFqbB4p9popu6aH7HDlYwzeNWSHWzk2/zYj4N
+Wvi4xt/fkus6pzNr8UMBr2oDZocWjlrdS1fU4L+qwn0kcfBrsMeLqed2JqBffb0X
+v1sL+77Noy2Y8vXhWEiyRQBv6El/q43htGU1h5ECgYBXnJBFtYZ5J1CnFYOVGXD1
+Rqp0dYVEJdnwZZIVPyEPiMzpYZjUbuodwcMQyHlJzyPn2Pn60Lx+Ie/mNgkltVtl
+si2Di6ZLn9ok120YXRl4hufWGsA8b+cwPo72aIoAFP+K8LMRjHKGMS+XnHkX1N9/
+42G8+1ugr/men4HybDQV+w==
+-----END PRIVATE KEY-----