mirror of https://github.com/istio/ztunnel.git
Compare commits
34 Commits
1.26.0-alp...master
85a94b6cc4
3fa6335035
dfa3b58bbc
c2d2534edb
84f0e52e64
f030073f2f
3233bb1017
7df8cf5d08
7cddb868e9
ac477c15a8
5d0352588c
b86fd9989b
facd9a28a0
224b2c34ac
c52e0bbdbf
442923910b
c616a29092
d6d3b606ed
8d9a56a416
615277a05a
3d1223af09
79dfd10249
9f6ae51005
46acf76463
58cf2a0f94
9c01d1276d
d9ea32ce21
c96dd032da
903cf079de
ad8bea43ef
6eaa32e8ac
3470f4bba2
93a0973175
b8dddb7301
@@ -1,6 +1,6 @@
 {
     "name": "istio build-tools",
-    "image": "gcr.io/istio-testing/build-tools:master-fcd42145fc132acd1e8f607e9e7aca15058e9fb9",
+    "image": "gcr.io/istio-testing/build-tools:master-8e6480403f5cf4c9a4cd9d65174d01850e632e1a",
     "privileged": true,
     "remoteEnv": {
         "USE_GKE_GCLOUD_AUTH_PLUGIN": "True",
File diff suppressed because it is too large
Cargo.toml (37 changed lines)
@@ -48,32 +48,33 @@ async-stream = "0.3"
 async-trait = "0.1"
 base64 = "0.22"
 byteorder = "1.5"
-bytes = { version = "1.9", features = ["serde"] }
+bytes = { version = "1.10", features = ["serde"] }
 chrono = "0.4"
-duration-str = "0.13"
+duration-str = "0.17"
 futures = "0.3"
 futures-core = "0.3"
 futures-util = "0.3"
 jemalloc_pprof = { version = "0.6.0", optional = true }
 tikv-jemallocator = { version = "0.6.0", features = ["profiling", "unprefixed_malloc_on_supported_platforms"], optional = true }
 hashbrown = "0.15"
-hickory-client = "0.24"
-hickory-proto = "0.24"
-hickory-resolver = "0.24"
-hickory-server = { version = "0.24", features = [ "hickory-resolver" ] }
+hickory-client = "0.25"
+hickory-proto = "0.25"
+hickory-resolver = "0.25"
+hickory-server = { version = "0.25", features = [ "resolver" ]}
 http-body = { package = "http-body", version = "1" }
 http-body-util = "0.1"
-hyper = { version = "1.5", features = ["full"] }
+hyper = { version = "1.6", features = ["full"] }
 hyper-rustls = { version = "0.27.0", default-features = false, features = ["logging", "http1", "http2"] }
 hyper-util = { version = "0.1", features = ["full"] }
-ipnet = { version = "2.9", features = ["serde"] }
-itertools = "0.13"
+ipnet = { version = "2.11", features = ["serde"] }
+itertools = "0.14"
 keyed_priority_queue = "0.4"
 libc = "0.2"
 log = "0.4"
-nix = { version = "0.29", features = ["socket", "sched", "uio", "fs", "ioctl", "user", "net", "mount"] }
-once_cell = "1.19"
-ppp = "2.2"
+nix = { version = "0.29", features = ["socket", "sched", "uio", "fs", "ioctl", "user", "net", "mount", "resource" ] }
+once_cell = "1.21"
+num_cpus = "1.16"
+ppp = "2.3"
 prometheus-client = { version = "0.23" }
 prometheus-parse = "0.2"
 prost = "0.13"
@@ -90,10 +91,10 @@ socket2 = { version = "0.5", features = ["all"] }
 textnonce = { version = "1.0" }
 thiserror = "2.0"
 tls-listener = { version = "0.11" }
-tokio = { version = "1.43", features = ["full", "test-util"] }
+tokio = { version = "1.44", features = ["full", "test-util"] }
 tokio-rustls = { version = "0.26", default-features = false }
 tokio-stream = { version = "0.1", features = ["net"] }
-tonic = { version = "0.12", default-features = false, features = ["prost", "codegen"] }
+tonic = { version = "0.13", default-features = false, features = ["prost", "codegen"] }
 tower = { version = "0.5", features = ["full"] }
 tracing = { version = "0.1"}
 tracing-subscriber = { version = "0.3", features = ["registry", "env-filter", "json"] }
@@ -105,20 +106,20 @@ pin-project-lite = "0.2"
 pingora-pool = "0.4"
 flurry = "0.5"
 h2 = "0.4"
-http = "1.2"
+http = "1.3"
 split-iter = "0.1"
 arcstr = { version = "1.2", features = ["serde"] }
 tracing-core = "0.1"
 tracing-appender = "0.2"
 tokio-util = { version = "0.7", features = ["io-util"] }
-educe = "0.6.0"
+educe = "0.6"

 [target.'cfg(target_os = "linux")'.dependencies]
 netns-rs = "0.1"
 pprof = { version = "0.14", features = ["protobuf", "protobuf-codec", "criterion"] }

 [build-dependencies]
-tonic-build = { version = "0.12", default-features = false, features = ["prost"] }
+tonic-build = { version = "0.13", default-features = false, features = ["prost"] }
 prost-build = "0.13"
 anyhow = "1.0"
 rustc_version = "0.4"
@@ -154,7 +155,7 @@ test-case = "3.3"
 oid-registry = "0.8"
 rcgen = { version = "0.13", features = ["pem", "x509-parser"] }
 x509-parser = { version = "0.17", default-features = false, features = ["verify"] }
-ctor = "0.3"
+ctor = "0.4"

 [lints.clippy]
 # This rule makes code more confusing
@@ -1 +1 @@
-a1d5c4198ab79a14c09c034f2d95245efa3e2bcb
+d46067e1a8ba3db4abe2635af5807f00ba1981e6
@@ -32,7 +32,7 @@ set -x
 ####################################################################

 # DEFAULT_KIND_IMAGE is used to set the Kubernetes version for KinD unless overridden in params to setup_kind_cluster(s)
-DEFAULT_KIND_IMAGE="gcr.io/istio-testing/kind-node:v1.32.0"
+DEFAULT_KIND_IMAGE="gcr.io/istio-testing/kind-node:v1.33.1"

 # the default kind cluster should be ipv4 if not otherwise specified
 KIND_IP_FAMILY="${KIND_IP_FAMILY:-ipv4}"
@@ -75,7 +75,7 @@ fi
 TOOLS_REGISTRY_PROVIDER=${TOOLS_REGISTRY_PROVIDER:-gcr.io}
 PROJECT_ID=${PROJECT_ID:-istio-testing}
 if [[ "${IMAGE_VERSION:-}" == "" ]]; then
-  IMAGE_VERSION=master-fcd42145fc132acd1e8f607e9e7aca15058e9fb9
+  IMAGE_VERSION=master-8e6480403f5cf4c9a4cd9d65174d01850e632e1a
 fi
 if [[ "${IMAGE_NAME:-}" == "" ]]; then
   IMAGE_NAME=build-tools
@@ -147,6 +147,17 @@ dependencies = [
 "syn",
 ]

+[[package]]
+name = "async-recursion"
+version = "1.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11"
+dependencies = [
+"proc-macro2",
+"quote",
+"syn",
+]
+
 [[package]]
 name = "async-stream"
 version = "0.3.6"
@@ -264,7 +275,7 @@ dependencies = [
 "bitflags 2.8.0",
 "cexpr",
 "clang-sys",
-"itertools 0.10.5",
+"itertools 0.12.1",
 "lazy_static",
 "lazycell",
 "log",
@@ -516,6 +527,12 @@ dependencies = [
 "itertools 0.10.5",
 ]

+[[package]]
+name = "critical-section"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "790eea4361631c5e7d22598ecd5723ff611904e3344ce8720784c93e3d83d40b"
+
 [[package]]
 name = "crossbeam-channel"
 version = "0.5.14"
@@ -628,9 +645,9 @@ checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813"

 [[package]]
 name = "duration-str"
-version = "0.13.0"
+version = "0.17.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "99b55e40ba8fc1ef074c9f9031b4cb88bb1f30c946f80a9305df44973c0b9a2d"
+checksum = "9add086174f60bcbcfde7175e71dcfd99da24dfd12f611d0faf74f4f26e15a06"
 dependencies = [
 "chrono",
 "rust_decimal",
@@ -884,6 +901,19 @@ dependencies = [
 "slab",
 ]

+[[package]]
+name = "generator"
+version = "0.8.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cc6bd114ceda131d3b1d665eba35788690ad37f5916457286b32ab6fd3c438dd"
+dependencies = [
+"cfg-if",
+"libc",
+"log",
+"rustversion",
+"windows",
+]
+
 [[package]]
 name = "getrandom"
 version = "0.1.16"
@@ -1000,9 +1030,9 @@ checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc"

 [[package]]
 name = "hickory-client"
-version = "0.24.4"
+version = "0.25.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "156579a5cd8d1fc6f0df87cc21b6ee870db978a163a1ba484acd98a4eff5a6de"
+checksum = "1bbd1b5def7a1b77783366577e86cb51172196f689823b0f8107da9391ba183f"
 dependencies = [
 "cfg-if",
 "data-encoding",
@@ -1011,20 +1041,22 @@ dependencies = [
 "hickory-proto",
 "once_cell",
 "radix_trie",
-"rand 0.8.5",
-"thiserror 1.0.69",
+"rand 0.9.0",
+"thiserror 2.0.11",
 "tokio",
 "tracing",
 ]

 [[package]]
 name = "hickory-proto"
-version = "0.24.4"
+version = "0.25.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "92652067c9ce6f66ce53cc38d1169daa36e6e7eb7dd3b63b5103bd9d97117248"
+checksum = "6d844af74f7b799e41c78221be863bade11c430d46042c3b49ca8ae0c6d27287"
 dependencies = [
+"async-recursion",
 "async-trait",
 "cfg-if",
+"critical-section",
 "data-encoding",
 "enum-as-inner",
 "futures-channel",
@@ -1033,9 +1065,10 @@ dependencies = [
 "idna",
 "ipnet",
+"once_cell",
-"rand 0.8.5",
+"rand 0.9.0",
 "ring",
 "serde",
-"thiserror 1.0.69",
+"thiserror 2.0.11",
 "tinyvec",
 "tokio",
 "tracing",
@@ -1044,41 +1077,44 @@ dependencies = [

 [[package]]
 name = "hickory-resolver"
-version = "0.24.4"
+version = "0.25.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cbb117a1ca520e111743ab2f6688eddee69db4e0ea242545a604dce8a66fd22e"
+checksum = "a128410b38d6f931fcc6ca5c107a3b02cabd6c05967841269a4ad65d23c44331"
 dependencies = [
 "cfg-if",
 "futures-util",
 "hickory-proto",
 "ipconfig",
-"lru-cache",
+"moka",
 "once_cell",
 "parking_lot",
-"rand 0.8.5",
+"rand 0.9.0",
 "resolv-conf",
 "serde",
 "smallvec",
-"thiserror 1.0.69",
+"thiserror 2.0.11",
 "tokio",
 "tracing",
 ]

 [[package]]
 name = "hickory-server"
-version = "0.24.4"
+version = "0.25.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "090078aff4e305853f8ccfbc89e6a1eec8a189bcb842be46255a2b660dae9416"
+checksum = "716f516285473ce476dfc996bac9a3c9ef2fee4f380ebec5980b12216fe4f547"
 dependencies = [
 "async-trait",
 "bytes",
 "cfg-if",
 "data-encoding",
 "enum-as-inner",
 "futures-util",
 "hickory-proto",
 "hickory-resolver",
 "ipnet",
+"prefix-trie",
 "serde",
-"thiserror 1.0.69",
+"thiserror 2.0.11",
 "time",
 "tokio",
 "tokio-util",
@@ -1107,9 +1143,9 @@ dependencies = [

 [[package]]
 name = "http"
-version = "1.2.0"
+version = "1.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea"
+checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565"
 dependencies = [
 "bytes",
 "fnv",
@@ -1220,7 +1256,7 @@ dependencies = [
 "iana-time-zone-haiku",
 "js-sys",
 "wasm-bindgen",
-"windows-core",
+"windows-core 0.52.0",
 ]

 [[package]]
@@ -1440,15 +1476,6 @@ dependencies = [
 "either",
 ]

-[[package]]
-name = "itertools"
-version = "0.13.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186"
-dependencies = [
-"either",
-]
-
 [[package]]
 name = "itertools"
 version = "0.14.0"
@@ -1527,15 +1554,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34"
 dependencies = [
 "cfg-if",
-"windows-targets 0.48.5",
+"windows-targets 0.52.6",
 ]

-[[package]]
-name = "linked-hash-map"
-version = "0.5.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f"
-
 [[package]]
 name = "linux-raw-sys"
 version = "0.4.15"
@@ -1564,6 +1585,19 @@ version = "0.4.26"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "30bde2b3dc3671ae49d8e2e9f044c7c005836e7a023ee57cffa25ab82764bb9e"

+[[package]]
+name = "loom"
+version = "0.7.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca"
+dependencies = [
+"cfg-if",
+"generator",
+"scoped-tls",
+"tracing",
+"tracing-subscriber",
+]
+
 [[package]]
 name = "lru"
 version = "0.13.0"
@@ -1573,15 +1607,6 @@ dependencies = [
 "hashbrown",
 ]

-[[package]]
-name = "lru-cache"
-version = "0.1.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c"
-dependencies = [
-"linked-hash-map",
-]
-
 [[package]]
 name = "match_cfg"
 version = "0.1.0"
@@ -1656,6 +1681,25 @@ dependencies = [
 "windows-sys 0.52.0",
 ]

+[[package]]
+name = "moka"
+version = "0.12.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a9321642ca94a4282428e6ea4af8cc2ca4eac48ac7a6a4ea8f33f76d0ce70926"
+dependencies = [
+"crossbeam-channel",
+"crossbeam-epoch",
+"crossbeam-utils",
+"loom",
+"parking_lot",
+"portable-atomic",
+"rustc_version",
+"smallvec",
+"tagptr",
+"thiserror 1.0.69",
+"uuid",
+]
+
 [[package]]
 name = "multimap"
 version = "0.10.0"
@@ -1802,9 +1846,13 @@ dependencies = [

 [[package]]
 name = "once_cell"
-version = "1.20.3"
+version = "1.21.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "945462a4b81e43c4e3ba96bd7b49d834c6f61198356aa858733bc4acf3cbe62e"
+checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d"
+dependencies = [
+"critical-section",
+"portable-atomic",
+]

 [[package]]
 name = "oorandom"
@@ -1967,6 +2015,12 @@ dependencies = [
 "plotters-backend",
 ]

+[[package]]
+name = "portable-atomic"
+version = "1.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e"
+
 [[package]]
 name = "powerfmt"
 version = "0.2.0"
@@ -2015,6 +2069,16 @@ dependencies = [
 "zerocopy 0.7.35",
 ]

+[[package]]
+name = "prefix-trie"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eb5f930995ba4986bd239ba8d8fded67cad82d1db329c4f316f312847cba16aa"
+dependencies = [
+"ipnet",
+"num-traits",
+]
+
 [[package]]
 name = "prettyplease"
 version = "0.2.29"
@@ -2521,6 +2585,12 @@ dependencies = [
 "windows-sys 0.59.0",
 ]

+[[package]]
+name = "scoped-tls"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294"
+
 [[package]]
 name = "scopeguard"
 version = "1.2.0"
@@ -2725,6 +2795,12 @@ dependencies = [
 "syn",
 ]

+[[package]]
+name = "tagptr"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417"
+
 [[package]]
 name = "tempfile"
 version = "3.17.1"
@@ -2951,9 +3027,9 @@ dependencies = [

 [[package]]
 name = "tonic"
-version = "0.12.3"
+version = "0.13.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52"
+checksum = "85839f0b32fd242bb3209262371d07feda6d780d16ee9d2bc88581b89da1549b"
 dependencies = [
 "async-trait",
 "base64 0.22.1",
@@ -2972,9 +3048,9 @@ dependencies = [

 [[package]]
 name = "tonic-build"
-version = "0.12.3"
+version = "0.13.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9557ce109ea773b399c9b9e5dca39294110b74f1f342cb347a80d1fce8c26a11"
+checksum = "d85f0383fadd15609306383a90e85eaed44169f931a5d2be1b42c76ceff1825e"
 dependencies = [
 "prettyplease",
 "proc-macro2",
@@ -3155,6 +3231,9 @@ name = "uuid"
 version = "1.14.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "93d59ca99a559661b96bf898d8fce28ed87935fd2bea9f05983c1464dd6c71b1"
+dependencies = [
+"getrandom 0.3.1",
+]

 [[package]]
 name = "valuable"
@@ -3325,6 +3404,16 @@ version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"

+[[package]]
+name = "windows"
+version = "0.58.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dd04d41d93c4992d421894c18c8b43496aa748dd4c081bac0dc93eb0489272b6"
+dependencies = [
+"windows-core 0.58.0",
+"windows-targets 0.52.6",
+]
+
 [[package]]
 name = "windows-core"
 version = "0.52.0"
@@ -3334,6 +3423,60 @@ dependencies = [
 "windows-targets 0.52.6",
 ]

+[[package]]
+name = "windows-core"
+version = "0.58.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99"
+dependencies = [
+"windows-implement",
+"windows-interface",
+"windows-result",
+"windows-strings",
+"windows-targets 0.52.6",
+]
+
+[[package]]
+name = "windows-implement"
+version = "0.58.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b"
+dependencies = [
+"proc-macro2",
+"quote",
+"syn",
+]
+
+[[package]]
+name = "windows-interface"
+version = "0.58.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515"
+dependencies = [
+"proc-macro2",
+"quote",
+"syn",
+]
+
+[[package]]
+name = "windows-result"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e"
+dependencies = [
+"windows-targets 0.52.6",
+]
+
+[[package]]
+name = "windows-strings"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10"
+dependencies = [
+"windows-result",
+"windows-targets 0.52.6",
+]
+
 [[package]]
 name = "windows-sys"
 version = "0.48.0"
@@ -3484,9 +3627,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"

 [[package]]
 name = "winnow"
-version = "0.6.8"
+version = "0.7.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c3c52e9c97a68071b23e836c9380edae937f17b9c4667bd021973efc689f618d"
+checksum = "63d3fcd9bba44b03821e7d699eeee959f3126dcc4aa8e4ae18ec617c2a5cea10"
 dependencies = [
 "memchr",
 ]
@@ -3694,12 +3837,13 @@ dependencies = [
 "hyper-rustls",
 "hyper-util",
 "ipnet",
-"itertools 0.13.0",
+"itertools 0.14.0",
 "keyed_priority_queue",
 "libc",
 "log",
 "netns-rs",
 "nix 0.29.0",
+"num_cpus",
 "once_cell",
 "pin-project-lite",
 "pingora-pool",
src/app.rs (22 changed lines)
@@ -136,6 +136,25 @@ pub async fn build_with_cert(

     if config.proxy_mode == config::ProxyMode::Shared {
         tracing::info!("shared proxy mode - in-pod mode enabled");
+
+        // Create ztunnel inbound listener only if its specific identity and workload info are configured.
+        if let Some(inbound) = proxy_gen.create_ztunnel_self_proxy_listener().await? {
+            // Run the inbound listener in the data plane worker pool
+            let mut xds_rx_for_inbound = xds_rx.clone();
+            data_plane_pool.send(DataPlaneTask {
+                block_shutdown: true,
+                fut: Box::pin(async move {
+                    tracing::info!("Starting ztunnel inbound listener task");
+                    let _ = xds_rx_for_inbound.changed().await;
+                    tokio::task::spawn(async move {
+                        inbound.run().in_current_span().await;
+                    })
+                    .await?;
+                    Ok(())
+                }),
+            })?;
+        }
+
         let run_future = init_inpod_proxy_mgr(
             &mut registry,
             &mut admin_server,
@@ -247,7 +266,8 @@ fn new_data_plane_pool(num_worker_threads: usize) -> mpsc::Sender<DataPlaneTask>
         .thread_name_fn(|| {
             static ATOMIC_ID: AtomicUsize = AtomicUsize::new(0);
             let id = ATOMIC_ID.fetch_add(1, Ordering::SeqCst);
-            format!("ztunnel-proxy-{id}")
+            // Thread name can only be 16 chars so keep it short
+            format!("ztunnel-{id}")
         })
         .enable_all()
         .build()
src/config.rs (160 changed lines)
@@ -54,10 +54,12 @@ const LOCAL_XDS_PATH: &str = "LOCAL_XDS_PATH";
 const LOCAL_XDS: &str = "LOCAL_XDS";
 const XDS_ON_DEMAND: &str = "XDS_ON_DEMAND";
 const XDS_ADDRESS: &str = "XDS_ADDRESS";
+const PREFERED_SERVICE_NAMESPACE: &str = "PREFERED_SERVICE_NAMESPACE";
 const CA_ADDRESS: &str = "CA_ADDRESS";
 const SECRET_TTL: &str = "SECRET_TTL";
 const FAKE_CA: &str = "FAKE_CA";
 const ZTUNNEL_WORKER_THREADS: &str = "ZTUNNEL_WORKER_THREADS";
+const ZTUNNEL_CPU_LIMIT: &str = "ZTUNNEL_CPU_LIMIT";
 const POOL_MAX_STREAMS_PER_CONNECTION: &str = "POOL_MAX_STREAMS_PER_CONNECTION";
 const POOL_UNUSED_RELEASE_TIMEOUT: &str = "POOL_UNUSED_RELEASE_TIMEOUT";
 // CONNECTION_TERMINATION_DEADLINE configures an explicit deadline
@@ -70,6 +72,10 @@ const ENABLE_ORIG_SRC: &str = "ENABLE_ORIG_SRC";
 const PROXY_CONFIG: &str = "PROXY_CONFIG";
 const IPV6_ENABLED: &str = "IPV6_ENABLED";

+const HTTP2_STREAM_WINDOW_SIZE: &str = "HTTP2_STREAM_WINDOW_SIZE";
+const HTTP2_CONNECTION_WINDOW_SIZE: &str = "HTTP2_CONNECTION_WINDOW_SIZE";
+const HTTP2_FRAME_SIZE: &str = "HTTP2_FRAME_SIZE";
+
 const UNSTABLE_ENABLE_SOCKS5: &str = "UNSTABLE_ENABLE_SOCKS5";

 const DEFAULT_WORKER_THREADS: u16 = 2;
@@ -237,6 +243,12 @@ pub struct Config {
     // Allow custom alternative XDS hostname verification
     pub alt_xds_hostname: Option<String>,

+    /// Prefered service namespace to use for service resolution.
+    /// If unset, local namespaces is preferred and other namespaces have equal priority.
+    /// If set, the local namespace is preferred, then the defined prefered_service_namespace
+    /// and finally other namespaces at an equal priority.
+    pub prefered_service_namespace: Option<String>,
+
     /// TTL for CSR requests
     pub secret_ttl: Duration,
     /// YAML config for local XDS workloads
@@ -293,6 +305,10 @@ pub struct Config {

     // If true, when AppTunnel is set for
     pub localhost_app_tunnel: bool,
+
+    pub ztunnel_identity: Option<identity::Identity>,
+
+    pub ztunnel_workload: Option<state::WorkloadInfo>,
 }

 #[derive(serde::Serialize, Clone, Copy, Debug)]
@@ -399,6 +415,60 @@ fn parse_headers(prefix: &str) -> Result<MetadataVector, Error> {
     Ok(metadata)
 }

+fn get_cpu_count() -> Result<usize, Error> {
+    // Allow overriding the count with an env var. This can be used to pass the CPU limit on Kubernetes
+    // from the downward API.
+    // Note the downward API will return the total thread count ("logical cores") if no limit is set,
+    // so it is really the same as num_cpus.
+    // We allow num_cpus for cases its not set (not on Kubernetes, etc).
+    match parse::<usize>(ZTUNNEL_CPU_LIMIT)? {
+        Some(limit) => Ok(limit),
+        // This is *logical cores*
+        None => Ok(num_cpus::get()),
+    }
+}
+
+/// Parse worker threads configuration, supporting both fixed numbers and percentages
+fn parse_worker_threads(default: usize) -> Result<usize, Error> {
+    match parse::<String>(ZTUNNEL_WORKER_THREADS)? {
+        Some(value) => {
+            if let Some(percent_str) = value.strip_suffix('%') {
+                // Parse as percentage
+                let percent: f64 = percent_str.parse().map_err(|e| {
+                    Error::EnvVar(
+                        ZTUNNEL_WORKER_THREADS.to_string(),
+                        value.clone(),
+                        format!("invalid percentage: {}", e),
+                    )
+                })?;
+
+                if percent <= 0.0 || percent > 100.0 {
+                    return Err(Error::EnvVar(
+                        ZTUNNEL_WORKER_THREADS.to_string(),
+                        value,
+                        "percentage must be between 0 and 100".to_string(),
+                    ));
+                }
+
+                let cpu_count = get_cpu_count()?;
+                // Round up, minimum of 1
+                let threads = ((cpu_count as f64 * percent / 100.0).ceil() as usize).max(1);
+                Ok(threads)
+            } else {
+                // Parse as fixed number
+                value.parse::<usize>().map_err(|e| {
+                    Error::EnvVar(
+                        ZTUNNEL_WORKER_THREADS.to_string(),
+                        value,
+                        format!("invalid number: {}", e),
+                    )
+                })
+            }
+        }
+        None => Ok(default),
+    }
+}
+
 pub fn parse_config() -> Result<Config, Error> {
     let pc = parse_proxy_config()?;
     construct_config(pc)
@@ -438,6 +508,14 @@ pub fn construct_config(pc: ProxyConfig) -> Result<Config, Error> {
             .or_else(|| Some(default_istiod_address.clone())),
     ))?;

+    let prefered_service_namespace = match parse::<String>(PREFERED_SERVICE_NAMESPACE) {
+        Ok(ns) => ns,
+        Err(e) => {
+            warn!(err=?e, "failed to parse {PREFERED_SERVICE_NAMESPACE}, continuing with default behavior");
+            None
+        }
+    };
+
     let istio_meta_cluster_id = ISTIO_META_PREFIX.to_owned() + CLUSTER_ID;
     let cluster_id: String = match parse::<String>(&istio_meta_cluster_id)? {
         Some(id) => id,
@@ -600,6 +678,29 @@ pub fn construct_config(pc: ProxyConfig) -> Result<Config, Error> {

     let socket_config_defaults = SocketConfig::default();

+    // Read ztunnel identity and workload info from Downward API if available
+    let (ztunnel_identity, ztunnel_workload) = match (
+        parse::<String>("POD_NAMESPACE")?,
+        parse::<String>("SERVICE_ACCOUNT")?,
+        parse::<String>("POD_NAME")?,
+    ) {
+        (Some(namespace), Some(service_account), Some(pod_name)) => {
+            let trust_domain = std::env::var("TRUST_DOMAIN")
+                .unwrap_or_else(|_| crate::identity::manager::DEFAULT_TRUST_DOMAIN.to_string());
+
+            let identity = identity::Identity::from_parts(
+                trust_domain.into(),
+                namespace.clone().into(),
+                service_account.clone().into(),
+            );
+
+            let workload = state::WorkloadInfo::new(pod_name, namespace, service_account);
+
+            (Some(identity), Some(workload))
+        }
+        _ => (None, None),
+    };
+
     validate_config(Config {
         proxy: parse_default(ENABLE_PROXY, true)?,
         // Enable by default; running the server is not an issue, clients still need to opt-in to sending their
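Note (added for illustration, not part of the diff): the Downward API block above only fills in ztunnel_identity and ztunnel_workload when POD_NAMESPACE, SERVICE_ACCOUNT and POD_NAME are all present. A minimal sketch of the expected outcome, reusing the unsafe env::set_var convention this file's own tests use; the pod values are placeholders:

    unsafe {
        std::env::set_var("POD_NAMESPACE", "istio-system");
        std::env::set_var("SERVICE_ACCOUNT", "ztunnel");
        std::env::set_var("POD_NAME", "ztunnel-abc12");
        // TRUST_DOMAIN is optional; DEFAULT_TRUST_DOMAIN ("cluster.local") is used when unset.
    }
    // construct_config() should then yield:
    //   ztunnel_identity == spiffe://cluster.local/ns/istio-system/sa/ztunnel
    //   ztunnel_workload describing pod "ztunnel-abc12" in istio-system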
@@ -619,9 +720,15 @@ pub fn construct_config(pc: ProxyConfig) -> Result<Config, Error> {
             DEFAULT_POOL_UNUSED_RELEASE_TIMEOUT,
         )?,

-        window_size: 4 * 1024 * 1024,
-        connection_window_size: 4 * 1024 * 1024,
-        frame_size: 1024 * 1024,
+        // window size: per-stream limit
+        window_size: parse_default(HTTP2_STREAM_WINDOW_SIZE, 4 * 1024 * 1024)?,
+        // connection window size: per connection.
+        // Setting this to the same value as window_size can introduce deadlocks in some applications
+        // where clients do not read data on streamA until they receive data on streamB.
+        // If streamA consumes the entire connection window, we enter a deadlock.
+        // A 4x limit should be appropriate without introducing too much potential buffering.
+        connection_window_size: parse_default(HTTP2_CONNECTION_WINDOW_SIZE, 16 * 1024 * 1024)?,
+        frame_size: parse_default(HTTP2_FRAME_SIZE, 1024 * 1024)?,

         self_termination_deadline: match parse_duration(CONNECTION_TERMINATION_DEADLINE)? {
             Some(period) => period,
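Note (added for illustration, not part of the diff): the three HTTP/2 knobs above are plain byte counts read through parse_default, so the defaults become 4 MiB per stream, 16 MiB per connection and 1 MiB frames when nothing is set. A hedged sketch of overriding them, again using the unsafe env::set_var pattern from this file's tests; the sizes are illustrative only:

    unsafe {
        std::env::set_var("HTTP2_STREAM_WINDOW_SIZE", (8 * 1024 * 1024).to_string());
        // Keep the connection window a multiple of the stream window to avoid the
        // cross-stream deadlock described in the comment above.
        std::env::set_var("HTTP2_CONNECTION_WINDOW_SIZE", (32 * 1024 * 1024).to_string());
        std::env::set_var("HTTP2_FRAME_SIZE", (1024 * 1024).to_string());
    }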
@ -675,6 +782,7 @@ pub fn construct_config(pc: ProxyConfig) -> Result<Config, Error> {
|
|||
|
||||
xds_address,
|
||||
xds_root_cert,
|
||||
prefered_service_namespace,
|
||||
ca_address,
|
||||
ca_root_cert,
|
||||
alt_xds_hostname: parse(ALT_XDS_HOSTNAME)?,
|
||||
|
@ -688,8 +796,7 @@ pub fn construct_config(pc: ProxyConfig) -> Result<Config, Error> {
|
|||
fake_ca,
|
||||
auth,
|
||||
|
||||
num_worker_threads: parse_default(
|
||||
ZTUNNEL_WORKER_THREADS,
|
||||
num_worker_threads: parse_worker_threads(
|
||||
pc.concurrency.unwrap_or(DEFAULT_WORKER_THREADS).into(),
|
||||
)?,
|
||||
|
||||
|
@ -753,6 +860,8 @@ pub fn construct_config(pc: ProxyConfig) -> Result<Config, Error> {
|
|||
ca_headers: parse_headers(ISTIO_CA_HEADER_PREFIX)?,
|
||||
|
||||
localhost_app_tunnel: parse_default(LOCALHOST_APP_TUNNEL, true)?,
|
||||
ztunnel_identity,
|
||||
ztunnel_workload,
|
||||
})
|
||||
}
|
||||
|
||||
|
@@ -1055,4 +1164,45 @@ pub mod tests {
             assert!(metadata.vec.contains(&(key, value)));
         }
     }
+
+    #[test]
+    fn test_parse_worker_threads() {
+        unsafe {
+            // Test fixed number
+            env::set_var(ZTUNNEL_WORKER_THREADS, "4");
+            assert_eq!(parse_worker_threads(2).unwrap(), 4);
+
+            // Test percentage with CPU limit
+            env::set_var(ZTUNNEL_CPU_LIMIT, "8");
+            env::set_var(ZTUNNEL_WORKER_THREADS, "50%");
+            assert_eq!(parse_worker_threads(2).unwrap(), 4); // 50% of 8 CPUs = 4 threads
+
+            // Test percentage with CPU limit
+            env::set_var(ZTUNNEL_CPU_LIMIT, "16");
+            env::set_var(ZTUNNEL_WORKER_THREADS, "30%");
+            assert_eq!(parse_worker_threads(2).unwrap(), 5); // Round up to 5
+
+            // Test low percentage that rounds up to 1
+            env::set_var(ZTUNNEL_CPU_LIMIT, "4");
+            env::set_var(ZTUNNEL_WORKER_THREADS, "10%");
+            assert_eq!(parse_worker_threads(2).unwrap(), 1); // 10% of 4 CPUs = 0.4, rounds up to 1
+
+            // Test default when no env var is set
+            env::remove_var(ZTUNNEL_WORKER_THREADS);
+            assert_eq!(parse_worker_threads(2).unwrap(), 2);
+
+            // Test without CPU limit (should use system CPU count)
+            env::remove_var(ZTUNNEL_CPU_LIMIT);
+            let system_cpus = num_cpus::get();
+            assert_eq!(get_cpu_count().unwrap(), system_cpus);
+
+            // Test with CPU limit
+            env::set_var(ZTUNNEL_CPU_LIMIT, "12");
+            assert_eq!(get_cpu_count().unwrap(), 12);
+
+            // Clean up
+            env::remove_var(ZTUNNEL_WORKER_THREADS);
+            env::remove_var(ZTUNNEL_CPU_LIMIT);
+        }
+    }
 }
@@ -14,21 +14,23 @@

 use crate::dns::resolver::{Answer, Resolver};
 use crate::proxy::SocketFactory;
-use hickory_proto::iocompat::AsyncIoTokioAsStd;
+use hickory_proto::runtime::RuntimeProvider;
+use hickory_proto::runtime::iocompat::AsyncIoTokioAsStd;
+use hickory_resolver::ResolveError;
 use hickory_resolver::config::{ResolverConfig, ResolverOpts};
-use hickory_resolver::error::ResolveError;
-use hickory_resolver::name_server;
-use hickory_resolver::name_server::{GenericConnector, RuntimeProvider};
+use hickory_resolver::name_server::GenericConnector;
 use hickory_server::authority::LookupError;
 use hickory_server::server::Request;
 use std::future::Future;
+use std::io;
 use std::net::SocketAddr;
 use std::pin::Pin;
 use std::sync::Arc;
+use std::time::Duration;
 use tokio::net::{TcpStream, UdpSocket};

 /// A forwarding [Resolver] that delegates requests to an upstream [TokioAsyncResolver].
-pub struct Forwarder(hickory_resolver::AsyncResolver<GenericConnector<RuntimeProviderAdaptor>>);
+pub struct Forwarder(hickory_resolver::Resolver<GenericConnector<RuntimeProviderAdaptor>>);

 impl Forwarder {
     /// Creates a new [Forwarder] from the provided resolver configuration.
@@ -41,20 +43,21 @@ impl Forwarder {
             socket_factory,
             handle: Default::default(),
         });
-        let resolver = hickory_resolver::AsyncResolver::new(cfg, opts, provider);
-        Ok(Self(resolver))
+        let mut resolver = hickory_resolver::Resolver::builder_with_config(cfg, provider);
+        *resolver.options_mut() = opts;
+        Ok(Self(resolver.build()))
     }
 }

 #[derive(Clone)]
 struct RuntimeProviderAdaptor {
     socket_factory: Arc<dyn SocketFactory + Send + Sync>,
-    handle: name_server::TokioHandle,
+    handle: hickory_proto::runtime::TokioHandle,
 }

+const CONNECT_TIMEOUT: Duration = Duration::from_secs(5);
 impl RuntimeProvider for RuntimeProviderAdaptor {
-    type Handle = name_server::TokioHandle;
-    type Timer = hickory_proto::TokioTime;
+    type Handle = hickory_proto::runtime::TokioHandle;
+    type Timer = hickory_proto::runtime::TokioTime;
     type Udp = UdpSocket;
     type Tcp = AsyncIoTokioAsStd<TcpStream>;

@@ -65,6 +68,8 @@ impl RuntimeProvider for RuntimeProviderAdaptor {
     fn connect_tcp(
         &self,
         server_addr: SocketAddr,
+        bind_addr: Option<SocketAddr>,
+        wait_for: Option<Duration>,
     ) -> Pin<Box<dyn Send + Future<Output = std::io::Result<Self::Tcp>>>> {
         let sf = self.socket_factory.clone();
         Box::pin(async move {
@@ -73,7 +78,20 @@ impl RuntimeProvider for RuntimeProviderAdaptor {
             } else {
                 sf.new_tcp_v6()
             }?;
-            socket.connect(server_addr).await.map(AsyncIoTokioAsStd)
+
+            if let Some(bind_addr) = bind_addr {
+                socket.bind(bind_addr)?;
+            }
+            let future = socket.connect(server_addr);
+            let wait_for = wait_for.unwrap_or(CONNECT_TIMEOUT);
+            match tokio::time::timeout(wait_for, future).await {
+                Ok(Ok(socket)) => Ok(AsyncIoTokioAsStd(socket)),
+                Ok(Err(e)) => Err(e),
+                Err(_) => Err(io::Error::new(
+                    io::ErrorKind::TimedOut,
+                    format!("connection to {server_addr:?} timed out after {wait_for:?}"),
+                )),
+            }
         })
     }

@@ -91,8 +109,9 @@ impl RuntimeProvider for RuntimeProviderAdaptor {
 impl Resolver for Forwarder {
     async fn lookup(&self, request: &Request) -> Result<Answer, LookupError> {
         // TODO(nmittler): Should we allow requests to the upstream resolver to be authoritative?
-        let name = request.query().name();
-        let rr_type = request.query().query_type();
+        let query = request.request_info()?.query;
+        let name = query.name();
+        let rr_type = query.query_type();
         self.0
             .lookup(name, rr_type)
             .await
@@ -107,10 +126,11 @@ mod tests {
     use crate::dns::resolver::Resolver;
     use crate::test_helpers::dns::{a_request, ip, n, run_dns, socket_addr};
     use crate::test_helpers::helpers::initialize_telemetry;
+    use hickory_proto::ProtoErrorKind;
     use hickory_proto::op::ResponseCode;
     use hickory_proto::rr::RecordType;
-    use hickory_resolver::error::ResolveErrorKind;
-    use hickory_server::server::Protocol;
+    use hickory_proto::xfer::Protocol;
+    use hickory_resolver::ResolveErrorKind;
     use std::collections::HashMap;

     #[tokio::test]
@@ -160,12 +180,13 @@ mod tests {
             .expect("expected resolve error");

         // Expect NoRecordsFound with a NXDomain response code.
-        let kind = err.kind();
-        match kind {
-            ResolveErrorKind::NoRecordsFound { response_code, .. } => {
+        if let ResolveErrorKind::Proto(proto) = err.kind() {
+            if let ProtoErrorKind::NoRecordsFound { response_code, .. } = proto.kind() {
                 // Respond with the error code.
                 assert_eq!(&ResponseCode::NXDomain, response_code);
+                return;
             }
-            _ => panic!("unexpected error kind {kind}"),
         }
+        panic!("unexpected error kind {}", err.kind())
     }
 }
@@ -13,9 +13,10 @@
 // limitations under the License.

 use crate::dns::resolver::{Answer, Resolver};
+use hickory_proto::ProtoErrorKind;
 use hickory_proto::op::{Edns, Header, MessageType, OpCode, ResponseCode};
 use hickory_proto::rr::Record;
-use hickory_resolver::error::ResolveErrorKind;
+use hickory_resolver::ResolveErrorKind;
 use hickory_server::authority::{LookupError, MessageResponse, MessageResponseBuilder};
 use hickory_server::server::{Request, RequestHandler, ResponseHandler, ResponseInfo};
 use std::sync::Arc;
@@ -117,16 +118,14 @@ async fn send_lookup_error<R: ResponseHandler>(
         }
         LookupError::ResponseCode(code) => send_error(request, response_handle, code).await,
         LookupError::ResolveError(e) => {
-            match e.kind() {
-                ResolveErrorKind::NoRecordsFound { response_code, .. } => {
+            if let ResolveErrorKind::Proto(proto) = e.kind() {
+                if let ProtoErrorKind::NoRecordsFound { response_code, .. } = proto.kind() {
                     // Respond with the error code.
-                    send_error(request, response_handle, *response_code).await
+                    return send_error(request, response_handle, *response_code).await;
                 }
-                _ => {
-                    // TODO(nmittler): log?
-                    send_error(request, response_handle, ResponseCode::ServFail).await
-                }
             }
+            // TODO(nmittler): log?
+            send_error(request, response_handle, ResponseCode::ServFail).await
         }
         LookupError::Io(_) => {
             // TODO(nmittler): log?
@@ -189,7 +188,7 @@ fn response_edns(request: &Request) -> Option<Edns> {
         let mut resp_edns: Edns = Edns::new();
         resp_edns.set_max_payload(req_edns.max_payload().max(512));
         resp_edns.set_version(req_edns.version());
-        resp_edns.set_dnssec_ok(req_edns.dnssec_ok());
+        resp_edns.set_dnssec_ok(req_edns.flags().dnssec_ok);

         Some(resp_edns)
     } else {
@@ -207,11 +206,10 @@ mod tests {
     use hickory_proto::op::{Message, MessageType, OpCode, ResponseCode};
     use hickory_proto::rr::{Name, Record, RecordType};
     use hickory_proto::serialize::binary::BinEncoder;
+    use hickory_proto::xfer::Protocol;
     use hickory_server::authority::LookupError;
     use hickory_server::authority::MessageResponse;
-    use hickory_server::server::{
-        Protocol, Request, RequestHandler, ResponseHandler, ResponseInfo,
-    };
+    use hickory_server::server::{Request, RequestHandler, ResponseHandler, ResponseInfo};
     use std::net::Ipv4Addr;
     use std::sync::Arc;
     use tokio::sync::mpsc;
@@ -262,7 +260,7 @@ mod tests {
     #[async_trait::async_trait]
     impl Resolver for FakeResolver {
         async fn lookup(&self, request: &Request) -> Result<Answer, LookupError> {
-            let name = Name::from(request.query().name().clone());
+            let name = Name::from(request.request_info()?.query.name().clone());
             let records = vec![a(name, Ipv4Addr::new(127, 0, 0, 1))];
             Ok(Answer::new(records, false))
         }
@@ -23,6 +23,7 @@ use std::time::Duration;
 use crate::metrics::{DefaultedUnknown, DeferRecorder, Recorder};

 use crate::state::workload::Workload;
+use crate::strng;
 use crate::strng::RichStrng;

 pub struct Metrics {
@@ -78,7 +79,7 @@ impl DeferRecorder for Metrics {}

 #[derive(Clone, Hash, Debug, PartialEq, Eq, EncodeLabelSet)]
 pub struct DnsLabels {
-    request_query_type: RichStrng,
+    request_query_type: DefaultedUnknown<RichStrng>,
     request_protocol: RichStrng,

     // Source workload.
@@ -89,7 +90,12 @@ pub struct DnsLabels {
 impl DnsLabels {
     pub fn new(r: &Request) -> Self {
         Self {
-            request_query_type: r.query().query_type().to_string().to_lowercase().into(),
+            request_query_type: r
+                .request_info()
+                .map(|q| q.query.query_type().to_string().to_lowercase())
+                .ok()
+                .map(|s| RichStrng::from(strng::new(s)))
+                .into(),
             request_protocol: r.protocol().to_string().to_lowercase().into(),
             source_canonical_service: Default::default(),
             source_canonical_revision: Default::default(),
@@ -39,7 +39,9 @@ pub fn trim_domain(name: &Name, domain: &Name) -> Option<Name> {
         // Create a Name from the labels leading up to the domain.
         let iter = name.iter();
         let num_labels = iter.len() - domain.num_labels() as usize;
-        Some(Name::from_labels(iter.take(num_labels)).unwrap())
+        let mut name = Name::from_labels(iter.take(num_labels)).unwrap();
+        name.set_fqdn(false);
+        Some(name)
     } else {
         None
     }
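For context (added here, not part of the diff): the change above returns the trimmed name as a relative, non-fully-qualified name, so it no longer renders with a trailing dot. A small hedged example of the intended behavior:

    use hickory_proto::rr::Name;
    use std::str::FromStr;

    let name = Name::from_str("productpage.ns1.svc.cluster.local.").unwrap();
    let domain = Name::from_str("svc.cluster.local.").unwrap();
    // Previously trim_domain returned an FQDN displaying as "productpage.ns1.";
    // with set_fqdn(false) it now displays as "productpage.ns1".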
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use hickory_proto::error::ProtoErrorKind;
+use hickory_proto::ProtoErrorKind;
 use hickory_proto::op::ResponseCode;
 use hickory_proto::rr::rdata::{A, AAAA, CNAME};
 use hickory_proto::rr::{Name, RData, Record, RecordType};
@@ -47,7 +47,7 @@ use crate::drain::{DrainMode, DrainWatcher};
 use crate::metrics::{DeferRecorder, IncrementRecorder, Recorder};
 use crate::proxy::Error;
 use crate::state::DemandProxyState;
-use crate::state::service::IpFamily;
+use crate::state::service::{IpFamily, Service};
 use crate::state::workload::Workload;
 use crate::state::workload::address::Address;
 use crate::{config, dns};
@@ -85,6 +85,7 @@ impl Server {
         drain: DrainWatcher,
         socket_factory: &(dyn SocketFactory + Send + Sync),
         local_workload_information: Arc<LocalWorkloadFetcher>,
+        prefered_service_namespace: Option<String>,
     ) -> Result<Self, Error> {
         // if the address we got from config is supposed to be v6-enabled,
         // actually check if the local pod context our socketfactory operates in supports V6.
@@ -102,6 +103,7 @@ impl Server {
             forwarder,
             metrics,
             local_workload_information,
+            prefered_service_namespace,
         );
         let store = Arc::new(store);
         let handler = dns::handler::Handler::new(store.clone());
@@ -191,6 +193,7 @@ struct Store {
     svc_domain: Name,
     metrics: Arc<Metrics>,
     local_workload: Arc<LocalWorkloadFetcher>,
+    prefered_service_namespace: Option<String>,
 }

 impl Store {
@@ -200,6 +203,7 @@ impl Store {
         forwarder: Arc<dyn Forwarder>,
         metrics: Arc<Metrics>,
         local_workload_information: Arc<LocalWorkloadFetcher>,
+        prefered_service_namespace: Option<String>,
     ) -> Self {
         let domain = as_name(domain);
         let svc_domain = append_name(as_name("svc"), &domain);
@@ -211,6 +215,7 @@ impl Store {
             svc_domain,
             metrics,
             local_workload: local_workload_information,
+            prefered_service_namespace,
         }
     }

@@ -359,7 +364,7 @@ impl Store {
         let search_name_str = search_name.to_string().into();
         search_name.set_fqdn(true);

-        let service = state
+        let services: Vec<Arc<Service>> = state
             .services
             .get_by_host(&search_name_str)
             .iter()
@@ -382,13 +387,30 @@ impl Store {
             })
             // Get the service matching the client namespace. If no match exists, just
             // return the first service.
-            .find_or_first(|service| service.namespace == client.namespace)
-            .cloned();
+            // .find_or_first(|service| service.namespace == client.namespace)
+            .cloned()
+            .collect();
+
+        // TODO: ideally we'd sort these by creation time so that the oldest would be used if there are no namespace matches
+        // presently service doesn't have creation time in WDS, but we could add it
+        // TODO: if the local namespace doesn't define a service, kube service should be prioritized over se
+        let service = match services
+            .iter()
+            .find(|service| service.namespace == client.namespace)
+        {
+            Some(service) => Some(service),
+            None => match self.prefered_service_namespace.as_ref() {
+                Some(prefered_namespace) => services.iter().find_or_first(|service| {
+                    service.namespace == prefered_namespace.as_str()
+                }),
+                None => services.first(),
+            },
+        };

         // First, lookup the host as a service.
         if let Some(service) = service {
             return Some(ServerMatch {
-                server: Address::Service(service),
+                server: Address::Service(service.clone()),
                 name: search_name,
                 alias,
             });
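In short, the selection order introduced above is: a service in the client's own namespace wins, otherwise a service in prefered_service_namespace (when configured), otherwise the first match. A worked example (added for illustration) using the test fixtures that appear later in this diff, where "everywhere.io" exists in ns1, ns2 and preferred-ns:

    // client in ns1                                -> 10.10.10.112 (own-namespace service)
    // client elsewhere, preferred-ns configured    -> 10.10.10.111 (preferred namespace)
    // client elsewhere, no preferred namespace set -> first matching service
    //   (oldest-first ordering is still a TODO, as the comments above note)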
@@ -509,6 +531,7 @@ impl Store {

 fn access_log(request: &Request, source: Option<&Workload>, result: &str, ep_count: usize) {
     let src = source.as_ref();
+    let query = request.request_info().ok().map(|info| info.query);
     event!(
         target: "dns",
         parent: None,
@@ -517,8 +540,8 @@ fn access_log(request: &Request, source: Option<&Workload>, result: &str, ep_cou
         src.workload = src.map(|w| w.name.as_str()).unwrap_or("unknown"),
         src.namespace = src.map(|w| w.namespace.as_str()).unwrap_or("unknown"),

-        query = request.query().query_type().to_string(),
-        domain = request.query().name().to_string(),
+        query = query.map(|q| q.query_type().to_string()),
+        domain = query.map(|q| q.name().to_string()),

         result = result,
         endpoints = ep_count,
@@ -532,8 +555,8 @@ impl Resolver for Store {
         skip_all,
         fields(
             src=%request.src(),
-            query=%request.query().query_type(),
-            name=%request.query().name(),
+            query=%request.request_info()?.query.query_type(),
+            name=%request.request_info()?.query.name(),
         ),
     )]
     async fn lookup(&self, request: &Request) -> Result<Answer, LookupError> {
@@ -546,8 +569,9 @@ impl Resolver for Store {
                 LookupError::ResponseCode(ResponseCode::ServFail)
             })?;

+        let query = request.request_info()?.query;
         // Make sure the request is for IP records. Anything else, we forward.
-        let record_type = request.query().query_type();
+        let record_type = query.query_type();
         if !is_record_type_supported(record_type) {
             debug!("unknown record type");
             let result = self.forward(Some(&client), request).await;
@@ -562,7 +586,12 @@ impl Resolver for Store {
                 }
                 Err(e) => {
                     // Forwarding failed. Just return the error.
-                    access_log(request, Some(&client), "forwarding failed", 0);
+                    access_log(
+                        request,
+                        Some(&client),
+                        &format!("forwarding failed ({e})"),
+                        0,
+                    );
                     return Err(e);
                 }
             }
@@ -570,7 +599,7 @@ impl Resolver for Store {
         }

         // Find the service for the requested host.
-        let requested_name = Name::from(request.query().name().clone());
+        let requested_name = Name::from(query.name().clone());
         trace!("incoming request {requested_name:?}");
         let Some(service_match) = self.find_server(&client, &requested_name) else {
             trace!("unknown host, forwarding");
@@ -587,7 +616,12 @@ impl Resolver for Store {
                 }
                 Err(e) => {
                     // Forwarding failed. Just return the error.
-                    access_log(request, Some(&client), "forwarding failed", 0);
+                    access_log(
+                        request,
+                        Some(&client),
+                        &format!("forwarding failed ({e})"),
+                        0,
+                    );
                     return Err(e);
                 }
             }
@@ -919,7 +953,7 @@ mod tests {
     use std::net::{SocketAddrV4, SocketAddrV6};

     use bytes::Bytes;
-    use hickory_server::server::Protocol;
+    use hickory_proto::xfer::Protocol;
     use prometheus_client::registry::Registry;

     use super::*;
@@ -944,6 +978,7 @@ mod tests {

     const NS1: &str = "ns1";
     const NS2: &str = "ns2";
+    const PREFERRED: &str = "preferred-ns";
     const NW1: Strng = strng::literal!("nw1");
     const NW2: Strng = strng::literal!("nw2");

@@ -1051,6 +1086,7 @@ mod tests {
             forwarder,
             metrics: test_metrics(),
             local_workload,
+            prefered_service_namespace: None,
         };

         let namespaced_domain = n(format!("{}.svc.cluster.local", c.client_namespace));
@@ -1366,6 +1402,18 @@ mod tests {
                 expect_code: ResponseCode::NXDomain,
                 ..Default::default()
             },
+            Case {
+                name: "success: preferred namespace is chosen if local namespace is not defined",
+                host: "preferred.io.",
+                expect_records: vec![a(n("preferred.io."), ipv4("10.10.10.211"))],
+                ..Default::default()
+            },
+            Case {
+                name: "success: external service resolves to local namespace's address",
+                host: "everywhere.io.",
+                expect_records: vec![a(n("everywhere.io."), ipv4("10.10.10.112"))],
+                ..Default::default()
+            },
         ];

         // Create and start the proxy.
@@ -1383,6 +1431,7 @@ mod tests {
             drain,
             &factory,
             local_workload,
+            Some(PREFERRED.to_string()),
         )
         .await
         .unwrap();
@@ -1469,6 +1518,7 @@ mod tests {
             drain,
             &factory,
             local_workload,
+            None,
         )
         .await
         .unwrap();
@@ -1518,6 +1568,7 @@ mod tests {
                 }),
                 state.clone(),
             ),
+            prefered_service_namespace: None,
         };

         let ip4n6_client_ip = ip("::ffff:202:202");
@@ -1551,6 +1602,7 @@ mod tests {
             drain,
             &factory,
             local_workload,
+            None,
        )
         .await
         .unwrap();
@@ -1568,7 +1620,7 @@ mod tests {
         let resp = send_request(&mut udp_client, n("large.com."), RecordType::A).await;
         // UDP is truncated
         assert!(resp.truncated());
-        assert_eq!(75, resp.answers().len(), "expected UDP to be truncated");
+        assert_eq!(74, resp.answers().len(), "expected UDP to be truncated");
     }

     #[test]
@@ -1667,6 +1719,16 @@ mod tests {
             xds_external_service("www.google.com", &[na(NW1, "1.1.1.1")]),
             xds_service("productpage", NS1, &[na(NW1, "9.9.9.9")]),
             xds_service("example", NS2, &[na(NW1, "10.10.10.10")]),
+            // Service with the same name in another namespace
+            // This should not be used if the preferred service namespace is set
+            xds_namespaced_external_service("everywhere.io", NS2, &[na(NW1, "10.10.10.110")]),
+            xds_namespaced_external_service("preferred.io", NS2, &[na(NW1, "10.10.10.210")]),
+            // Preferred service namespace
+            xds_namespaced_external_service("everywhere.io", PREFERRED, &[na(NW1, "10.10.10.111")]),
+            xds_namespaced_external_service("preferred.io", PREFERRED, &[na(NW1, "10.10.10.211")]),
+            // Service with the same name in the same namespace
+            // Client in NS1 should use this service
+            xds_namespaced_external_service("everywhere.io", NS1, &[na(NW1, "10.10.10.112")]),
             with_fqdn(
                 "details.ns2.svc.cluster.remote",
                 xds_service(
@@ -1817,9 +1879,17 @@ mod tests {
     }

     fn xds_external_service<S: AsRef<str>>(hostname: S, addrs: &[NetworkAddress]) -> XdsService {
+        xds_namespaced_external_service(hostname, NS1, addrs)
+    }
+
+    fn xds_namespaced_external_service<S1: AsRef<str>, S2: AsRef<str>>(
+        hostname: S1,
+        ns: S2,
+        vips: &[NetworkAddress],
+    ) -> XdsService {
         with_fqdn(
             hostname.as_ref(),
-            xds_service(hostname.as_ref(), NS1, addrs),
+            xds_service(hostname.as_ref(), ns.as_ref(), vips),
         )
     }

@@ -1881,24 +1951,25 @@ mod tests {
         _: Option<&Workload>,
         request: &Request,
     ) -> Result<Answer, LookupError> {
-        let name = request.query().name().into();
+        let query = request.request_info()?.query;
+        let name = query.name().into();
         let Some(ips) = self.ips.get(&name) else {
             // Not found.
             return Err(LookupError::ResponseCode(ResponseCode::NXDomain));
         };

         let mut out = Vec::new();
-        let rtype = request.query().query_type();
+        let rtype = query.query_type();
         for ip in ips {
             match ip {
                 IpAddr::V4(ip) => {
                     if rtype == RecordType::A {
-                        out.push(a(request.query().name().into(), *ip));
+                        out.push(a(query.name().into(), *ip));
                     }
                 }
                 IpAddr::V6(ip) => {
                     if rtype == RecordType::AAAA {
-                        out.push(aaaa(request.query().name().into(), *ip));
+                        out.push(aaaa(query.name().into(), *ip));
                     }
                 }
             }
@@ -58,10 +58,7 @@ async fn load_token(path: &PathBuf) -> io::Result<Vec<u8>> {
     let t = tokio::fs::read(path).await?;

     if t.is_empty() {
-        return Err(io::Error::new(
-            io::ErrorKind::Other,
-            "token file exists, but was empty",
-        ));
+        return Err(io::Error::other("token file exists, but was empty"));
     }
     Ok(t)
 }
@@ -38,6 +38,9 @@ use keyed_priority_queue::KeyedPriorityQueue;

 const CERT_REFRESH_FAILURE_RETRY_DELAY_MAX_INTERVAL: Duration = Duration::from_secs(150);

+/// Default trust domain to use if not otherwise specified.
+pub const DEFAULT_TRUST_DOMAIN: &str = "cluster.local";
+
 #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Hash)]
 pub enum Identity {
     Spiffe {
@@ -130,11 +133,10 @@ impl Identity {
 #[cfg(any(test, feature = "testing"))]
 impl Default for Identity {
     fn default() -> Self {
-        const TRUST_DOMAIN: &str = "cluster.local";
         const SERVICE_ACCOUNT: &str = "ztunnel";
         const NAMESPACE: &str = "istio-system";
         Identity::Spiffe {
-            trust_domain: TRUST_DOMAIN.into(),
+            trust_domain: DEFAULT_TRUST_DOMAIN.into(),
             namespace: NAMESPACE.into(),
             service_account: SERVICE_ACCOUNT.into(),
         }
@ -12,6 +12,9 @@
|
|||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use once_cell::sync::Lazy;
|
||||
use std::env;
|
||||
|
||||
pub mod admin;
|
||||
pub mod app;
|
||||
pub mod assertions;
|
||||
|
@ -42,3 +45,7 @@ pub mod xds;
|
|||
|
||||
#[cfg(any(test, feature = "testing"))]
|
||||
pub mod test_helpers;
|
||||
|
||||
#[allow(dead_code)]
|
||||
static PQC_ENABLED: Lazy<bool> =
|
||||
Lazy::new(|| env::var("COMPLIANCE_POLICY").unwrap_or_default() == "pqc");
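For context, PQC_ENABLED is read from the environment exactly once. A minimal sketch of the same check (the variable name is taken from the line above; everything else is illustrative):

```rust
fn main() {
    // True only when the process is launched with COMPLIANCE_POLICY=pqc.
    let pqc = std::env::var("COMPLIANCE_POLICY").unwrap_or_default() == "pqc";
    println!("post-quantum compliance policy enabled: {pqc}");
}
```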
|
||||
|
|
24
src/main.rs
|
@ -14,8 +14,9 @@
|
|||
|
||||
extern crate core;
|
||||
|
||||
use nix::sys::resource::{Resource, getrlimit, setrlimit};
|
||||
use std::sync::Arc;
|
||||
use tracing::info;
|
||||
use tracing::{info, warn};
|
||||
use ztunnel::*;
|
||||
|
||||
#[cfg(feature = "jemalloc")]
|
||||
|
@ -28,6 +29,26 @@ static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
|
|||
#[unsafe(export_name = "malloc_conf")]
|
||||
pub static malloc_conf: &[u8] = b"prof:true,prof_active:true,lg_prof_sample:19\0";
|
||||
|
||||
// We use this on Unix systems to increase the number of open file descriptors
// if possible. This is useful for high-load scenarios where the default limit
// is too low, which can lead to dropped connections and other issues:
// see: https://github.com/istio/ztunnel/issues/1585
fn increase_open_files_limit() {
|
||||
#[cfg(unix)]
|
||||
if let Ok((soft_limit, hard_limit)) = getrlimit(Resource::RLIMIT_NOFILE) {
|
||||
if let Err(e) = setrlimit(Resource::RLIMIT_NOFILE, hard_limit, hard_limit) {
|
||||
warn!("failed to set file descriptor limits: {e}");
|
||||
} else {
|
||||
info!(
|
||||
"set file descriptor limits from {} to {}",
|
||||
soft_limit, hard_limit
|
||||
);
|
||||
}
|
||||
} else {
|
||||
warn!("failed to get file descriptor limits");
|
||||
}
|
||||
}
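A small hedged sketch of verifying the effect of the bump above; it uses the same nix calls the diff imports (nix with the "resource" feature) and is not part of ztunnel itself:

```rust
use nix::sys::resource::{Resource, getrlimit};

fn main() -> nix::Result<()> {
    // After increase_open_files_limit() the soft limit should equal the hard limit.
    let (soft, hard) = getrlimit(Resource::RLIMIT_NOFILE)?;
    println!("RLIMIT_NOFILE soft={soft} hard={hard}");
    Ok(())
}
```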
|
||||
|
||||
fn main() -> anyhow::Result<()> {
|
||||
let _log_flush = telemetry::setup_logging();
|
||||
|
||||
|
@ -74,6 +95,7 @@ fn version() -> anyhow::Result<()> {
|
|||
|
||||
async fn proxy(cfg: Arc<config::Config>) -> anyhow::Result<()> {
|
||||
info!("version: {}", version::BuildInfo::new());
|
||||
increase_open_files_limit();
|
||||
info!("running with config: {}", serde_yaml::to_string(&cfg)?);
|
||||
app::build(cfg).await?.wait_termination().await
|
||||
}
|
||||
|
|
12
src/proxy.rs
|
@ -20,7 +20,7 @@ use std::sync::Arc;
|
|||
use std::time::Duration;
|
||||
use std::{fmt, io};
|
||||
|
||||
use hickory_proto::error::ProtoError;
|
||||
use hickory_proto::ProtoError;
|
||||
|
||||
use crate::strng::Strng;
|
||||
use rand::Rng;
|
||||
|
@ -48,8 +48,9 @@ use crate::state::{DemandProxyState, WorkloadInfo};
|
|||
use crate::{config, identity, socket, tls};
|
||||
|
||||
pub mod connection_manager;
|
||||
pub mod inbound;
|
||||
|
||||
mod h2;
|
||||
mod inbound;
|
||||
mod inbound_passthrough;
|
||||
#[allow(non_camel_case_types)]
|
||||
pub mod metrics;
|
||||
|
@ -259,6 +260,8 @@ pub(super) struct ProxyInputs {
|
|||
socket_factory: Arc<dyn SocketFactory + Send + Sync>,
|
||||
local_workload_information: Arc<LocalWorkloadInformation>,
|
||||
resolver: Option<Arc<dyn Resolver + Send + Sync>>,
|
||||
// If true, inbound connections created with these inputs will not attempt to preserve the original source IP.
|
||||
pub disable_inbound_freebind: bool,
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
|
@ -271,6 +274,7 @@ impl ProxyInputs {
|
|||
socket_factory: Arc<dyn SocketFactory + Send + Sync>,
|
||||
resolver: Option<Arc<dyn Resolver + Send + Sync>>,
|
||||
local_workload_information: Arc<LocalWorkloadInformation>,
|
||||
disable_inbound_freebind: bool,
|
||||
) -> Arc<Self> {
|
||||
Arc::new(Self {
|
||||
cfg,
|
||||
|
@ -280,6 +284,7 @@ impl ProxyInputs {
|
|||
socket_factory,
|
||||
local_workload_information,
|
||||
resolver,
|
||||
disable_inbound_freebind,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@ -479,6 +484,9 @@ pub enum Error {
|
|||
#[error("requested service {0} found, but has no IP addresses")]
|
||||
NoIPForService(String),
|
||||
|
||||
#[error("no service for target address: {0}")]
|
||||
NoService(SocketAddr),
|
||||
|
||||
#[error(
|
||||
"ip addresses were resolved for workload {0}, but valid dns response had no A/AAAA records"
|
||||
)]
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
// limitations under the License.
|
||||
|
||||
use crate::copy;
|
||||
use bytes::{BufMut, Bytes};
|
||||
use bytes::Bytes;
|
||||
use futures_core::ready;
|
||||
use h2::Reason;
|
||||
use std::io::Error;
|
||||
|
@ -85,7 +85,10 @@ pub struct H2StreamWriteHalf {
|
|||
_dropped: Option<DropCounter>,
|
||||
}
|
||||
|
||||
pub struct TokioH2Stream(H2Stream);
|
||||
pub struct TokioH2Stream {
|
||||
stream: H2Stream,
|
||||
buf: Bytes,
|
||||
}
|
||||
|
||||
struct DropCounter {
|
||||
// Whether the other end of this shared counter has already dropped.
|
||||
|
@ -144,7 +147,10 @@ impl Drop for DropCounter {
|
|||
// then the specific implementation will conflict with the generic one.
|
||||
impl TokioH2Stream {
|
||||
pub fn new(stream: H2Stream) -> Self {
|
||||
Self(stream)
|
||||
Self {
|
||||
stream,
|
||||
buf: Bytes::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -154,24 +160,21 @@ impl tokio::io::AsyncRead for TokioH2Stream {
|
|||
cx: &mut Context<'_>,
|
||||
buf: &mut tokio::io::ReadBuf<'_>,
|
||||
) -> Poll<std::io::Result<()>> {
|
||||
let pinned = std::pin::Pin::new(&mut self.0.read);
|
||||
copy::ResizeBufRead::poll_bytes(pinned, cx).map(|r| match r {
|
||||
Ok(bytes) => {
|
||||
if buf.remaining() < bytes.len() {
|
||||
Err(Error::new(
|
||||
std::io::ErrorKind::Other,
|
||||
format!(
|
||||
"kould overflow buffer of with {} remaining",
|
||||
buf.remaining()
|
||||
),
|
||||
))
|
||||
} else {
|
||||
buf.put(bytes);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
Err(e) => Err(e),
|
||||
})
|
||||
// Just return the bytes we have left over and don't poll the stream, because
// it's unclear what to do if there are bytes left over from the previous read and,
// when we poll, we get an error.
if self.buf.is_empty() {
|
||||
// If we have no unread bytes, we can poll the stream
|
||||
// and fill self.buf with the bytes we read.
|
||||
let pinned = std::pin::Pin::new(&mut self.stream.read);
|
||||
let res = ready!(copy::ResizeBufRead::poll_bytes(pinned, cx))?;
|
||||
self.buf = res;
|
||||
}
|
||||
// Copy as many bytes as we can from self.buf.
|
||||
let cnt = Ord::min(buf.remaining(), self.buf.len());
|
||||
buf.put_slice(&self.buf[..cnt]);
|
||||
self.buf = self.buf.split_off(cnt);
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
}
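The buffering above relies on Bytes::split_off keeping the head in place and returning the tail. A minimal illustrative sketch mirroring the poll_read logic (not part of the diff):

```rust
use bytes::Bytes;

fn main() {
    let mut buf = Bytes::from_static(b"poolsrv\nabcde");
    // Hand at most `cnt` bytes to the caller, retain the rest for the next read.
    let cnt = 8usize.min(buf.len());
    let remaining = buf.split_off(cnt);
    assert_eq!(&buf[..], b"poolsrv\n"); // what this poll_read call would copy out
    assert_eq!(&remaining[..], b"abcde"); // carried over to the next call
}
```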
|
||||
|
||||
|
@ -181,7 +184,7 @@ impl tokio::io::AsyncWrite for TokioH2Stream {
|
|||
cx: &mut Context<'_>,
|
||||
buf: &[u8],
|
||||
) -> Poll<Result<usize, tokio::io::Error>> {
|
||||
let pinned = std::pin::Pin::new(&mut self.0.write);
|
||||
let pinned = std::pin::Pin::new(&mut self.stream.write);
|
||||
let buf = Bytes::copy_from_slice(buf);
|
||||
copy::AsyncWriteBuf::poll_write_buf(pinned, cx, buf)
|
||||
}
|
||||
|
@ -190,7 +193,7 @@ impl tokio::io::AsyncWrite for TokioH2Stream {
|
|||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Result<(), std::io::Error>> {
|
||||
let pinned = std::pin::Pin::new(&mut self.0.write);
|
||||
let pinned = std::pin::Pin::new(&mut self.stream.write);
|
||||
copy::AsyncWriteBuf::poll_flush(pinned, cx)
|
||||
}
|
||||
|
||||
|
@ -198,7 +201,7 @@ impl tokio::io::AsyncWrite for TokioH2Stream {
|
|||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Result<(), std::io::Error>> {
|
||||
let pinned = std::pin::Pin::new(&mut self.0.write);
|
||||
let pinned = std::pin::Pin::new(&mut self.stream.write);
|
||||
copy::AsyncWriteBuf::poll_shutdown(pinned, cx)
|
||||
}
|
||||
}
|
||||
|
@ -302,6 +305,6 @@ fn h2_to_io_error(e: h2::Error) -> std::io::Error {
|
|||
if e.is_io() {
|
||||
e.into_io().unwrap()
|
||||
} else {
|
||||
std::io::Error::new(std::io::ErrorKind::Other, e)
|
||||
std::io::Error::other(e)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -45,7 +45,7 @@ use crate::state::{DemandProxyState, ProxyRbacContext};
|
|||
use crate::strng::Strng;
|
||||
use crate::tls::TlsError;
|
||||
|
||||
pub(super) struct Inbound {
|
||||
pub struct Inbound {
|
||||
listener: socket::Listener,
|
||||
drain: DrainWatcher,
|
||||
pi: Arc<ProxyInputs>,
|
||||
|
@ -53,7 +53,7 @@ pub(super) struct Inbound {
|
|||
}
|
||||
|
||||
impl Inbound {
|
||||
pub(super) async fn new(pi: Arc<ProxyInputs>, drain: DrainWatcher) -> Result<Inbound, Error> {
|
||||
pub(crate) async fn new(pi: Arc<ProxyInputs>, drain: DrainWatcher) -> Result<Inbound, Error> {
|
||||
let listener = pi
|
||||
.socket_factory
|
||||
.tcp_bind(pi.cfg.inbound_addr)
|
||||
|
@ -74,11 +74,12 @@ impl Inbound {
|
|||
})
|
||||
}
|
||||
|
||||
pub(super) fn address(&self) -> SocketAddr {
|
||||
/// Returns the socket address this proxy is listening on.
|
||||
pub fn address(&self) -> SocketAddr {
|
||||
self.listener.local_addr()
|
||||
}
|
||||
|
||||
pub(super) async fn run(self) {
|
||||
pub async fn run(self) {
|
||||
let pi = self.pi.clone();
|
||||
let acceptor = InboundCertProvider {
|
||||
local_workload: self.pi.local_workload_information.clone(),
|
||||
|
@ -122,7 +123,7 @@ impl Inbound {
|
|||
let conn = Connection {
|
||||
src_identity,
|
||||
src,
|
||||
dst_network: strng::new(&network), // inbound request must be on our network
|
||||
dst_network: network.clone(), // inbound request must be on our network
|
||||
dst,
|
||||
};
|
||||
debug!(%conn, "accepted connection");
|
||||
|
@ -244,10 +245,18 @@ impl Inbound {
|
|||
SocketAddr::new(loopback, ri.upstream_addr.port()),
|
||||
)
|
||||
} else {
|
||||
(
|
||||
enable_original_source.then_some(ri.rbac_ctx.conn.src.ip()),
|
||||
ri.upstream_addr,
|
||||
)
|
||||
// When ztunnel is proxying to its own internal endpoints (metrics server after HBONE termination),
|
||||
// we must not attempt to use the original external client's IP as the source for this internal connection.
|
||||
// Setting `disable_inbound_freebind` to true for such self-proxy scenarios ensures `upstream_src_ip` is `None`,
|
||||
// causing `freebind_connect` to use a local IP for the connection to ztunnel's own service.
|
||||
// For regular inbound traffic to other workloads, `disable_inbound_freebind` is false, and original source
|
||||
// preservation depends on `enable_original_source`.
|
||||
let upstream_src_ip = if pi.disable_inbound_freebind {
|
||||
None
|
||||
} else {
|
||||
enable_original_source.then_some(ri.rbac_ctx.conn.src.ip())
|
||||
};
|
||||
(upstream_src_ip, ri.upstream_addr)
|
||||
};
|
||||
|
||||
// Establish upstream connection between original source and destination
|
||||
|
@ -536,7 +545,7 @@ impl Inbound {
|
|||
|
||||
/// find_inbound_upstream determines the next hop for an inbound request.
|
||||
#[expect(clippy::type_complexity)]
|
||||
fn find_inbound_upstream(
|
||||
pub(super) fn find_inbound_upstream(
|
||||
cfg: &Config,
|
||||
state: &DemandProxyState,
|
||||
conn: &Connection,
|
||||
|
@ -545,6 +554,7 @@ impl Inbound {
|
|||
) -> Result<(SocketAddr, Option<TunnelRequest>, Vec<Arc<Service>>), Error> {
|
||||
// We always target the local workload IP as the destination. But we need to determine the port to send to.
|
||||
let target_ip = conn.dst.ip();
|
||||
|
||||
// First, fetch the actual target SocketAddr as well as all possible services this could be for.
|
||||
// Given they may request the pod directly, there may be multiple possible services; we will
|
||||
// select a final one (if any) later.
|
||||
|
@ -640,7 +650,7 @@ impl Inbound {
|
|||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct TunnelRequest {
|
||||
pub(super) struct TunnelRequest {
|
||||
tunnel_target: SocketAddr,
|
||||
protocol: Protocol,
|
||||
}
|
||||
|
@ -702,37 +712,36 @@ fn build_response(status: StatusCode) -> Response<()> {
|
|||
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
mod tests {
|
||||
use super::{Inbound, ProxyInputs};
|
||||
use crate::{config, proxy::ConnectionManager, proxy::inbound::HboneAddress, strng};
|
||||
|
||||
use crate::{
|
||||
config,
|
||||
identity::manager::mock::new_secret_manager,
|
||||
proxy::{
|
||||
ConnectionManager, DefaultSocketFactory, LocalWorkloadInformation,
|
||||
h2::server::RequestParts, inbound::HboneAddress,
|
||||
},
|
||||
rbac::Connection,
|
||||
state::{
|
||||
self, DemandProxyState,
|
||||
self, DemandProxyState, WorkloadInfo,
|
||||
service::{Endpoint, EndpointSet, Service},
|
||||
workload::{
|
||||
ApplicationTunnel, GatewayAddress, InboundProtocol, NetworkAddress, Workload,
|
||||
application_tunnel::Protocol as AppProtocol, gatewayaddress::Destination,
|
||||
ApplicationTunnel, GatewayAddress, HealthStatus, InboundProtocol, NetworkAddress,
|
||||
NetworkMode, Workload, application_tunnel::Protocol as AppProtocol,
|
||||
gatewayaddress::Destination,
|
||||
},
|
||||
},
|
||||
test_helpers,
|
||||
strng, test_helpers,
|
||||
};
|
||||
use hickory_resolver::config::{ResolverConfig, ResolverOpts};
|
||||
use http::{Method, Uri};
|
||||
use prometheus_client::registry::Registry;
|
||||
use std::{
|
||||
net::SocketAddr,
|
||||
sync::{Arc, RwLock},
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use crate::identity::manager::mock::new_secret_manager;
|
||||
use crate::proxy::DefaultSocketFactory;
|
||||
use crate::proxy::LocalWorkloadInformation;
|
||||
use crate::proxy::h2::server::RequestParts;
|
||||
use crate::state::WorkloadInfo;
|
||||
use crate::state::workload::HealthStatus;
|
||||
use hickory_resolver::config::{ResolverConfig, ResolverOpts};
|
||||
use http::{Method, Uri};
|
||||
use prometheus_client::registry::Registry;
|
||||
use test_case::test_case;
|
||||
|
||||
const CLIENT_POD_IP: &str = "10.0.0.1";
|
||||
|
@ -904,6 +913,7 @@ mod tests {
|
|||
sf,
|
||||
None,
|
||||
local_workload,
|
||||
false,
|
||||
));
|
||||
let inbound_request = Inbound::build_inbound_request(&pi, conn, &request_parts).await;
|
||||
match want {
|
||||
|
@ -959,7 +969,6 @@ mod tests {
|
|||
"waypoint",
|
||||
WAYPOINT_POD_IP,
|
||||
Waypoint::None,
|
||||
// the waypoint's _workload_ gets the app tunnel field
|
||||
server_waypoint.app_tunnel(),
|
||||
),
|
||||
("client", CLIENT_POD_IP, Waypoint::None, None),
|
||||
|
@ -975,6 +984,7 @@ mod tests {
|
|||
namespace: "default".into(),
|
||||
service_account: strng::format!("service-account-{name}"),
|
||||
application_tunnel: app_tunnel,
|
||||
network_mode: NetworkMode::Standard,
|
||||
..test_helpers::test_default_workload()
|
||||
});
|
||||
|
||||
|
|
|
@ -35,10 +35,10 @@ use crate::proxy::{ConnectionOpen, ConnectionResult, DerivedWorkload, metrics};
|
|||
use crate::drain::DrainWatcher;
|
||||
use crate::drain::run_with_drain;
|
||||
use crate::proxy::h2::{H2Stream, client::WorkloadKey};
|
||||
use crate::state::ServiceResolutionMode;
|
||||
use crate::state::service::ServiceDescription;
|
||||
use crate::state::service::{Service, ServiceDescription};
|
||||
use crate::state::workload::OutboundProtocol;
|
||||
use crate::state::workload::{InboundProtocol, NetworkAddress, Workload, address::Address};
|
||||
use crate::state::{ServiceResolutionMode, Upstream};
|
||||
use crate::{assertions, copy, proxy, socket};
|
||||
|
||||
use super::h2::TokioH2Stream;
|
||||
|
@ -369,6 +369,83 @@ impl OutboundConnection {
|
|||
}
|
||||
}
|
||||
|
||||
// This function is called when the selected next hop is on a different network,
// so we expect the upstream workload to have a network gateway configured.
//
// When we use a gateway to reach a workload on a remote network we have to
// use double HBONE (HBONE encapsulated inside HBONE). The gateway will
// terminate the outer HBONE tunnel and forward the inner HBONE to the actual
// destination as an opaque stream of bytes, and the actual destination will
// interpret it as an HBONE connection.
//
// If the upstream workload does not have an E/W gateway, this function returns
// an error indicating that it could not find a valid destination.
//
// A note about double HBONE: in double HBONE both the inner and the outer HBONE
// use the destination service name as the HBONE target URI.
//
// Having the target URI in the outer HBONE tunnel allows the E/W gateway to figure out
// where to route the data next without the need to terminate the inner HBONE tunnel.
// In other words, it can forward the inner HBONE as if it's an opaque stream of
// bytes without trying to interpret it.
//
// NOTE: when connecting through an E/W gateway, regardless of whether there is
// a waypoint or not, we always use the service hostname and the service port. It's
// somewhat different from how regular HBONE works, so I'm calling it out here.
async fn build_request_through_gateway(
|
||||
&self,
|
||||
source: Arc<Workload>,
|
||||
// next hop on the remote network that we picked as our destination.
|
||||
// It may be a local view of a Waypoint workload on remote network or
|
||||
// a local view of the service workload (when waypoint is not
|
||||
// configured).
|
||||
upstream: Upstream,
|
||||
// This is a target service we wanted to reach in the first place.
|
||||
//
|
||||
// NOTE: Crossing network boundaries is only supported for services
|
||||
// at the moment, so we should always have a service we could use.
|
||||
service: &Service,
|
||||
target: SocketAddr,
|
||||
) -> Result<Request, Error> {
|
||||
if let Some(gateway) = &upstream.workload.network_gateway {
|
||||
let gateway_upstream = self
|
||||
.pi
|
||||
.state
|
||||
.fetch_network_gateway(gateway, &source, target)
|
||||
.await?;
|
||||
let hbone_target_destination = Some(HboneAddress::SvcHostname(
|
||||
service.hostname.clone(),
|
||||
target.port(),
|
||||
));
|
||||
|
||||
debug!("built request to a destination on another network through an E/W gateway");
|
||||
Ok(Request {
|
||||
protocol: OutboundProtocol::DOUBLEHBONE,
|
||||
source,
|
||||
hbone_target_destination,
|
||||
actual_destination_workload: Some(gateway_upstream.workload.clone()),
|
||||
intended_destination_service: Some(ServiceDescription::from(service)),
|
||||
actual_destination: gateway_upstream.workload_socket_addr().ok_or(
|
||||
Error::NoValidDestination(Box::new((*gateway_upstream.workload).clone())),
|
||||
)?,
|
||||
// The outer tunnel of double HBONE is terminated by the E/W
|
||||
// gateway and so for the credentials of the next hop
|
||||
// (upstream_sans) we use gateway credentials.
|
||||
upstream_sans: gateway_upstream.workload_and_services_san(),
|
||||
// The inner HBONE tunnel is terminated by either the server
|
||||
// we want to reach or a Waypoint in front of it, depending on
|
||||
// the configuration. So for the final destination credentials
|
||||
// (final_sans) we use the upstream workload credentials.
|
||||
final_sans: upstream.service_sans(),
|
||||
})
|
||||
} else {
|
||||
// Do not try to send cross-network traffic without network gateway.
|
||||
Err(Error::NoValidDestination(Box::new(
|
||||
(*upstream.workload).clone(),
|
||||
)))
|
||||
}
|
||||
}
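A minimal, hedged illustration of the addressing rule described in the comment above (not ztunnel's types): the TCP-level destination is the E/W gateway, while both the outer and inner HBONE CONNECTs carry a service-addressed authority.

```rust
fn hbone_authority(service_hostname: &str, service_port: u16) -> String {
    // Both the inner and the outer HBONE use "<service hostname>:<service port>".
    format!("{service_hostname}:{service_port}")
}

fn main() {
    // Matches the expectation in the tests below: hbone_destination "example.com:80"
    // while the actual destination is the gateway address (e.g. 10.22.1.1:15009).
    assert_eq!(hbone_authority("example.com", 80), "example.com:80");
}
```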
|
||||
|
||||
// build_request computes all information about the request we should send
|
||||
// TODO: Do we want a single lock for source and upstream...?
|
||||
async fn build_request(
|
||||
|
@ -381,7 +458,7 @@ impl OutboundConnection {
|
|||
|
||||
// If this is to-service traffic check for a service waypoint
|
||||
// Capture result of whether this is svc addressed
|
||||
let svc_addressed = if let Some(Address::Service(target_service)) = state
|
||||
let service = if let Some(Address::Service(target_service)) = state
|
||||
.fetch_address(&NetworkAddress {
|
||||
network: self.pi.cfg.network.clone(),
|
||||
address: target.ip(),
|
||||
|
@ -393,6 +470,18 @@ impl OutboundConnection {
|
|||
.fetch_service_waypoint(&target_service, &source_workload, target)
|
||||
.await?
|
||||
{
|
||||
if waypoint.workload.network != source_workload.network {
|
||||
debug!("picked a waypoint on remote network");
|
||||
return self
|
||||
.build_request_through_gateway(
|
||||
source_workload.clone(),
|
||||
waypoint,
|
||||
&target_service,
|
||||
target,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
let upstream_sans = waypoint.workload_and_services_san();
|
||||
let actual_destination =
|
||||
waypoint
|
||||
|
@ -413,10 +502,10 @@ impl OutboundConnection {
|
|||
});
|
||||
}
|
||||
// this was service addressed but we did not find a waypoint
|
||||
true
|
||||
Some(target_service)
|
||||
} else {
|
||||
// this wasn't service addressed
|
||||
false
|
||||
None
|
||||
};
|
||||
|
||||
let Some(us) = state
|
||||
|
@ -428,7 +517,7 @@ impl OutboundConnection {
|
|||
)
|
||||
.await?
|
||||
else {
|
||||
if svc_addressed {
|
||||
if service.is_some() {
|
||||
return Err(Error::NoHealthyUpstream(target));
|
||||
}
|
||||
debug!("built request as passthrough; no upstream found");
|
||||
|
@ -446,37 +535,26 @@ impl OutboundConnection {
|
|||
|
||||
// Check whether we are using an E/W gateway and sending cross network traffic
|
||||
if us.workload.network != source_workload.network {
|
||||
if let Some(ew_gtw) = &us.workload.network_gateway {
|
||||
let gtw_us = {
|
||||
self.pi
|
||||
.state
|
||||
.fetch_network_gateway(ew_gtw, &source_workload, target)
|
||||
.await?
|
||||
};
|
||||
|
||||
let svc = us
|
||||
.destination_service
|
||||
.as_ref()
|
||||
.expect("Workloads with network gateways must be service addressed.");
|
||||
let hbone_target_destination =
|
||||
Some(HboneAddress::SvcHostname(svc.hostname.clone(), us.port));
|
||||
|
||||
return Ok(Request {
|
||||
protocol: OutboundProtocol::DOUBLEHBONE,
|
||||
source: source_workload,
|
||||
hbone_target_destination,
|
||||
actual_destination_workload: Some(gtw_us.workload.clone()),
|
||||
intended_destination_service: us.destination_service.clone(),
|
||||
actual_destination: gtw_us.workload_socket_addr().ok_or(
|
||||
Error::NoValidDestination(Box::new((*gtw_us.workload).clone())),
|
||||
)?,
|
||||
upstream_sans: gtw_us.workload_and_services_san(),
|
||||
final_sans: us.service_sans(),
|
||||
});
|
||||
} else {
|
||||
// Do not try to send cross-network traffic without network gateway.
|
||||
return Err(Error::NoValidDestination(Box::new((*us.workload).clone())));
|
||||
}
|
||||
// Workloads on remote network must be service addressed, so if we got here
|
||||
// and we don't have a service for the original target address then it's a
|
||||
// bug either in ztunnel itself or in istiod.
|
||||
//
|
||||
// For a double HBONE protocol implementation we have to know the
|
||||
// destination service and if there is no service for the target it's a bug.
|
||||
//
|
||||
// This situation "should never happen" because for workloads fetch_upstream
|
||||
// above only checks the workloads on the same network as this ztunnel
|
||||
// instance and therefore it should not be able to find a workload on a
|
||||
// different network.
|
||||
debug_assert!(
|
||||
service.is_some(),
|
||||
"workload on remote network is not service addressed"
|
||||
);
|
||||
debug!("picked a workload on remote network");
|
||||
let service = service.as_ref().ok_or(Error::NoService(target))?;
|
||||
return self
|
||||
.build_request_through_gateway(source_workload.clone(), us, service, target)
|
||||
.await;
|
||||
}
|
||||
|
||||
// We are not using a network gateway and there is no workload address.
|
||||
|
@ -491,7 +569,7 @@ impl OutboundConnection {
|
|||
// Check if we need to go through a workload addressed waypoint.
|
||||
// Don't traverse waypoint twice if the source is sandwich-outbound.
|
||||
// Don't traverse waypoint if traffic was addressed to a service (handled before)
|
||||
if !from_waypoint && !svc_addressed {
|
||||
if !from_waypoint && service.is_none() {
|
||||
// For case upstream server has enabled waypoint
|
||||
let waypoint = state
|
||||
.fetch_workload_waypoint(&us.workload, &source_workload, target)
|
||||
|
@ -716,6 +794,7 @@ mod tests {
|
|||
local_workload_information: local_workload_information.clone(),
|
||||
connection_manager: ConnectionManager::default(),
|
||||
resolver: None,
|
||||
disable_inbound_freebind: false,
|
||||
}),
|
||||
id: TraceParent::new(),
|
||||
pool: WorkloadHBONEPool::new(
|
||||
|
@ -815,6 +894,8 @@ mod tests {
|
|||
|
||||
#[tokio::test]
|
||||
async fn build_request_double_hbone() {
|
||||
// example.com service has a workload on remote network.
|
||||
// E/W gateway is addressed by an IP.
|
||||
run_build_request_multi(
|
||||
"127.0.0.1",
|
||||
"127.0.0.3:80",
|
||||
|
@ -866,11 +947,13 @@ mod tests {
|
|||
],
|
||||
Some(ExpectedRequest {
|
||||
protocol: OutboundProtocol::DOUBLEHBONE,
|
||||
hbone_destination: "example.com:8080",
|
||||
hbone_destination: "example.com:80",
|
||||
destination: "10.22.1.1:15009",
|
||||
}),
|
||||
)
|
||||
.await;
|
||||
// example.com service has a workload on remote network.
|
||||
// E/W gateway is addressed by a hostname.
|
||||
run_build_request_multi(
|
||||
"127.0.0.1",
|
||||
"127.0.0.3:80",
|
||||
|
@ -943,11 +1026,218 @@ mod tests {
|
|||
],
|
||||
Some(ExpectedRequest {
|
||||
protocol: OutboundProtocol::DOUBLEHBONE,
|
||||
hbone_destination: "example.com:8080",
|
||||
hbone_destination: "example.com:80",
|
||||
destination: "127.0.0.5:15008",
|
||||
}),
|
||||
)
|
||||
.await;
|
||||
// example.com service has a waypoint and waypoint workload is on remote network.
|
||||
// E/W gateway is addressed by an IP.
|
||||
run_build_request_multi(
|
||||
"127.0.0.1",
|
||||
"127.0.0.3:80",
|
||||
vec![
|
||||
XdsAddressType::Service(XdsService {
|
||||
hostname: "example.com".to_string(),
|
||||
addresses: vec![XdsNetworkAddress {
|
||||
network: "".to_string(),
|
||||
address: vec![127, 0, 0, 3],
|
||||
}],
|
||||
ports: vec![Port {
|
||||
service_port: 80,
|
||||
target_port: 8080,
|
||||
}],
|
||||
waypoint: Some(xds::istio::workload::GatewayAddress {
|
||||
destination: Some(
|
||||
xds::istio::workload::gateway_address::Destination::Hostname(
|
||||
XdsNamespacedHostname {
|
||||
namespace: Default::default(),
|
||||
hostname: "waypoint.com".into(),
|
||||
},
|
||||
),
|
||||
),
|
||||
hbone_mtls_port: 15008,
|
||||
}),
|
||||
..Default::default()
|
||||
}),
|
||||
XdsAddressType::Service(XdsService {
|
||||
hostname: "waypoint.com".to_string(),
|
||||
addresses: vec![XdsNetworkAddress {
|
||||
network: "".to_string(),
|
||||
address: vec![127, 0, 0, 4],
|
||||
}],
|
||||
ports: vec![Port {
|
||||
service_port: 15008,
|
||||
target_port: 15008,
|
||||
}],
|
||||
..Default::default()
|
||||
}),
|
||||
XdsAddressType::Workload(XdsWorkload {
|
||||
uid: "Kubernetes//Pod/default/remote-waypoint-pod".to_string(),
|
||||
addresses: vec![],
|
||||
network: "remote".to_string(),
|
||||
network_gateway: Some(xds::istio::workload::GatewayAddress {
|
||||
destination: Some(
|
||||
xds::istio::workload::gateway_address::Destination::Address(
|
||||
XdsNetworkAddress {
|
||||
network: "remote".to_string(),
|
||||
address: vec![10, 22, 1, 1],
|
||||
},
|
||||
),
|
||||
),
|
||||
hbone_mtls_port: 15009,
|
||||
}),
|
||||
services: std::collections::HashMap::from([(
|
||||
"/waypoint.com".to_string(),
|
||||
PortList {
|
||||
ports: vec![Port {
|
||||
service_port: 15008,
|
||||
target_port: 15008,
|
||||
}],
|
||||
},
|
||||
)]),
|
||||
..Default::default()
|
||||
}),
|
||||
XdsAddressType::Workload(XdsWorkload {
|
||||
uid: "Kubernetes//Pod/default/remote-ew-gtw".to_string(),
|
||||
addresses: vec![Bytes::copy_from_slice(&[10, 22, 1, 1])],
|
||||
network: "remote".to_string(),
|
||||
..Default::default()
|
||||
}),
|
||||
],
|
||||
Some(ExpectedRequest {
|
||||
protocol: OutboundProtocol::DOUBLEHBONE,
|
||||
hbone_destination: "example.com:80",
|
||||
destination: "10.22.1.1:15009",
|
||||
}),
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn build_request_failover_to_remote() {
|
||||
// Similar to the double HBONE test that we already have, but it sets up a scenario where
// the load balancing logic will pick a workload on a remote cluster when local workloads are
// unhealthy, thus showing the expected failover behavior.
let service = XdsAddressType::Service(XdsService {
|
||||
hostname: "example.com".to_string(),
|
||||
addresses: vec![XdsNetworkAddress {
|
||||
network: "".to_string(),
|
||||
address: vec![127, 0, 0, 3],
|
||||
}],
|
||||
ports: vec![Port {
|
||||
service_port: 80,
|
||||
target_port: 8080,
|
||||
}],
|
||||
// Prefer routing to workloads on the same network, but when nothing is healthy locally
|
||||
// allow failing over to remote networks.
|
||||
load_balancing: Some(xds::istio::workload::LoadBalancing {
|
||||
routing_preference: vec![
|
||||
xds::istio::workload::load_balancing::Scope::Network.into(),
|
||||
],
|
||||
mode: xds::istio::workload::load_balancing::Mode::Failover.into(),
|
||||
..Default::default()
|
||||
}),
|
||||
..Default::default()
|
||||
});
|
||||
let ew_gateway = XdsAddressType::Workload(XdsWorkload {
|
||||
uid: "Kubernetes//Pod/default/remote-ew-gtw".to_string(),
|
||||
addresses: vec![Bytes::copy_from_slice(&[10, 22, 1, 1])],
|
||||
network: "remote".to_string(),
|
||||
..Default::default()
|
||||
});
|
||||
let remote_workload = XdsAddressType::Workload(XdsWorkload {
|
||||
uid: "Kubernetes//Pod/default/remote-example.com-pod".to_string(),
|
||||
addresses: vec![],
|
||||
network: "remote".to_string(),
|
||||
network_gateway: Some(xds::istio::workload::GatewayAddress {
|
||||
destination: Some(xds::istio::workload::gateway_address::Destination::Address(
|
||||
XdsNetworkAddress {
|
||||
network: "remote".to_string(),
|
||||
address: vec![10, 22, 1, 1],
|
||||
},
|
||||
)),
|
||||
hbone_mtls_port: 15009,
|
||||
}),
|
||||
services: std::collections::HashMap::from([(
|
||||
"/example.com".to_string(),
|
||||
PortList {
|
||||
ports: vec![Port {
|
||||
service_port: 80,
|
||||
target_port: 8080,
|
||||
}],
|
||||
},
|
||||
)]),
|
||||
..Default::default()
|
||||
});
|
||||
let healthy_local_workload = XdsAddressType::Workload(XdsWorkload {
|
||||
uid: "Kubernetes//Pod/default/local-example.com-pod".to_string(),
|
||||
addresses: vec![Bytes::copy_from_slice(&[127, 0, 0, 2])],
|
||||
network: "".to_string(),
|
||||
tunnel_protocol: xds::istio::workload::TunnelProtocol::Hbone.into(),
|
||||
services: std::collections::HashMap::from([(
|
||||
"/example.com".to_string(),
|
||||
PortList {
|
||||
ports: vec![Port {
|
||||
service_port: 80,
|
||||
target_port: 8080,
|
||||
}],
|
||||
},
|
||||
)]),
|
||||
status: xds::istio::workload::WorkloadStatus::Healthy.into(),
|
||||
..Default::default()
|
||||
});
|
||||
let unhealthy_local_workload = XdsAddressType::Workload(XdsWorkload {
|
||||
uid: "Kubernetes//Pod/default/local-example.com-pod".to_string(),
|
||||
addresses: vec![Bytes::copy_from_slice(&[127, 0, 0, 2])],
|
||||
network: "".to_string(),
|
||||
tunnel_protocol: xds::istio::workload::TunnelProtocol::Hbone.into(),
|
||||
services: std::collections::HashMap::from([(
|
||||
"/example.com".to_string(),
|
||||
PortList {
|
||||
ports: vec![Port {
|
||||
service_port: 80,
|
||||
target_port: 8080,
|
||||
}],
|
||||
},
|
||||
)]),
|
||||
status: xds::istio::workload::WorkloadStatus::Unhealthy.into(),
|
||||
..Default::default()
|
||||
});
|
||||
|
||||
run_build_request_multi(
|
||||
"127.0.0.1",
|
||||
"127.0.0.3:80",
|
||||
vec![
|
||||
service.clone(),
|
||||
ew_gateway.clone(),
|
||||
remote_workload.clone(),
|
||||
healthy_local_workload.clone(),
|
||||
],
|
||||
Some(ExpectedRequest {
|
||||
protocol: OutboundProtocol::HBONE,
|
||||
hbone_destination: "127.0.0.2:8080",
|
||||
destination: "127.0.0.2:15008",
|
||||
}),
|
||||
)
|
||||
.await;
|
||||
|
||||
run_build_request_multi(
|
||||
"127.0.0.1",
|
||||
"127.0.0.3:80",
|
||||
vec![
|
||||
service.clone(),
|
||||
ew_gateway.clone(),
|
||||
remote_workload.clone(),
|
||||
unhealthy_local_workload.clone(),
|
||||
],
|
||||
Some(ExpectedRequest {
|
||||
protocol: OutboundProtocol::DOUBLEHBONE,
|
||||
hbone_destination: "example.com:80",
|
||||
destination: "10.22.1.1:15009",
|
||||
}),
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
|
|
|
@ -594,9 +594,10 @@ mod test {
|
|||
}
|
||||
|
||||
/// This is really a test for TokioH2Stream, but it's nicer here because we have access to
|
||||
/// streams
|
||||
/// streams.
|
||||
/// Most important, we make sure there are no panics.
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn small_reads() {
|
||||
async fn read_buffering() {
|
||||
let (mut pool, srv) = setup_test(3).await;
|
||||
|
||||
let key = key(&srv, 2);
|
||||
|
@ -612,13 +613,28 @@ mod test {
|
|||
let c = pool.send_request_pooled(&key.clone(), req()).await.unwrap();
|
||||
let mut c = TokioH2Stream::new(c);
|
||||
c.write_all(b"abcde").await.unwrap();
|
||||
let mut b = [0u8; 0];
|
||||
// Crucially, this should error rather than panic.
|
||||
if let Err(e) = c.read(&mut b).await {
|
||||
assert_eq!(e.kind(), io::ErrorKind::Other);
|
||||
} else {
|
||||
panic!("Should have errored");
|
||||
}
|
||||
let mut b = [0u8; 100];
|
||||
// Properly buffer reads and don't error
|
||||
assert_eq!(c.read(&mut b).await.unwrap(), 8);
|
||||
assert_eq!(&b[..8], b"poolsrv\n"); // this is added by itself
|
||||
assert_eq!(c.read(&mut b[..1]).await.unwrap(), 1);
|
||||
assert_eq!(&b[..1], b"a");
|
||||
assert_eq!(c.read(&mut b[..1]).await.unwrap(), 1);
|
||||
assert_eq!(&b[..1], b"b");
|
||||
assert_eq!(c.read(&mut b[..1]).await.unwrap(), 1);
|
||||
assert_eq!(&b[..1], b"c");
|
||||
assert_eq!(c.read(&mut b).await.unwrap(), 2); // there are only two bytes left
|
||||
assert_eq!(&b[..2], b"de");
|
||||
|
||||
// Once we drop the pool, we should still retain the buffered data,
|
||||
// but then we should error.
|
||||
c.write_all(b"abcde").await.unwrap();
|
||||
assert_eq!(c.read(&mut b[..3]).await.unwrap(), 3);
|
||||
assert_eq!(&b[..3], b"abc");
|
||||
drop(pool);
|
||||
assert_eq!(c.read(&mut b[..2]).await.unwrap(), 2);
|
||||
assert_eq!(&b[..2], b"de");
|
||||
assert!(c.read(&mut b).await.is_err());
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
|
|
|
@ -19,8 +19,9 @@ use crate::dns::resolver::Resolver;
|
|||
use hickory_proto::op::{Message, MessageType, Query};
|
||||
use hickory_proto::rr::{Name, RecordType};
|
||||
use hickory_proto::serialize::binary::BinDecodable;
|
||||
use hickory_proto::xfer::Protocol;
|
||||
use hickory_server::authority::MessageRequest;
|
||||
use hickory_server::server::{Protocol, Request};
|
||||
use hickory_server::server::Request;
|
||||
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
|
||||
use std::sync::Arc;
|
||||
use std::time::Instant;
|
||||
|
@ -315,7 +316,7 @@ async fn dns_lookup(
|
|||
let answer = resolver.lookup(&req).await?;
|
||||
let response = answer
|
||||
.record_iter()
|
||||
.filter_map(|rec| rec.data().and_then(|d| d.ip_addr()))
|
||||
.filter_map(|rec| rec.data().ip_addr())
|
||||
.next() // TODO: do not always use the first result
|
||||
.ok_or_else(|| Error::DnsEmpty)?;
|
||||
|
||||
|
|
|
@ -22,10 +22,9 @@ use crate::dns;
|
|||
use crate::drain::DrainWatcher;
|
||||
|
||||
use crate::proxy::connection_manager::ConnectionManager;
|
||||
use crate::proxy::{DefaultSocketFactory, Proxy, inbound::Inbound};
|
||||
use crate::proxy::{Error, LocalWorkloadInformation, Metrics};
|
||||
|
||||
use crate::proxy::Proxy;
|
||||
|
||||
// Proxy factory creates ztunnel proxies using a socket factory.
|
||||
// this allows us to create our proxies the same way in regular mode and in inpod mode.
|
||||
pub struct ProxyFactory {
|
||||
|
@ -113,6 +112,7 @@ impl ProxyFactory {
|
|||
drain.clone(),
|
||||
socket_factory.as_ref(),
|
||||
local_workload_information.as_fetcher(),
|
||||
self.config.prefered_service_namespace.clone(),
|
||||
)
|
||||
.await?;
|
||||
resolver = Some(server.resolver());
|
||||
|
@ -130,6 +130,7 @@ impl ProxyFactory {
|
|||
socket_factory.clone(),
|
||||
resolver,
|
||||
local_workload_information,
|
||||
false,
|
||||
);
|
||||
result.connection_manager = Some(cm);
|
||||
result.proxy = Some(Proxy::from_inputs(pi, drain).await?);
|
||||
|
@ -137,6 +138,52 @@ impl ProxyFactory {
|
|||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
/// Creates an inbound listener specifically for ztunnel's own internal endpoints (metrics).
|
||||
/// This allows ztunnel to act as its own workload, enforcing policies on traffic directed to itself.
|
||||
/// This is distinct from the main inbound listener which handles traffic for other workloads proxied by ztunnel.
|
||||
pub async fn create_ztunnel_self_proxy_listener(
|
||||
&self,
|
||||
) -> Result<Option<crate::proxy::inbound::Inbound>, Error> {
|
||||
if self.config.proxy_mode != config::ProxyMode::Shared {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
if let (Some(ztunnel_identity), Some(ztunnel_workload)) =
|
||||
(&self.config.ztunnel_identity, &self.config.ztunnel_workload)
|
||||
{
|
||||
tracing::info!(
|
||||
"creating ztunnel self-proxy listener with identity: {:?}",
|
||||
ztunnel_identity
|
||||
);
|
||||
|
||||
let local_workload_information = Arc::new(LocalWorkloadInformation::new(
|
||||
Arc::new(ztunnel_workload.clone()),
|
||||
self.state.clone(),
|
||||
self.cert_manager.clone(),
|
||||
));
|
||||
|
||||
let socket_factory = Arc::new(DefaultSocketFactory(self.config.socket_config));
|
||||
|
||||
let cm = ConnectionManager::default();
|
||||
|
||||
let pi = crate::proxy::ProxyInputs::new(
|
||||
self.config.clone(),
|
||||
cm.clone(),
|
||||
self.state.clone(),
|
||||
self.proxy_metrics.clone(),
|
||||
socket_factory,
|
||||
None,
|
||||
local_workload_information,
|
||||
true,
|
||||
);
|
||||
|
||||
let inbound = Inbound::new(pi, self.drain.clone()).await?;
|
||||
Ok(Some(inbound))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
|
|
64
src/state.rs
|
@ -33,7 +33,7 @@ use crate::{cert_fetcher, config, rbac, xds};
|
|||
use crate::{proxy, strng};
|
||||
use educe::Educe;
|
||||
use futures_util::FutureExt;
|
||||
use hickory_resolver::TokioAsyncResolver;
|
||||
use hickory_resolver::TokioResolver;
|
||||
use hickory_resolver::config::*;
|
||||
use hickory_resolver::name_server::TokioConnectionProvider;
|
||||
use itertools::Itertools;
|
||||
|
@ -366,6 +366,19 @@ impl ProxyState {
|
|||
debug!("failed to fetch workload for {}", ep.workload_uid);
|
||||
return None;
|
||||
};
|
||||
|
||||
let in_network = wl.network == src.network;
|
||||
let has_network_gateway = wl.network_gateway.is_some();
|
||||
let has_address = !wl.workload_ips.is_empty() || !wl.hostname.is_empty();
|
||||
if !has_address {
|
||||
// Workload has no IP. We can only reach it via a network gateway
|
||||
// WDS is client-agnostic, so we will get a network gateway for a workload
|
||||
// even if it's in the same network; we should never use it.
|
||||
if in_network || !has_network_gateway {
|
||||
return None;
|
||||
}
|
||||
}
|
||||
|
||||
match resolution_mode {
|
||||
ServiceResolutionMode::Standard => {
|
||||
if target_port.unwrap_or_default() == 0 && !ep.port.contains_key(&svc_port) {
|
||||
|
@ -466,7 +479,7 @@ pub struct DemandProxyState {
|
|||
metrics: Arc<proxy::Metrics>,
|
||||
|
||||
#[serde(skip_serializing)]
|
||||
dns_resolver: TokioAsyncResolver,
|
||||
dns_resolver: TokioResolver,
|
||||
}
|
||||
|
||||
impl DemandProxyState {
|
||||
|
@ -487,11 +500,12 @@ impl DemandProxyState {
|
|||
dns_resolver_opts: ResolverOpts,
|
||||
metrics: Arc<proxy::Metrics>,
|
||||
) -> Self {
|
||||
let dns_resolver = TokioAsyncResolver::new(
|
||||
dns_resolver_cfg.to_owned(),
|
||||
dns_resolver_opts.clone(),
|
||||
let mut rb = hickory_resolver::Resolver::builder_with_config(
|
||||
dns_resolver_cfg,
|
||||
TokioConnectionProvider::default(),
|
||||
);
|
||||
*rb.options_mut() = dns_resolver_opts;
|
||||
let dns_resolver = rb.build();
|
||||
Self {
|
||||
state,
|
||||
demand,
|
||||
|
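For readers tracking the hickory-dns 0.24 to 0.25 upgrade, here is a hedged sketch of the resolver construction pattern the hunk above moves to (names follow the calls visible in the diff; other versions may differ):

```rust
use hickory_resolver::TokioResolver;
use hickory_resolver::config::{ResolverConfig, ResolverOpts};
use hickory_resolver::name_server::TokioConnectionProvider;

fn build_resolver(cfg: ResolverConfig, opts: ResolverOpts) -> TokioResolver {
    // Replaces the removed TokioAsyncResolver::new(cfg, opts, provider) call.
    let mut rb =
        hickory_resolver::Resolver::builder_with_config(cfg, TokioConnectionProvider::default());
    *rb.options_mut() = opts;
    rb.build()
}
```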
@ -665,7 +679,7 @@ impl DemandProxyState {
|
|||
let (matching, unmatching): (Vec<_>, Vec<_>) = resp
|
||||
.as_lookup()
|
||||
.record_iter()
|
||||
.filter_map(|record| record.data().and_then(|d| d.ip_addr()))
|
||||
.filter_map(|record| record.data().ip_addr())
|
||||
.partition(|record| record.is_ipv6() == original_target_address.is_ipv6());
|
||||
// Randomly pick an IP, prefer to match the IP family of the downstream request.
|
||||
// Without this, we run into trouble in pure v4 or pure v6 environments.
|
||||
|
@ -1570,6 +1584,22 @@ mod tests {
|
|||
},
|
||||
..test_helpers::test_default_workload()
|
||||
};
|
||||
let wl_empty_ip = Workload {
|
||||
uid: "cluster1//v1/Pod/default/wl_empty_ip".into(),
|
||||
name: "wl_empty_ip".into(),
|
||||
namespace: "default".into(),
|
||||
trust_domain: "cluster.local".into(),
|
||||
service_account: "default".into(),
|
||||
workload_ips: vec![], // none!
|
||||
network: "network".into(),
|
||||
locality: Locality {
|
||||
region: "reg".into(),
|
||||
zone: "zone".into(),
|
||||
subzone: "".into(),
|
||||
},
|
||||
..test_helpers::test_default_workload()
|
||||
};
|
||||
|
||||
let _ep_almost = Workload {
|
||||
uid: "cluster1//v1/Pod/default/ep_almost".into(),
|
||||
name: "wl_almost".into(),
|
||||
|
@ -1616,6 +1646,11 @@ mod tests {
|
|||
port: HashMap::from([(80u16, 80u16)]),
|
||||
status: HealthStatus::Healthy,
|
||||
},
|
||||
Endpoint {
|
||||
workload_uid: "cluster1//v1/Pod/default/wl_empty_ip".into(),
|
||||
port: HashMap::from([(80u16, 80u16)]),
|
||||
status: HealthStatus::Healthy,
|
||||
},
|
||||
]);
|
||||
let strict_svc = Service {
|
||||
endpoints: endpoints.clone(),
|
||||
|
@ -1648,6 +1683,7 @@ mod tests {
|
|||
state.workloads.insert(Arc::new(wl_no_locality.clone()));
|
||||
state.workloads.insert(Arc::new(wl_match.clone()));
|
||||
state.workloads.insert(Arc::new(wl_almost.clone()));
|
||||
state.workloads.insert(Arc::new(wl_empty_ip.clone()));
|
||||
state.services.insert(strict_svc.clone());
|
||||
state.services.insert(failover_svc.clone());
|
||||
|
||||
|
@ -1662,6 +1698,15 @@ mod tests {
|
|||
assert!(want.contains(&got.unwrap()), "{}", desc);
|
||||
}
|
||||
};
|
||||
let assert_not_endpoint =
|
||||
|src: &Workload, svc: &Service, uid: &str, tries: usize, desc: &str| {
|
||||
for _ in 0..tries {
|
||||
let got = state
|
||||
.load_balance(src, svc, 80, ServiceResolutionMode::Standard)
|
||||
.map(|(ep, _)| ep.workload_uid.as_str());
|
||||
assert!(got != Some(uid), "{}", desc);
|
||||
}
|
||||
};
|
||||
|
||||
assert_endpoint(
|
||||
&wl_no_locality,
|
||||
|
@ -1707,5 +1752,12 @@ mod tests {
|
|||
vec!["cluster1//v1/Pod/default/wl_match"],
|
||||
"failover full match selects closest match",
|
||||
);
|
||||
assert_not_endpoint(
|
||||
&wl_no_locality,
|
||||
&failover_svc,
|
||||
"cluster1//v1/Pod/default/wl_empty_ip",
|
||||
10,
|
||||
"failover no match can select any endpoint",
|
||||
);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -326,9 +326,7 @@ impl io::Write for WriteAdaptor<'_> {
|
|||
let s =
|
||||
std::str::from_utf8(buf).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
|
||||
|
||||
self.fmt_write
|
||||
.write_str(s)
|
||||
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
|
||||
self.fmt_write.write_str(s).map_err(io::Error::other)?;
|
||||
|
||||
Ok(s.len())
|
||||
}
|
||||
|
|
|
@ -52,6 +52,7 @@ pub struct TestApp {
|
|||
|
||||
pub namespace: Option<super::netns::Namespace>,
|
||||
pub shutdown: ShutdownTrigger,
|
||||
pub ztunnel_identity: Option<identity::Identity>,
|
||||
}
|
||||
|
||||
impl From<(&Bound, Arc<SecretManager>)> for TestApp {
|
||||
|
@ -66,6 +67,7 @@ impl From<(&Bound, Arc<SecretManager>)> for TestApp {
|
|||
cert_manager,
|
||||
namespace: None,
|
||||
shutdown: app.shutdown.trigger(),
|
||||
ztunnel_identity: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -24,21 +24,23 @@ use crate::xds::istio::workload::Workload as XdsWorkload;
|
|||
use crate::{dns, drain, metrics};
|
||||
use futures_util::ready;
|
||||
use futures_util::stream::{Stream, StreamExt};
|
||||
use hickory_client::client::{AsyncClient, ClientHandle};
|
||||
use hickory_client::error::ClientError;
|
||||
use hickory_client::ClientError;
|
||||
use hickory_client::client::{Client, ClientHandle};
|
||||
use hickory_proto::DnsHandle;
|
||||
use hickory_proto::error::{ProtoError, ProtoErrorKind};
|
||||
use hickory_proto::iocompat::AsyncIoTokioAsStd;
|
||||
use hickory_proto::op::{Edns, Message, MessageType, OpCode, Query, ResponseCode};
|
||||
use hickory_proto::rr::rdata::{A, AAAA, CNAME};
|
||||
use hickory_proto::rr::{DNSClass, Name, RData, Record, RecordType};
|
||||
use hickory_proto::runtime::TokioRuntimeProvider;
|
||||
use hickory_proto::runtime::iocompat::AsyncIoTokioAsStd;
|
||||
use hickory_proto::serialize::binary::BinDecodable;
|
||||
use hickory_proto::tcp::TcpClientStream;
|
||||
use hickory_proto::udp::UdpClientStream;
|
||||
use hickory_proto::xfer::Protocol;
|
||||
use hickory_proto::xfer::{DnsRequest, DnsRequestOptions, DnsResponse};
|
||||
use hickory_proto::{ProtoError, ProtoErrorKind};
|
||||
use hickory_resolver::config::{NameServerConfig, ResolverConfig, ResolverOpts};
|
||||
use hickory_server::authority::{LookupError, MessageRequest};
|
||||
use hickory_server::server::{Protocol, Request};
|
||||
use hickory_server::server::Request;
|
||||
use prometheus_client::registry::Registry;
|
||||
use std::collections::HashMap;
|
||||
use std::future::Future;
|
||||
|
@ -46,7 +48,7 @@ use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
|
|||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
use std::task::{Context, Poll};
|
||||
use tokio::net::{TcpStream, UdpSocket};
|
||||
use tokio::net::TcpStream;
|
||||
|
||||
const TTL: u32 = 5;
|
||||
|
||||
|
@ -72,11 +74,14 @@ pub fn cname(name: Name, canonical_name: Name) -> Record {
|
|||
|
||||
/// Creates a new DNS client that establishes a TCP connection to the nameserver at the given
|
||||
/// address.
|
||||
pub async fn new_tcp_client(addr: SocketAddr) -> AsyncClient {
|
||||
let (stream, sender) = TcpClientStream::<AsyncIoTokioAsStd<TcpStream>>::new(addr);
|
||||
let (client, bg) = AsyncClient::new(Box::new(stream), sender, None)
|
||||
.await
|
||||
.unwrap();
|
||||
pub async fn new_tcp_client(addr: SocketAddr) -> Client {
|
||||
let (stream, sender) = TcpClientStream::<AsyncIoTokioAsStd<TcpStream>>::new(
|
||||
addr,
|
||||
None,
|
||||
None,
|
||||
TokioRuntimeProvider::new(),
|
||||
);
|
||||
let (client, bg) = Client::new(Box::new(stream), sender, None).await.unwrap();
|
||||
|
||||
// Run the client exchange in the background.
|
||||
tokio::spawn(bg);
|
||||
|
@ -85,9 +90,10 @@ pub async fn new_tcp_client(addr: SocketAddr) -> AsyncClient {
|
|||
}
|
||||
|
||||
/// Creates a new DNS client that establishes a UDP connection to the nameserver at the given address.
|
||||
pub async fn new_udp_client(addr: SocketAddr) -> AsyncClient {
|
||||
let stream = UdpClientStream::<UdpSocket>::new(addr);
|
||||
let (client, bg) = AsyncClient::connect(stream).await.unwrap();
|
||||
pub async fn new_udp_client(addr: SocketAddr) -> Client {
|
||||
let stream =
|
||||
UdpClientStream::<TokioRuntimeProvider>::builder(addr, TokioRuntimeProvider::new()).build();
|
||||
let (client, bg) = Client::connect(stream).await.unwrap();
|
||||
|
||||
// Run the client exchange in the background.
|
||||
tokio::spawn(bg);
|
||||
|
@ -106,7 +112,7 @@ pub async fn send_request<C: ClientHandle>(
|
|||
|
||||
/// Sends a request with the given maximum response payload size.
|
||||
pub async fn send_with_max_size(
|
||||
client: &mut AsyncClient,
|
||||
client: &mut Client,
|
||||
name: Name,
|
||||
rr_type: RecordType,
|
||||
max_payload: u16,
|
||||
|
@ -230,15 +236,17 @@ fn internal_resolver_config(tcp: SocketAddr, udp: SocketAddr) -> ResolverConfig
|
|||
let mut rc = ResolverConfig::new();
|
||||
rc.add_name_server(NameServerConfig {
|
||||
socket_addr: udp,
|
||||
protocol: hickory_resolver::config::Protocol::Udp,
|
||||
protocol: Protocol::Udp,
|
||||
tls_dns_name: None,
|
||||
http_endpoint: None,
|
||||
trust_negative_responses: false,
|
||||
bind_addr: None,
|
||||
});
|
||||
rc.add_name_server(NameServerConfig {
|
||||
socket_addr: tcp,
|
||||
protocol: hickory_resolver::config::Protocol::Tcp,
|
||||
protocol: Protocol::Tcp,
|
||||
tls_dns_name: None,
|
||||
http_endpoint: None,
|
||||
trust_negative_responses: false,
|
||||
bind_addr: None,
|
||||
});
|
||||
|
@ -290,6 +298,7 @@ pub async fn run_dns(responses: HashMap<Name, Vec<IpAddr>>) -> anyhow::Result<Te
|
|||
}),
|
||||
state.clone(),
|
||||
),
|
||||
Some("prefered-namespace".to_string()),
|
||||
)
|
||||
.await?;
|
||||
|
||||
|
@ -353,12 +362,13 @@ impl crate::dns::Forwarder for FakeForwarder {
|
|||
_: Option<&Workload>,
|
||||
request: &Request,
|
||||
) -> Result<Answer, LookupError> {
|
||||
let name: Name = request.query().name().into();
|
||||
let query = request.request_info()?.query;
|
||||
let name: Name = query.name().into();
|
||||
let utf = name.to_string();
|
||||
if let Some(ip) = utf.strip_suffix(".reflect.internal.") {
|
||||
// Magic to allow `ip.reflect.internal` to always return ip (like nip.io)
|
||||
return Ok(Answer::new(
|
||||
vec![a(request.query().name().into(), ip.parse().unwrap())],
|
||||
vec![a(query.name().into(), ip.parse().unwrap())],
|
||||
false,
|
||||
));
|
||||
}
|
||||
|
@ -368,17 +378,18 @@ impl crate::dns::Forwarder for FakeForwarder {
|
|||
};
|
||||
|
||||
let mut out = Vec::new();
|
||||
let rtype = request.query().query_type();
|
||||
|
||||
let rtype = query.query_type();
|
||||
for ip in ips {
|
||||
match ip {
|
||||
IpAddr::V4(ip) => {
|
||||
if rtype == RecordType::A {
|
||||
out.push(a(request.query().name().into(), *ip));
|
||||
out.push(a(query.name().into(), *ip));
|
||||
}
|
||||
}
|
||||
IpAddr::V6(ip) => {
|
||||
if rtype == RecordType::AAAA {
|
||||
out.push(aaaa(request.query().name().into(), *ip));
|
||||
out.push(aaaa(query.name().into(), *ip));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -14,7 +14,8 @@
|
|||
|
||||
use pin_project_lite::pin_project;
|
||||
use tonic::Status;
|
||||
use tonic::body::BoxBody;
|
||||
use tonic::body::Body;
|
||||
|
||||
use tower::{BoxError, ServiceExt};
|
||||
|
||||
// Copied from https://github.com/hyperium/tonic/blob/34b863b1d2a204ef3dd871ec86860fc92aafb451/examples/src/tls_rustls/server.rs
|
||||
|
@ -25,7 +26,7 @@ use tower::{BoxError, ServiceExt};
|
|||
/// and does not support the `poll_ready` method that is used by tower services.
|
||||
///
|
||||
/// This is provided here because the equivalent adaptor in hyper-util does not support
|
||||
/// tonic::body::BoxBody bodies.
|
||||
/// tonic::body::Body bodies.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct TowerToHyperService<S> {
|
||||
service: S,
|
||||
|
@ -40,20 +41,17 @@ impl<S> TowerToHyperService<S> {
|
|||
|
||||
impl<S> hyper::service::Service<hyper::Request<hyper::body::Incoming>> for TowerToHyperService<S>
|
||||
where
|
||||
S: tower::Service<hyper::Request<BoxBody>> + Clone,
|
||||
S: tower::Service<hyper::Request<Body>> + Clone,
|
||||
S::Error: Into<BoxError> + 'static,
|
||||
{
|
||||
type Response = S::Response;
|
||||
type Error = BoxError;
|
||||
type Future = TowerToHyperServiceFuture<S, hyper::Request<BoxBody>>;
|
||||
type Future = TowerToHyperServiceFuture<S, hyper::Request<Body>>;
|
||||
|
||||
fn call(&self, req: hyper::Request<hyper::body::Incoming>) -> Self::Future {
|
||||
use http_body_util::BodyExt;
|
||||
let req = req.map(|incoming| {
|
||||
incoming
|
||||
.map_err(|err| Status::from_error(err.into()))
|
||||
.boxed_unsync()
|
||||
});
|
||||
let req =
|
||||
req.map(|incoming| Body::new(incoming.map_err(|err| Status::from_error(err.into()))));
|
||||
TowerToHyperServiceFuture {
|
||||
future: self.service.clone().oneshot(req),
|
||||
}
|
||||
|
|
|
@ -124,38 +124,104 @@ impl WorkloadManager {
|
|||
wli: Option<state::WorkloadInfo>,
|
||||
) -> anyhow::Result<TestApp> {
|
||||
let mut inpod_uds: PathBuf = "/dev/null".into();
|
||||
let ztunnel_server = if self.mode == Shared {
|
||||
inpod_uds = self.tmp_dir.join(node);
|
||||
Some(start_ztunnel_server(inpod_uds.clone()).await)
|
||||
let current_mode = self.mode;
|
||||
let proxy_mode = match current_mode {
|
||||
Shared => ProxyMode::Shared,
|
||||
Dedicated => ProxyMode::Dedicated,
|
||||
};
|
||||
let ztunnel_name = format!("ztunnel-{node}");
|
||||
|
||||
// Define ztunnel's own identity and workload info if it's a Shared proxy.
|
||||
// These are used for registering ztunnel as a workload and for cfg.ztunnel_identity/workload.
|
||||
let ztunnel_shared_identity: Option<identity::Identity> = if proxy_mode == ProxyMode::Shared
|
||||
{
|
||||
Some(identity::Identity::Spiffe {
|
||||
trust_domain: "cluster.local".into(),
|
||||
namespace: "default".into(),
|
||||
service_account: ztunnel_name.clone().into(),
|
||||
})
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let ns = TestWorkloadBuilder::new(&format!("ztunnel-{node}"), self)
|
||||
.on_node(node)
|
||||
.uncaptured()
|
||||
.register()
|
||||
.await?;
|
||||
|
||||
let ztunnel_shared_workload_info: Option<state::WorkloadInfo> =
|
||||
if proxy_mode == ProxyMode::Shared {
|
||||
Some(state::WorkloadInfo::new(
|
||||
ztunnel_name.clone(),
|
||||
"default".to_string(),
|
||||
ztunnel_name.clone(),
|
||||
))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let ztunnel_server = match current_mode {
|
||||
Shared => {
|
||||
inpod_uds = self.tmp_dir.join(node);
|
||||
Some(start_ztunnel_server(inpod_uds.clone()).await)
|
||||
}
|
||||
Dedicated => None,
|
||||
};
|
||||
|
||||
let ns = match current_mode {
|
||||
Shared => {
|
||||
// Shared mode: Ztunnel has its own identity, registered as HBONE
|
||||
TestWorkloadBuilder::new(&ztunnel_name, self)
|
||||
.on_node(node)
|
||||
.identity(
|
||||
ztunnel_shared_identity
|
||||
.clone()
|
||||
.expect("Shared mode must have an identity for ztunnel registration"),
|
||||
)
|
||||
.hbone() // Shared ztunnel uses HBONE protocol
|
||||
.register()
|
||||
.await?
|
||||
}
|
||||
Dedicated => {
|
||||
TestWorkloadBuilder::new(&ztunnel_name, self)
|
||||
.on_node(node)
|
||||
.uncaptured() // Dedicated ztunnel is treated as uncaptured TCP
|
||||
.register()
|
||||
.await?
|
||||
}
|
||||
};
|
||||
let _ztunnel_local_workload = self
|
||||
.workloads
|
||||
.last()
|
||||
.cloned()
|
||||
.expect("ztunnel workload should be registered");
|
||||
|
||||
let ip = ns.ip();
|
||||
let initial_config = LocalConfig {
|
||||
workloads: self.workloads.clone(),
|
||||
policies: self.policies.clone(),
|
||||
services: self.services.values().cloned().collect_vec(),
|
||||
};
|
||||
let proxy_mode = if ztunnel_server.is_some() {
|
||||
ProxyMode::Shared
|
||||
} else {
|
||||
ProxyMode::Dedicated
|
||||
};
|
||||
let (mut tx_cfg, rx_cfg) = mpsc_ack(1);
|
||||
tx_cfg.send(initial_config).await?;
|
||||
let local_xds_config = Some(ConfigSource::Dynamic(Arc::new(Mutex::new(rx_cfg))));
|
||||
|
||||
// Config for ztunnel's own identity and workload, primarily for when it acts as a server (metrics endpoint).
|
||||
let cfg_ztunnel_identity = ztunnel_shared_identity.clone();
|
||||
let cfg_ztunnel_workload_info = ztunnel_shared_workload_info.clone();
|
||||
|
||||
// Config for the workload this ztunnel instance is proxying for:
|
||||
// If Shared, ztunnel is effectively proxying for itself
|
||||
// If Dedicated, it's for the application workload `wli`
|
||||
let cfg_proxy_workload_information = match proxy_mode {
|
||||
// Ztunnel's own info for shared mode proxy
|
||||
ProxyMode::Shared => ztunnel_shared_workload_info.clone(),
|
||||
// Application's workload info for dedicated mode
|
||||
ProxyMode::Dedicated => wli,
|
||||
};
|
||||
|
||||
let cfg = config::Config {
|
||||
xds_address: None,
|
||||
dns_proxy: true,
|
||||
fake_ca: true,
|
||||
local_xds_config,
|
||||
local_node: Some(node.to_string()),
|
||||
proxy_workload_information: wli,
|
||||
proxy_workload_information: cfg_proxy_workload_information,
|
||||
inpod_uds,
|
||||
proxy_mode,
|
||||
// We use packet mark even in dedicated to distinguish proxy from application
|
||||
|
@ -166,12 +232,16 @@ impl WorkloadManager {
|
|||
Some(true)
|
||||
},
|
||||
localhost_app_tunnel: true,
|
||||
ztunnel_identity: cfg_ztunnel_identity,
|
||||
ztunnel_workload: cfg_ztunnel_workload_info,
|
||||
..config::parse_config().unwrap()
|
||||
};
|
||||
|
||||
let (tx, rx) = std::sync::mpsc::sync_channel(0);
|
||||
// Setup the ztunnel...
|
||||
let cloned_ns = ns.clone();
|
||||
let cloned_ns2 = ns.clone();
|
||||
let ztunnel_identity = ztunnel_shared_identity.clone();
|
||||
// run_ready will spawn a thread and block on it. Run with spawn_blocking so it doesn't block the runtime.
|
||||
tokio::task::spawn_blocking(move || {
|
||||
ns.run_ready(move |ready| async move {
|
||||
|
@ -210,9 +280,9 @@ impl WorkloadManager {
            ip,
        )),
        cert_manager,

        namespace: Some(cloned_ns),
        shutdown,
        ztunnel_identity: ztunnel_identity.clone(),
    };
    ta.ready().await;
    info!("ready");

@ -512,16 +582,18 @@ impl<'a> TestWorkloadBuilder<'a> {
pub async fn register(mut self) -> anyhow::Result<Namespace> {
    let zt = self.manager.ztunnels.get(self.w.workload.node.as_str());
    let node = self.w.workload.node.clone();
    let network_namespace = if self.manager.mode == Dedicated && zt.is_some() {
        // This is a bit of hack. For dedicated mode, we run the app and ztunnel in the same namespace
        // We probably should express this more natively in the framework, but for now we just detect it
        // and re-use the namespace.
        tracing::info!("node already has ztunnel and dedicate mode, sharing");
        zt.as_ref().unwrap().namespace.clone()
    } else {
        self.manager
    let network_namespace = match (self.manager.mode, zt.is_some()) {
        (Dedicated, true) => {
            // This is a bit of hack. For dedicated mode, we run the app and ztunnel in the same namespace
            // We probably should express this more natively in the framework, but for now we just detect it
            // and re-use the namespace.
            tracing::info!("node already has ztunnel and dedicate mode, sharing");
            zt.as_ref().unwrap().namespace.clone()
        }
        _ => self
            .manager
            .namespaces
            .child(&self.w.workload.node, &self.w.workload.name)?
            .child(&self.w.workload.node, &self.w.workload.name)?,
    };
    if self.w.workload.network_gateway.is_some() {
        // This is a little inefficient, because we create the

@ -569,7 +641,7 @@ impl<'a> TestWorkloadBuilder<'a> {
let fd = network_namespace.netns().file().as_raw_fd();
let msg = inpod::Message::Start(inpod::StartZtunnelMessage {
    uid: uid.to_string(),
    workload_info: Some(wli),
    workload_info: Some(wli.clone()),
    fd,
});
zt_info

@ -30,7 +30,7 @@ use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use std::time::Duration;
use tonic::body::BoxBody;
use tonic::body::Body;
use tracing::debug;

async fn root_to_store(root_cert: &RootCert) -> Result<rustls::RootCertStore, Error> {

@ -199,7 +199,7 @@ async fn control_plane_client_config(
#[derive(Clone, Debug)]
pub struct TlsGrpcChannel {
    uri: Uri,
    client: hyper_util::client::legacy::Client<HttpsConnector<HttpConnector>, BoxBody>,
    client: hyper_util::client::legacy::Client<HttpsConnector<HttpConnector>, Body>,
    auth: Arc<AuthSource>,
}

@ -244,7 +244,7 @@ pub fn grpc_connector(
    })
}

impl tower::Service<http::Request<BoxBody>> for TlsGrpcChannel {
impl tower::Service<http::Request<Body>> for TlsGrpcChannel {
    type Response = http::Response<Incoming>;
    type Error = anyhow::Error;
    type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;

@ -253,7 +253,7 @@ impl tower::Service<http::Request<BoxBody>> for TlsGrpcChannel {
        Ok(()).into()
    }

    fn call(&mut self, mut req: http::Request<BoxBody>) -> Self::Future {
    fn call(&mut self, mut req: http::Request<Body>) -> Self::Future {
        let mut uri = Uri::builder();
        if let Some(scheme) = self.uri.scheme() {
            uri = uri.scheme(scheme.to_owned());

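The three hunks above only swap the gRPC channel's request body type from the re-exported BoxBody to tonic's Body; the Service plumbing itself is unchanged. For readers unfamiliar with tower, a minimal, self-contained implementation of the same trait (toy types and names, assuming the tower crate, not ztunnel's TlsGrpcChannel) has this shape:

use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

// Toy service: echoes the request string back.
struct Echo;

impl tower::Service<String> for Echo {
    type Response = String;
    type Error = std::convert::Infallible;
    type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;

    // Always ready; a real channel might gate on connection state here.
    fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, req: String) -> Self::Future {
        Box::pin(async move { Ok(req) })
    }
}

#[tokio::main]
async fn main() {
    use tower::Service;
    let mut svc = Echo;
    // poll_ready is trivially Ready here, so we can call directly.
    let resp = svc.call("hello".to_string()).await.unwrap();
    assert_eq!(resp, "hello");
}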
@ -124,28 +124,44 @@ impl CsrOptions {
#[cfg(test)]
mod tests {
    use crate::tls;
    use itertools::Itertools;

    #[test]
    fn test_csr() {
        use x509_parser::prelude::FromDer;
        use x509_parser::prelude::*;
        let csr = tls::csr::CsrOptions {
            san: "spiffe://td/ns/ns1/sa/sa1".to_string(),
        }
        .generate()
        .unwrap();

        let (_, der) = x509_parser::pem::parse_x509_pem(csr.csr.as_bytes()).unwrap();

        let (_, cert) =
            x509_parser::certification_request::X509CertificationRequest::from_der(&der.contents)
                .unwrap();
        cert.verify_signature().unwrap();
        let subject = cert.certification_request_info.subject.iter().collect_vec();
        assert_eq!(subject.len(), 0);
        let attr = cert
            .certification_request_info
            .iter_attributes()
            .next()
            .unwrap();
        // SAN is encoded in some format I don't understand how to parse; this could be improved.
        // but make sure it's there in a hacky manner
        assert!(attr.value.ends_with(b"spiffe://td/ns/ns1/sa/sa1"));

        let ParsedCriAttribute::ExtensionRequest(parsed) = attr.parsed_attribute() else {
            panic!("not a ExtensionRequest")
        };
        let ext = parsed.clone().extensions;
        assert_eq!(ext.len(), 1);
        let ext = ext.into_iter().next().unwrap();
        assert!(ext.critical);
        let ParsedExtension::SubjectAlternativeName(san) = ext.parsed_extension() else {
            panic!("not a SubjectAlternativeName")
        };
        assert_eq!(
            &format!("{san:?}"),
            "SubjectAlternativeName { general_names: [URI(\"spiffe://td/ns/ns1/sa/sa1\")] }"
        )
    }
}

@ -14,6 +14,8 @@
use super::Error;

#[allow(unused_imports)]
use crate::PQC_ENABLED;
use crate::identity::{self, Identity};

use std::fmt::Debug;

@ -40,6 +42,15 @@ pub trait ServerCertProvider: Send + Sync + Clone {
pub(super) static TLS_VERSIONS: &[&rustls::SupportedProtocolVersion] = &[&rustls::version::TLS13];

#[cfg(feature = "tls-aws-lc")]
pub static CRYPTO_PROVIDER: &str = "tls-aws-lc";
#[cfg(feature = "tls-ring")]
pub static CRYPTO_PROVIDER: &str = "tls-ring";
#[cfg(feature = "tls-boring")]
pub static CRYPTO_PROVIDER: &str = "tls-boring";
#[cfg(feature = "tls-openssl")]
pub static CRYPTO_PROVIDER: &str = "tls-openssl";

// Ztunnel use `rustls` with pluggable crypto modules.
// All crypto MUST be done via the below providers.
//

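Because each CRYPTO_PROVIDER definition above is gated on a different cargo feature, exactly one TLS backend feature must be active for the crate to compile: enabling two produces a duplicate-definition error, and enabling none leaves the symbol undefined. A minimal standalone illustration of the same pattern (hypothetical feature names, which would need to be declared in Cargo.toml with exactly one enabled):

// Exactly one of the two features below must be enabled when compiling this crate;
// otherwise BACKEND is either missing or defined twice.
#[cfg(feature = "backend-a")]
pub static BACKEND: &str = "backend-a";
#[cfg(feature = "backend-b")]
pub static BACKEND: &str = "backend-b";

fn main() {
    println!("compiled against {BACKEND}");
}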
@ -68,14 +79,20 @@ pub(super) fn provider() -> Arc<CryptoProvider> {
#[cfg(feature = "tls-aws-lc")]
pub(super) fn provider() -> Arc<CryptoProvider> {
    Arc::new(CryptoProvider {
    let mut provider = CryptoProvider {
        // Limit to only the subset of ciphers that are FIPS compatible
        cipher_suites: vec![
            rustls::crypto::aws_lc_rs::cipher_suite::TLS13_AES_256_GCM_SHA384,
            rustls::crypto::aws_lc_rs::cipher_suite::TLS13_AES_128_GCM_SHA256,
        ],
        ..rustls::crypto::aws_lc_rs::default_provider()
    })
    };

    if *PQC_ENABLED {
        provider.kx_groups = vec![rustls::crypto::aws_lc_rs::kx_group::X25519MLKEM768]
    }

    Arc::new(provider)
}

#[cfg(feature = "tls-openssl")]

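The hunk keys the key-exchange override off a process-wide flag, `*PQC_ENABLED`, i.e. a lazily initialized static boolean that is dereferenced at provider construction time. As a from-scratch analogue only (the flag name is reused for illustration, but the environment variable and initialization logic here are invented, not ztunnel's), such a flag can be built with std::sync::LazyLock:

use std::sync::LazyLock;

// Invented analogue of a process-wide flag like PQC_ENABLED; the env var name
// is hypothetical and not taken from ztunnel.
static PQC_ENABLED: LazyLock<bool> =
    LazyLock::new(|| std::env::var("ENABLE_PQC").map(|v| v == "true").unwrap_or(false));

fn main() {
    println!("post-quantum key exchange enabled: {}", *PQC_ENABLED);
}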
@ -17,10 +17,11 @@ use std::fmt;
use std::fmt::{Display, Formatter};
use std::string::String;

use crate::tls::CRYPTO_PROVIDER;

const BUILD_VERSION: &str = env!("ZTUNNEL_BUILD_buildVersion");
const BUILD_GIT_REVISION: &str = env!("ZTUNNEL_BUILD_buildGitRevision");
const BUILD_STATUS: &str = env!("ZTUNNEL_BUILD_buildStatus");
const BUILD_TAG: &str = env!("ZTUNNEL_BUILD_buildTag");
const BUILD_RUST_VERSION: &str = env!("ZTUNNEL_BUILD_RUSTC_VERSION");
const BUILD_RUST_PROFILE: &str = env!("ZTUNNEL_BUILD_PROFILE_NAME");

@ -32,8 +33,8 @@ pub struct BuildInfo {
    rust_version: String,
    build_profile: String,
    build_status: String,
    git_tag: String,
    pub istio_version: String,
    crypto_provider: String,
}

impl BuildInfo {

@ -44,8 +45,9 @@ impl BuildInfo {
            rust_version: BUILD_RUST_VERSION.to_string(),
            build_profile: BUILD_RUST_PROFILE.to_string(),
            build_status: BUILD_STATUS.to_string(),
            git_tag: BUILD_TAG.to_string(),
            istio_version: env::var("ISTIO_VERSION").unwrap_or_else(|_| "unknown".to_string()),
            istio_version: env::var("ISTIO_META_ISTIO_VERSION")
                .unwrap_or_else(|_| "unknown".to_string()),
            crypto_provider: CRYPTO_PROVIDER.to_string(),
        }
    }
}

@ -54,14 +56,14 @@ impl Display for BuildInfo {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "version.BuildInfo{{Version:\"{}\", GitRevision:\"{}\", RustVersion:\"{}\", BuildProfile:\"{}\", BuildStatus:\"{}\", GitTag:\"{}\", IstioVersion:\"{}\"}}",
            "version.BuildInfo{{Version:\"{}\", GitRevision:\"{}\", RustVersion:\"{}\", BuildProfile:\"{}\", BuildStatus:\"{}\", IstioVersion:\"{}\", CryptoProvider:\"{}\"}}",
            self.version,
            self.git_revision,
            self.rust_version,
            self.build_profile,
            self.build_status,
            self.git_tag,
            self.istio_version
            self.istio_version,
            self.crypto_provider,
        )
    }
}

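For orientation, the new format string drops the GitTag field and appends CryptoProvider, so a rendered line would look roughly like the following (every field value here is invented for illustration):

version.BuildInfo{Version:"1.26.0-dev", GitRevision:"85a94b6cc4", RustVersion:"1.85.0", BuildProfile:"release", BuildStatus:"Clean", IstioVersion:"1.26.0", CryptoProvider:"tls-ring"}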
@ -18,14 +18,16 @@ mod namespaced {
use futures::future::poll_fn;
use http_body_util::Empty;
use std::collections::HashMap;
use ztunnel::state::workload::ApplicationTunnel;
use ztunnel::state::workload::application_tunnel::Protocol;
use ztunnel::state::workload::gatewayaddress::Destination;
use ztunnel::state::workload::{GatewayAddress, NamespacedHostname};
use ztunnel::test_helpers::linux::TestMode;

use std::net::{IpAddr, SocketAddr};

use anyhow::Context;
use std::str::FromStr;
use std::sync::{Arc, Mutex};
use std::thread::JoinHandle;
use std::time::Duration;
use ztunnel::rbac::{Authorization, RbacMatch, StringMatch};

@ -39,18 +41,17 @@ mod namespaced {
use tokio::time::timeout;
use tracing::{error, info};

use ztunnel::state::workload::{ApplicationTunnel, NetworkAddress};
use ztunnel::test_helpers::app::ParsedMetrics;
use ztunnel::test_helpers::app::TestApp;
use ztunnel::state::workload::NetworkAddress;
use ztunnel::test_helpers::app::{ParsedMetrics, TestApp};
use ztunnel::test_helpers::linux::TestMode::{Dedicated, Shared};
use ztunnel::test_helpers::linux::WorkloadManager;
use ztunnel::test_helpers::netns::{Namespace, Resolver};
use ztunnel::test_helpers::*;

use ztunnel::{identity, strng, telemetry};

use crate::namespaced::WorkloadMode::Captured;
use ztunnel::setup_netns_test;
use ztunnel::test_helpers::linux::TestMode::{Dedicated, Shared};
use ztunnel::test_helpers::linux::WorkloadManager;
use ztunnel::test_helpers::netns::{Namespace, Resolver};
use ztunnel::test_helpers::*;

const WAYPOINT_MESSAGE: &[u8] = b"waypoint\n";

@ -926,27 +927,12 @@ mod namespaced {
// Now shutdown the server. In real world, the server app would shutdown, then ztunnel would remove itself.
// In this test, we will leave the server app running, but shutdown ztunnel.
manager.delete_workload("server").await.unwrap();
// Request should fail now
let tx = Arc::new(Mutex::new(tx));
#[allow(clippy::await_holding_lock)]
assert_eventually(
    Duration::from_secs(2),
    || async { tx.lock().unwrap().send_and_wait(()).await.is_err() },
    true,
)
.await;
// Close the connection
drop(tx);

// Should fail as the last request fails
assert!(cjh.join().unwrap().is_err());

// Now try to connect and make sure it fails
// In shared mode, verify that new connections succeed but data transfer fails
client
    .run_and_wait(move || async move {
        let mut stream = TcpStream::connect(srv).await.unwrap();
        // We should be able to connect (since client is running), but not send a request

        const BODY: &[u8] = b"hello world";
        stream.write_all(BODY).await.unwrap();
        let mut buf = [0; BODY.len() * 2];

@ -955,6 +941,16 @@ mod namespaced {
        Ok(())
    })
    .unwrap();

// The long running connection should also fail on next attempt
let tx_send_result = tx.send_and_wait(()).await;
assert!(
    tx_send_result.is_err(),
    "long running connection should fail after workload deletion"
);

drop(tx);
assert!(cjh.join().unwrap().is_err());
Ok(())
}

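The reworked test relies on a general TCP property: after the server side is torn down, a new connection may still be accepted and the first write may even succeed (it only queues bytes), and the failure only surfaces once the peer's reset comes back. A self-contained sketch of that behavior, written from scratch with tokio (not ztunnel's test harness):

use tokio::io::AsyncWriteExt;
use tokio::net::{TcpListener, TcpStream};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let listener = TcpListener::bind("127.0.0.1:0").await?;
    let addr = listener.local_addr()?;
    let mut client = TcpStream::connect(addr).await?;
    // Accept and immediately drop the server side, closing the connection.
    let (server_side, _) = listener.accept().await?;
    drop(server_side);

    // Keep writing until the peer's reset surfaces as an error.
    let mut saw_error = false;
    for _ in 0..50 {
        if client.write_all(b"hello").await.is_err() {
            saw_error = true;
            break;
        }
        tokio::time::sleep(std::time::Duration::from_millis(10)).await;
    }
    assert!(saw_error, "expected write to fail after peer closed");
    Ok(())
}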
@ -1220,7 +1216,7 @@ mod namespaced {
vec![
    (zt, 15001, Request), // Outbound: should be blocked due to recursive call
    (zt, 15006, Request), // Inbound: should be blocked due to recursive call
    (zt, 15008, Request), // HBONE: expected TLS, reject
    (zt, 15008, Request), // HBONE: Connection succeeds (ztunnel listens) but request fails due to TLS
    // Localhost still get connection established, as ztunnel accepts anything. But they are dropped immediately.
    (zt, 15080, Request), // socks5: localhost
    (zt, 15000, Request), // admin: localhost

@ -1252,7 +1248,7 @@ mod namespaced {
    // Ztunnel doesn't listen on these ports...
    (zt, 15001, Connection), // Outbound: should be blocked due to recursive call
    (zt, 15006, Connection), // Inbound: should be blocked due to recursive call
    (zt, 15008, Connection), // HBONE: expected TLS, reject
    (zt, 15008, Request), // HBONE: Connection succeeds (ztunnel listens) but request fails due to TLS
    // Localhost is not accessible
    (zt, 15080, Connection), // socks5: localhost
    (zt, 15000, Connection), // admin: localhost

@ -1329,66 +1325,191 @@ mod namespaced {
let id1s = id1.to_string();

let ta = manager.deploy_ztunnel(DEFAULT_NODE).await?;
let ztunnel_identity_obj = ta.ztunnel_identity.as_ref().unwrap().clone();
ta.cert_manager
    .fetch_certificate(&ztunnel_identity_obj)
    .await?;
let ztunnel_identity_str = ztunnel_identity_obj.to_string();

let check = |want: Vec<String>, help: &str| {
    let cm = ta.cert_manager.clone();
    let help = help.to_string();
    let mut sorted_want = want.clone();
    sorted_want.sort();
    async move {
        // Cert manager is async, so we need to wait
        let res = check_eventually(
            Duration::from_secs(2),
            || cm.collect_certs(|a, _b| a.to_string()),
            want,
            || async {
                let mut certs = cm.collect_certs(|a, _b| a.to_string()).await;
                certs.sort();
                certs
            },
            sorted_want,
        )
        .await;
        assert!(res.is_ok(), "{}: got {:?}", help, res.err().unwrap());
    }
};
check(vec![], "initially empty").await;
check(
    vec![ztunnel_identity_str.clone()],
    "initially only ztunnel cert",
)
.await;

manager
    .workload_builder("id1-a-remote-node", REMOTE_NODE)
    .identity(id1.clone())
    .register()
    .await?;
check(vec![], "we should not prefetch remote nodes").await;
check(
    vec![ztunnel_identity_str.clone()],
    "we should not prefetch remote nodes",
)
.await;

manager
    .workload_builder("id1-a-same-node", DEFAULT_NODE)
    .identity(id1.clone())
    .register()
    .await?;
check(vec![id1s.clone()], "we should prefetch our nodes").await;
check(
    vec![ztunnel_identity_str.clone(), id1s.clone()],
    "we should prefetch our nodes",
)
.await;

manager
    .workload_builder("id1-b-same-node", DEFAULT_NODE)
    .identity(id1.clone())
    .register()
    .await?;
check(
    vec![id1s.clone()],
    vec![ztunnel_identity_str.clone(), id1s.clone()],
    "multiple of same identity shouldn't do anything",
)
.await;
manager.delete_workload("id1-a-remote-node").await?;
// Deleting remote node should not affect local certs if local workloads still exist
check(
    vec![id1s.clone()],
    vec![ztunnel_identity_str.clone(), id1s.clone()],
    "removing remote node shouldn't impact anything",
)
.await;
manager.delete_workload("id1-b-same-node").await?;
// Deleting one local node shouldn't impact certs if another local workload still exists
check(
    vec![id1s.clone()],
    vec![ztunnel_identity_str.clone(), id1s.clone()],
    "removing local node shouldn't impact anything if I still have some running",
)
.await;
manager.delete_workload("id1-a-same-node").await?;
// TODO: this should be vec![], but our testing setup doesn't exercise the real codepath
// After deleting all workloads using sa1, give cert manager time to clean up
tokio::time::sleep(Duration::from_millis(100)).await;

// In shared mode, certificates may be kept alive by the inbound listener
// for handling inbound connections, even after workload deletion
let expected_certs = match manager.mode() {
    TestMode::Shared => vec![ztunnel_identity_str.clone(), id1s.clone()],
    TestMode::Dedicated => vec![ztunnel_identity_str.clone()],
};
check(
    vec![id1s.clone()],
    "removing final workload should clear things out",
    expected_certs,
    "removing final workload should clear certs except those needed by inbound listener",
)
.await;
Ok(())
}

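The updated test drives all of its assertions through check_eventually with a sorting probe, since certificate fetches land asynchronously. As background, a self-contained helper with the same poll-until-deadline shape (written from scratch here with the made-up name poll_until; it is not ztunnel's actual helper or its signature) looks like this:

use std::time::Duration;

// Poll an async probe until it yields `want` or the deadline passes.
// Returns the last observed value on timeout so the caller can report it.
async fn poll_until<T, F, Fut>(deadline: Duration, mut probe: F, want: T) -> Result<T, T>
where
    T: PartialEq,
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = T>,
{
    let end = tokio::time::Instant::now() + deadline;
    loop {
        let got = probe().await;
        if got == want {
            return Ok(got);
        }
        if tokio::time::Instant::now() >= end {
            return Err(got);
        }
        tokio::time::sleep(Duration::from_millis(10)).await;
    }
}

#[tokio::main]
async fn main() {
    // Trivial usage: the probe immediately returns the wanted value.
    let res = poll_until(Duration::from_secs(1), || async { 42 }, 42).await;
    assert!(res.is_ok());
}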
#[tokio::test]
async fn test_hbone_metrics_access() -> Result<(), anyhow::Error> {
    let mut manager = setup_netns_test!(Shared);

    // Deploy ztunnel for the node
    let zt = manager.deploy_ztunnel(DEFAULT_NODE).await?;
    let ztunnel_node_ip = manager.resolve("ztunnel-node")?;
    // Use the actual metrics address ztunnel is listening on (e.g., [::]:15020)
    // but combine it with the node IP for the client to target.
    let target_metrics_addr = SocketAddr::new(ztunnel_node_ip, zt.metrics_address.port());
    let target_metrics_url = format!("http://{}/metrics", target_metrics_addr);

    // Deploy a client workload (simulating Prometheus)
    let client = manager
        .workload_builder("client", DEFAULT_NODE)
        .register()
        .await?;

    let zt_identity_str = zt.ztunnel_identity.as_ref().unwrap().to_string();

    // Client makes a standard HTTP GET request to ztunnel's metrics endpoint
    // Ztunnel's outbound capture should intercept this, initiate HBONE to its own inbound,
    // which then proxies to the internal metrics server.
    client
        .run(move || async move {
            info!(target=%target_metrics_url, "Client attempting standard HTTP GET to metrics endpoint");

            let client = hyper_util::client::legacy::Client::builder(
                ztunnel::hyper_util::TokioExecutor,
            )
            .build_http();

            let req = hyper::Request::builder()
                .method(Method::GET)
                .uri(&target_metrics_url)
                .body(Empty::<Bytes>::new())?;

            let response = client.request(req).await?;

            info!("Received response status: {:?}", response.status());
            assert_eq!(response.status(), StatusCode::OK, "GET request failed");

            let body_bytes = http_body_util::BodyExt::collect(response.into_body())
                .await?
                .to_bytes();
            let response_str = String::from_utf8_lossy(&body_bytes);

            assert!(
                response_str.contains("# TYPE"),
                "Expected Prometheus metrics (# TYPE) in response, got:\n{}",
                response_str
            );
            info!("Successfully verified metrics response body");

            Ok(())
        })?
        .join()
        .unwrap()?;

    // Verify metrics from the DESTINATION perspective (ztunnel handling its own inbound)
    let metrics = [
        (CONNECTIONS_OPENED, 1), // One connection opened (client -> zt inbound via HBONE)
        (CONNECTIONS_CLOSED, 1), // One connection closed
    ];
    verify_metrics(&zt, &metrics, &destination_labels()).await;

    // Verify INBOUND telemetry log for the metrics connection
    let dst_addr_log = format!("{}:15008", ztunnel_node_ip);
    let dst_hbone_addr_log = format!("{}", target_metrics_addr);

    // We don't know exact byte counts, so omit them from the check for now
    let want = HashMap::from([
        ("scope", "access"),
        ("src.workload", "client"),
        ("dst.workload", "ztunnel-node"), // ztunnel's workload name
        ("dst.addr", dst_addr_log.as_str()), // Connected to HBONE port
        ("dst.hbone_addr", dst_hbone_addr_log.as_str()), // Original target
        ("direction", "inbound"),
        ("message", "connection complete"), // Assuming success
        (
            "src.identity",
            "spiffe://cluster.local/ns/default/sa/client",
        ), // Client identity
        ("dst.identity", zt_identity_str.as_str()), // Ztunnel identity
    ]);
    telemetry::testing::assert_contains(want);

    Ok(())
}

const TEST_VIP: &str = "10.10.0.1";
const TEST_VIP2: &str = "10.10.0.2";
const TEST_VIP3: &str = "10.10.0.3";

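The telemetry assertion above checks a set of expected key/value attributes against the captured access log. As a from-scratch stand-in for that kind of subset match (this is not the implementation of telemetry::testing::assert_contains, only an illustration of the shape of the check), it amounts to:

use std::collections::HashMap;

// Check that every (key, value) pair in `want` appears in `got`.
fn contains_all(got: &HashMap<&str, &str>, want: &HashMap<&str, &str>) -> bool {
    want.iter().all(|(k, v)| got.get(k) == Some(v))
}

fn main() {
    let got = HashMap::from([
        ("direction", "inbound"),
        ("scope", "access"),
        ("src.workload", "client"),
    ]);
    let want = HashMap::from([("direction", "inbound"), ("scope", "access")]);
    assert!(contains_all(&got, &want));
}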
@ -1759,7 +1880,6 @@ mod namespaced {
}
use Failure::*;
use ztunnel::state::WorkloadInfo;
use ztunnel::state::workload::application_tunnel::Protocol;

async fn malicious_calls_test(
    client: Namespace,

@ -1786,14 +1906,14 @@ mod namespaced {
let stream = timeout(Duration::from_secs(1), TcpStream::connect(tgt)).await?;
error!("stream {stream:?}");
if failure == Connection {
    assert!(stream.is_err());
    assert!(stream.is_err(), "expected connection to fail for {tgt}");
    continue;
}
let mut stream = stream.unwrap();

let res = timeout(Duration::from_secs(1), send_traffic(&mut stream)).await?;
if failure == Request {
    assert!(res.is_err());
    assert!(res.is_err(), "expected request to fail for {tgt}");
    continue;
}
res.unwrap();