Compare commits

...

12 Commits

Author SHA1 Message Date
Istio Automation 248d945e5b
Automator: update common-files@release-1.25 in istio/ztunnel@release-1.25 (#1575) 2025-06-17 14:27:17 -04:00
Steven Jin 0f1f8c1ab2
[release-1.25] Better support for root certificate (#1547)
* Better support for root certificates

* Split the last element of the returned response from the CA. This
  allows support for multiple roots. Note this requires a change to
  Istio (which I am making) to revert the regression in 1.25.1
  (https://github.com/istio/istio/issues/55793).
* Split root from chain in the config_dump. Without this, it is
  impossible to know if something is a root or intermediate.

* add test

* lint

* format

---------

Co-authored-by: John Howard <john.howard@solo.io>
2025-05-05 16:49:43 -04:00
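The commit above splits the last element of the CA's chain, which may contain several concatenated PEM roots, into individual certificates before a root store is built. A minimal standalone sketch of that splitting step, assuming rustls-pemfile 2.x and rustls-pki-types as dependencies; in the file diffs below, `parse_cert_multi` plays this role in ztunnel itself:

```rust
// Sketch only: split a concatenated PEM bundle (the last element of the CA
// response) into individual root certificates.
// Assumes rustls-pemfile = "2" and rustls-pki-types as dependencies.
use std::io::{self, BufReader, Cursor};

use rustls_pki_types::CertificateDer;

fn split_roots(pem_bundle: &[u8]) -> io::Result<Vec<CertificateDer<'static>>> {
    let mut reader = BufReader::new(Cursor::new(pem_bundle));
    // Each PEM CERTIFICATE block in the bundle becomes one DER certificate.
    rustls_pemfile::certs(&mut reader).collect()
}

fn main() -> io::Result<()> {
    // Path is illustrative; point it at any bundle of one or more concatenated roots.
    let path = std::env::args()
        .nth(1)
        .unwrap_or_else(|| "root-cert.pem".to_string());
    let roots = split_roots(&std::fs::read(path)?)?;
    println!("parsed {} root certificate(s)", roots.len());
    Ok(())
}
```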
Istio Automation 713c6ecce8
http2: tune connection window size and add config (#1541)
Fixes https://github.com/istio/ztunnel/issues/1538

See the code comment for the motivation behind this change.

Co-authored-by: John Howard <john.howard@solo.io>
2025-04-24 16:07:02 -04:00
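The commit above makes the per-stream and connection-level windows configurable via `HTTP2_STREAM_WINDOW_SIZE`, `HTTP2_CONNECTION_WINDOW_SIZE`, and `HTTP2_FRAME_SIZE`, with the connection window defaulting to 4x the stream window. A hedged standalone sketch of that env-override-with-default pattern; ztunnel's own `parse_default` helper serves this purpose in the diff further below:

```rust
// Sketch of the env-override-with-default pattern behind the new HTTP2_* knobs.
use std::env;
use std::num::ParseIntError;

fn parse_default(name: &str, default: u32) -> Result<u32, ParseIntError> {
    match env::var(name) {
        Ok(raw) => raw.parse(),
        Err(_) => Ok(default),
    }
}

fn main() -> Result<(), ParseIntError> {
    // Per-stream window stays at 4 MiB by default.
    let stream_window = parse_default("HTTP2_STREAM_WINDOW_SIZE", 4 * 1024 * 1024)?;
    // Connection window defaults to 4x the stream window so one stream that stops
    // reading cannot consume the whole connection budget and deadlock its peers.
    let connection_window = parse_default("HTTP2_CONNECTION_WINDOW_SIZE", 16 * 1024 * 1024)?;
    let frame_size = parse_default("HTTP2_FRAME_SIZE", 1024 * 1024)?;
    println!("stream={stream_window} connection={connection_window} frame={frame_size}");
    Ok(())
}
```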
Istio Automation 3a7f6c73b2
[release-1.25] fix istio_build metric (#1534)
* fix istio_build metric

* fmt

---------

Co-authored-by: zirain <zirain2009@gmail.com>
2025-04-21 14:27:51 -04:00
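For context, the fix above switches the version lookup to the `ISTIO_META_ISTIO_VERSION` environment variable (see the src/version diff below). A small illustrative sketch; the exposition line and its label names are assumptions following Istio's `istio_build` convention, not taken from this diff:

```rust
// Sketch only: the proxy learns its Istio version from ISTIO_META_ISTIO_VERSION
// (not ISTIO_VERSION), falling back to "unknown" when unset.
use std::env;

fn istio_version() -> String {
    env::var("ISTIO_META_ISTIO_VERSION").unwrap_or_else(|_| "unknown".to_string())
}

fn main() {
    // Illustrative exposition line; label names are assumed, not from this diff.
    println!("istio_build{{component=\"ztunnel\",tag=\"{}\"}} 1", istio_version());
}
```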
Istio Automation b8527fc5f2
Automator: update common-files@release-1.25 in istio/ztunnel@release-1.25 (#1523) 2025-04-10 17:30:18 -04:00
Istio Automation b621f6bed5
Simplify the xDS/CA headers parse logic (#1495)
Co-authored-by: Zhewei Hu <zhewei.hu33@gmail.com>
2025-03-27 13:27:41 -04:00
Istio Automation 8599e23165
[release-1.25] Add xDS and CA headers/metadata support (#1494)
* Add support for XDS headers and CA headers from environment variables

* Add unit tests

* Linting

* Make headers use metadata vector

* Linting

* Fix

* More fix

---------

Co-authored-by: Fuyuan Bie <fuyuanbie@microsoft.com>
Co-authored-by: Zhewei Hu <zhewei.hu33@gmail.com>
2025-03-27 12:39:41 -04:00
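A hedged usage sketch of the mechanism this commit adds: environment variables prefixed with `XDS_HEADER_` or `CA_HEADER_` become gRPC metadata attached to xDS and CA requests. The variable name below is illustrative; in ztunnel the parsing is done by `parse_headers`, which reports invalid keys or values as configuration errors rather than skipping them:

```rust
// Sketch only (variable names are illustrative): env vars prefixed with
// XDS_HEADER_ / CA_HEADER_ become gRPC metadata on xDS and CA requests.
use std::env;
use std::str::FromStr;

use tonic::metadata::{AsciiMetadataKey, AsciiMetadataValue};

fn headers_from_env(prefix: &str) -> Vec<(AsciiMetadataKey, AsciiMetadataValue)> {
    env::vars()
        .filter_map(|(key, value)| {
            let name = key.strip_prefix(prefix)?;
            // Invalid keys/values are skipped here; the real parser errors instead.
            let k = AsciiMetadataKey::from_str(name).ok()?;
            let v = AsciiMetadataValue::from_str(&value).ok()?;
            Some((k, v))
        })
        .collect()
}

fn main() {
    env::set_var("XDS_HEADER_x-mesh-tenant", "team-a");
    for (k, v) in headers_from_env("XDS_HEADER_") {
        // Prints: x-mesh-tenant: team-a
        println!("{}: {}", k, v.to_str().unwrap_or("<non-ascii>"));
    }
}
```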
Istio Automation 31902c7512
Automator: update common-files@release-1.25 in istio/ztunnel@release-1.25 (#1489) 2025-03-21 16:46:44 -04:00
John Howard 27babcee47
[1.25] deps: bump h2 (#1487)
https://github.com/hyperium/h2/releases/tag/v0.4.8 - this contains a fix
for a panic that can be triggered in ambient mode.
2025-03-17 23:49:39 -04:00
Istio Automation fb2a7be8e4
Automator: update common-files@release-1.25 in istio/ztunnel@release-1.25 (#1454) 2025-02-12 01:06:43 -05:00
Mike Morris 6bc8c589df
[release-1.25] Automated branching step 4 (#1447)
Co-authored-by: Mike Morris <1149913+mikemorris@users.noreply.github.com>
2025-02-10 11:28:43 -05:00
Istio Automation 2a7ad7c7e4
metrics: add src/dst locality information to labels (#1442)
Understanding cross-zone/cross-region traffic is one of the most
critical pieces of observing a cluster, from a cost, reliability, and
latency standpoint. Lots of work has been done in this space, from
standalone tools (https://github.com/polarsignals/kubezonnet/) to
enterprise upsells (I *think* Buoyant Enterprise has this, but not in
OSS?); we can provide this exact data trivially.

Co-authored-by: John Howard <john.howard@solo.io>
2025-01-30 15:24:52 -05:00
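To illustrate what the new labels enable: a counter keyed by source/destination region and zone yields one series per locality pair, which is what makes cross-zone traffic directly observable. The sketch below uses the prometheus-client crate's documented API and is not ztunnel's metrics code, which wraps these labels in `OptionallyEncode`/`DefaultedUnknown` as shown in the diff further below:

```rust
// Sketch using prometheus-client's documented API, not ztunnel's metrics code.
use prometheus_client::encoding::text::encode;
use prometheus_client::encoding::EncodeLabelSet;
use prometheus_client::metrics::counter::Counter;
use prometheus_client::metrics::family::Family;
use prometheus_client::registry::Registry;

#[derive(Clone, Debug, Hash, PartialEq, Eq, EncodeLabelSet)]
struct LocalityLabels {
    source_region: String,
    source_zone: String,
    destination_region: String,
    destination_zone: String,
}

fn main() {
    let mut registry = Registry::default();
    let connections = Family::<LocalityLabels, Counter>::default();
    registry.register("connections_opened", "Opened connections", connections.clone());

    connections
        .get_or_create(&LocalityLabels {
            source_region: "us-west1".into(),
            source_zone: "us-west1-a".into(),
            destination_region: "us-east1".into(),
            destination_zone: "us-east1-b".into(),
        })
        .inc();

    let mut out = String::new();
    encode(&mut out, &registry).unwrap();
    // Emits e.g. connections_opened_total{source_region="us-west1",...} 1
    print!("{out}");
}
```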
23 changed files with 457 additions and 79 deletions

View File

@ -1,6 +1,6 @@
{
"name": "istio build-tools",
"image": "gcr.io/istio-testing/build-tools:master-6bfe0028e941afdae35a3c5d4374bc08e3c04153",
"image": "gcr.io/istio-testing/build-tools:release-1.25-a2ad16fed8090d6633e8b86090a5bae1780c08f8",
"privileged": true,
"remoteEnv": {
"USE_GKE_GCLOUD_AUTH_PLUGIN": "True",

View File

@ -1,6 +1 @@
* @istio/wg-networking-maintainers-ztunnel
/Makefile* @istio/wg-test-and-release-maintainers
/*.md @istio/wg-test-and-release-maintainers
/common/ @istio/wg-test-and-release-maintainers
/common-protos/ @istio/wg-test-and-release-maintainers
/scripts/ @istio/wg-test-and-release-maintainers
* @istio/release-managers-1-25

Cargo.lock (generated, 6 lines changed)
View File

@ -1060,9 +1060,9 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
[[package]]
name = "h2"
version = "0.4.7"
version = "0.4.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ccae279728d634d083c00f6099cb58f01cc99c145b84b8be2f6c74618d79922e"
checksum = "5017294ff4bb30944501348f6f8e42e6ad28f42c8bbef7a74029aff064a4e3c2"
dependencies = [
"atomic-waker",
"bytes",
@ -1629,7 +1629,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34"
dependencies = [
"cfg-if",
"windows-targets 0.52.6",
"windows-targets 0.48.5",
]
[[package]]

View File

@ -79,7 +79,7 @@ fn main() -> Result<(), anyhow::Error> {
for line in String::from_utf8(output.stdout).unwrap().lines() {
// Each line looks like `istio.io/pkg/version.buildGitRevision=abc`
if let Some((key, value)) = line.split_once('=') {
let key = key.split('.').last().unwrap();
let key = key.split('.').next_back().unwrap();
println!("cargo:rustc-env=ZTUNNEL_BUILD_{key}={value}");
} else {
println!("cargo:warning=invalid build output {line}");

View File

@ -1 +1 @@
0569152cf7260f891ee02fcef8c10bf4f94ea606
c00e63715aead620a8ffa1d23ba03f5ed6b8a690

View File

@ -92,7 +92,7 @@ mirror-licenses: mod-download-go
@license-lint --mirror
TMP := $(shell mktemp -d -u)
UPDATE_BRANCH ?= "master"
UPDATE_BRANCH ?= "release-1.25"
BUILD_TOOLS_ORG ?= "istio"

View File

@ -75,7 +75,7 @@ fi
TOOLS_REGISTRY_PROVIDER=${TOOLS_REGISTRY_PROVIDER:-gcr.io}
PROJECT_ID=${PROJECT_ID:-istio-testing}
if [[ "${IMAGE_VERSION:-}" == "" ]]; then
IMAGE_VERSION=master-6bfe0028e941afdae35a3c5d4374bc08e3c04153
IMAGE_VERSION=release-1.25-a2ad16fed8090d6633e8b86090a5bae1780c08f8
fi
if [[ "${IMAGE_NAME:-}" == "" ]]; then
IMAGE_NAME=build-tools

View File

@ -86,6 +86,7 @@ pub struct CertsDump {
identity: String,
state: String,
cert_chain: Vec<CertDump>,
root_certs: Vec<CertDump>,
}
impl Service {
@ -220,10 +221,12 @@ async fn dump_certs(cert_manager: &SecretManager) -> Vec<CertsDump> {
Unavailable(err) => dump.state = format!("Unavailable: {err}"),
Available(certs) => {
dump.state = "Available".to_string();
dump.cert_chain = std::iter::once(&certs.cert)
.chain(certs.chain.iter())
dump.cert_chain = certs
.cert_and_intermediates()
.iter()
.map(dump_cert)
.collect();
dump.root_certs = certs.roots.iter().map(dump_cert).collect();
}
};
dump
@ -542,11 +545,13 @@ mod tests {
let want = serde_json::json!([
{
"certChain": [],
"rootCerts": [],
"identity": "spiffe://error/ns/forgotten/sa/sa-failed",
"state": "Unavailable: the identity is no longer needed"
},
{
"certChain": [],
"rootCerts": [],
"identity": "spiffe://test/ns/test/sa/sa-pending",
"state": "Initializing"
},
@ -558,6 +563,8 @@ mod tests {
"serialNumber": "588850990443535479077311695632745359443207891470",
"validFrom": "2023-03-11T05:57:26Z"
},
],
"rootCerts": [
{
"expirationTime": "2296-12-24T18:31:28Z",
"pem": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURFekNDQWZ1Z0F3SUJBZ0lVQytjLzYwZStGMWVFKzdWcXhuYVdjT09abm1Fd0RRWUpLb1pJaHZjTgpBUUVMQlFBd0dERVdNQlFHQTFVRUNnd05ZMngxYzNSbGNpNXNiMk5oYkRBZ0Z3MHlNekF6TVRFeE9ETXgKTWpoYUdBOHlNamsyTVRJeU5ERTRNekV5T0Zvd0dERVdNQlFHQTFVRUNnd05ZMngxYzNSbGNpNXNiMk5oCmJEQ0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQU1lQ1R4UEp0dWQwVXh3KwpDYWFkZFdEN2ErUUV1UVkrQlBUS0pkbk1lajBzQk1mVU1iVDE2SkxrWU5GZ3JqMVVWSEhjcFNvSUhvY3AKMnNkMzJTWTRiZGJva1Fjb3ArQmp0azU1alE0NktMWXNKZ2IyTnd2WW8xdDhFMWFldEpxRkdWN3JtZVpiCkZZZWFpKzZxN2lNamxiQ0dBdTcvVW5LSnNkR25hSlFnTjhkdTBUMUtEZ2pxS1B5SHFkc3U5a2JwQ3FpRQpYTVJtdzQvQkVoRkd6bUlEMm9VREtCMzZkdVZiZHpTRW01MVF2Z1U1SUxYSWd5VnJlak41Q0ZzQytXK3gKamVPWExFenRmSEZVb3FiM3dXaGtCdUV4bXI4MUoyaEdXOXBVTEoyd2tRZ2RmWFA3Z3RNa0I2RXlLdy94CkllYU5tTHpQSUdyWDAxelFZSWRaVHVEd01ZMENBd0VBQWFOVE1GRXdIUVlEVlIwT0JCWUVGRDhrNGYxYQpya3V3UitVUmhLQWUySVRaS1o3Vk1COEdBMVVkSXdRWU1CYUFGRDhrNGYxYXJrdXdSK1VSaEtBZTJJVFoKS1o3Vk1BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFLcm5BZVNzClNTSzMvOHp4K2h6ajZTRlhkSkE5Q1EwMkdFSjdoSHJLaWpHV1ZZZGRhbDlkQWJTNXRMZC8vcUtPOXVJcwpHZXR5L09rMmJSUTZjcXFNbGdkTnozam1tcmJTbFlXbUlYSTB5SEdtQ2lTYXpIc1hWYkVGNkl3eTN0Y1IKNHZvWFdLSUNXUGgrQzJjVGdMbWVaMEV1ekZ4cTR3Wm5DZjQwd0tvQUo5aTFhd1NyQm5FOWpXdG5wNEY0CmhXbkpUcEdreTVkUkFMRTBsLzJBYnJsMzh3Z2ZNOHI0SW90bVBUaEZLbkZlSUhVN2JRMXJZQW9xcGJBaApDdjBCTjVQakFRUldNazZib28zZjBha1MwN25sWUlWcVhoeHFjWW5PZ3drZGxUdFg5TXFHSXEyNm44bjEKTldXd25tS09qTnNrNnFSbXVsRWdlR080dnhUdlNKWWIraFU9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K",
@ -576,6 +583,8 @@ mod tests {
"serialNumber": "528170730419860468572163268563070820131458817969",
"validFrom": "2023-03-11T06:57:26Z"
},
],
"rootCerts": [
{
"expirationTime": "2296-12-24T18:31:28Z",
"pem": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURFekNDQWZ1Z0F3SUJBZ0lVQytjLzYwZStGMWVFKzdWcXhuYVdjT09abm1Fd0RRWUpLb1pJaHZjTgpBUUVMQlFBd0dERVdNQlFHQTFVRUNnd05ZMngxYzNSbGNpNXNiMk5oYkRBZ0Z3MHlNekF6TVRFeE9ETXgKTWpoYUdBOHlNamsyTVRJeU5ERTRNekV5T0Zvd0dERVdNQlFHQTFVRUNnd05ZMngxYzNSbGNpNXNiMk5oCmJEQ0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQU1lQ1R4UEp0dWQwVXh3KwpDYWFkZFdEN2ErUUV1UVkrQlBUS0pkbk1lajBzQk1mVU1iVDE2SkxrWU5GZ3JqMVVWSEhjcFNvSUhvY3AKMnNkMzJTWTRiZGJva1Fjb3ArQmp0azU1alE0NktMWXNKZ2IyTnd2WW8xdDhFMWFldEpxRkdWN3JtZVpiCkZZZWFpKzZxN2lNamxiQ0dBdTcvVW5LSnNkR25hSlFnTjhkdTBUMUtEZ2pxS1B5SHFkc3U5a2JwQ3FpRQpYTVJtdzQvQkVoRkd6bUlEMm9VREtCMzZkdVZiZHpTRW01MVF2Z1U1SUxYSWd5VnJlak41Q0ZzQytXK3gKamVPWExFenRmSEZVb3FiM3dXaGtCdUV4bXI4MUoyaEdXOXBVTEoyd2tRZ2RmWFA3Z3RNa0I2RXlLdy94CkllYU5tTHpQSUdyWDAxelFZSWRaVHVEd01ZMENBd0VBQWFOVE1GRXdIUVlEVlIwT0JCWUVGRDhrNGYxYQpya3V3UitVUmhLQWUySVRaS1o3Vk1COEdBMVVkSXdRWU1CYUFGRDhrNGYxYXJrdXdSK1VSaEtBZTJJVFoKS1o3Vk1BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFLcm5BZVNzClNTSzMvOHp4K2h6ajZTRlhkSkE5Q1EwMkdFSjdoSHJLaWpHV1ZZZGRhbDlkQWJTNXRMZC8vcUtPOXVJcwpHZXR5L09rMmJSUTZjcXFNbGdkTnozam1tcmJTbFlXbUlYSTB5SEdtQ2lTYXpIc1hWYkVGNkl3eTN0Y1IKNHZvWFdLSUNXUGgrQzJjVGdMbWVaMEV1ekZ4cTR3Wm5DZjQwd0tvQUo5aTFhd1NyQm5FOWpXdG5wNEY0CmhXbkpUcEdreTVkUkFMRTBsLzJBYnJsMzh3Z2ZNOHI0SW90bVBUaEZLbkZlSUhVN2JRMXJZQW9xcGJBaApDdjBCTjVQakFRUldNazZib28zZjBha1MwN25sWUlWcVhoeHFjWW5PZ3drZGxUdFg5TXFHSXEyNm44bjEKTldXd25tS09qTnNrNnFSbXVsRWdlR080dnhUdlNKWWIraFU9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K",

View File

@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use serde::ser::SerializeSeq;
use std::collections::{HashMap, HashSet};
use std::fmt::{Display, Formatter};
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
@ -20,6 +21,7 @@ use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;
use std::{cmp, env, fs};
use tonic::metadata::{AsciiMetadataKey, AsciiMetadataValue};
use anyhow::anyhow;
use bytes::Bytes;
@ -68,6 +70,10 @@ const ENABLE_ORIG_SRC: &str = "ENABLE_ORIG_SRC";
const PROXY_CONFIG: &str = "PROXY_CONFIG";
const IPV6_ENABLED: &str = "IPV6_ENABLED";
const HTTP2_STREAM_WINDOW_SIZE: &str = "HTTP2_STREAM_WINDOW_SIZE";
const HTTP2_CONNECTION_WINDOW_SIZE: &str = "HTTP2_CONNECTION_WINDOW_SIZE";
const HTTP2_FRAME_SIZE: &str = "HTTP2_FRAME_SIZE";
const UNSTABLE_ENABLE_SOCKS5: &str = "UNSTABLE_ENABLE_SOCKS5";
const DEFAULT_WORKER_THREADS: u16 = 2;
@ -88,6 +94,9 @@ const ISTIO_META_PREFIX: &str = "ISTIO_META_";
const DNS_CAPTURE_METADATA: &str = "DNS_CAPTURE";
const DNS_PROXY_ADDR_METADATA: &str = "DNS_PROXY_ADDR";
const ISTIO_XDS_HEADER_PREFIX: &str = "XDS_HEADER_";
const ISTIO_CA_HEADER_PREFIX: &str = "CA_HEADER_";
/// Fetch the XDS/CA root cert file path based on below constants
const XDS_ROOT_CA_ENV: &str = "XDS_ROOT_CA";
const CA_ROOT_CA_ENV: &str = "CA_ROOT_CA";
@ -134,6 +143,37 @@ pub enum ProxyMode {
Dedicated,
}
#[derive(Clone, Debug)]
pub struct MetadataVector {
pub vec: Vec<(AsciiMetadataKey, AsciiMetadataValue)>,
}
impl serde::Serialize for MetadataVector {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
let mut seq: <S as serde::Serializer>::SerializeSeq =
serializer.serialize_seq(Some(self.vec.len()))?;
for (k, v) in &self.vec {
let serialized_key = k.to_string();
match v.to_str() {
Ok(serialized_val) => {
seq.serialize_element(&(serialized_key, serialized_val))?;
}
Err(_) => {
return Err(serde::ser::Error::custom(
"failed to serialize metadata value",
));
}
}
}
seq.end()
}
}
#[derive(serde::Serialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct Config {
@ -246,6 +286,12 @@ pub struct Config {
pub packet_mark: Option<u32>,
pub socket_config: SocketConfig,
// Headers to be added to XDS discovery requests
pub xds_headers: MetadataVector,
// Headers to be added to certificate requests
pub ca_headers: MetadataVector,
}
#[derive(serde::Serialize, Clone, Copy, Debug)]
@ -281,6 +327,10 @@ pub enum Error {
InvalidUri(#[from] Arc<InvalidUri>),
#[error("invalid configuration: {0}")]
InvalidState(String),
#[error("failed to parse header key: {0}")]
InvalidHeaderKey(String),
#[error("failed to parse header value: {0}")]
InvalidHeaderValue(String),
}
impl From<InvalidUri> for Error {
@ -326,6 +376,28 @@ fn parse_args() -> String {
cli_args[1..].join(" ")
}
fn parse_headers(prefix: &str) -> Result<MetadataVector, Error> {
let mut metadata: MetadataVector = MetadataVector { vec: Vec::new() };
for (key, value) in env::vars() {
let stripped_key: Option<&str> = key.strip_prefix(prefix);
match stripped_key {
Some(stripped_key) => {
// attempt to parse the stripped key
let metadata_key = AsciiMetadataKey::from_str(stripped_key)
.map_err(|_| Error::InvalidHeaderKey(key))?;
// attempt to parse the value
let metadata_value = AsciiMetadataValue::from_str(&value)
.map_err(|_| Error::InvalidHeaderValue(value))?;
metadata.vec.push((metadata_key, metadata_value));
}
None => continue,
}
}
Ok(metadata)
}
pub fn parse_config() -> Result<Config, Error> {
let pc = parse_proxy_config()?;
construct_config(pc)
@ -546,9 +618,15 @@ pub fn construct_config(pc: ProxyConfig) -> Result<Config, Error> {
DEFAULT_POOL_UNUSED_RELEASE_TIMEOUT,
)?,
window_size: 4 * 1024 * 1024,
connection_window_size: 4 * 1024 * 1024,
frame_size: 1024 * 1024,
// window size: per-stream limit
window_size: parse_default(HTTP2_STREAM_WINDOW_SIZE, 4 * 1024 * 1024)?,
// connection window size: per connection.
// Setting this to the same value as window_size can introduce deadlocks in some applications
// where clients do not read data on streamA until they receive data on streamB.
// If streamA consumes the entire connection window, we enter a deadlock.
// A 4x limit should be appropriate without introducing too much potential buffering.
connection_window_size: parse_default(HTTP2_CONNECTION_WINDOW_SIZE, 16 * 1024 * 1024)?,
frame_size: parse_default(HTTP2_FRAME_SIZE, 1024 * 1024)?,
self_termination_deadline: match parse_duration(CONNECTION_TERMINATION_DEADLINE)? {
Some(period) => period,
@ -676,6 +754,8 @@ pub fn construct_config(pc: ProxyConfig) -> Result<Config, Error> {
}
}),
fake_self_inbound: false,
xds_headers: parse_headers(ISTIO_XDS_HEADER_PREFIX)?,
ca_headers: parse_headers(ISTIO_CA_HEADER_PREFIX)?,
})
}
@ -912,6 +992,9 @@ pub mod tests {
env::set_var("ISTIO_META_INCLUDE_THIS", "foobar-env");
env::set_var("NOT_INCLUDE", "not-include");
env::set_var("ISTIO_META_CLUSTER_ID", "test-cluster");
env::set_var("XDS_HEADER_HEADER_FOO", "foo");
env::set_var("XDS_HEADER_HEADER_BAR", "bar");
env::set_var("CA_HEADER_HEADER_BAZ", "baz");
let pc = construct_proxy_config("", pc_env).unwrap();
let cfg = construct_config(pc).unwrap();
@ -925,8 +1008,23 @@ pub mod tests {
);
assert_eq!(cfg.proxy_metadata["BAR"], "bar");
assert_eq!(cfg.proxy_metadata["FOOBAR"], "foobar-overwritten");
assert_eq!(cfg.proxy_metadata["NO_PREFIX"], "no-prefix");
assert_eq!(cfg.proxy_metadata["INCLUDE_THIS"], "foobar-env");
assert_eq!(cfg.proxy_metadata.get("NOT_INCLUDE"), None);
assert_eq!(cfg.proxy_metadata["CLUSTER_ID"], "test-cluster");
assert_eq!(cfg.cluster_id, "test-cluster");
let mut expected_xds_headers = HashMap::new();
expected_xds_headers.insert("HEADER_FOO".to_string(), "foo".to_string());
expected_xds_headers.insert("HEADER_BAR".to_string(), "bar".to_string());
let mut expected_ca_headers = HashMap::new();
expected_ca_headers.insert("HEADER_BAZ".to_string(), "baz".to_string());
validate_metadata_vector(&cfg.xds_headers, expected_xds_headers.clone());
validate_metadata_vector(&cfg.ca_headers, expected_ca_headers.clone());
// both (with a field override and metadata override)
let pc = construct_proxy_config(mesh_config_path, pc_env).unwrap();
let cfg = construct_config(pc).unwrap();
@ -940,5 +1038,19 @@ pub mod tests {
assert_eq!(cfg.proxy_metadata["FOOBAR"], "foobar-overwritten");
assert_eq!(cfg.proxy_metadata["NO_PREFIX"], "no-prefix");
assert_eq!(cfg.proxy_metadata["INCLUDE_THIS"], "foobar-env");
assert_eq!(cfg.proxy_metadata["CLUSTER_ID"], "test-cluster");
assert_eq!(cfg.cluster_id, "test-cluster");
validate_metadata_vector(&cfg.xds_headers, expected_xds_headers.clone());
validate_metadata_vector(&cfg.ca_headers, expected_ca_headers.clone());
}
fn validate_metadata_vector(metadata: &MetadataVector, header_map: HashMap<String, String>) {
for (k, v) in header_map {
let key: AsciiMetadataKey = AsciiMetadataKey::from_str(&k).unwrap();
let value: AsciiMetadataValue = AsciiMetadataValue::from_str(&v).unwrap();
assert!(metadata.vec.contains(&(key, value)));
}
}
}

View File

@ -17,8 +17,9 @@ use std::collections::BTreeMap;
use async_trait::async_trait;
use prost_types::value::Kind;
use prost_types::Struct;
use tracing::{error, instrument, warn};
use tonic::metadata::{AsciiMetadataKey, AsciiMetadataValue};
use tonic::IntoRequest;
use tracing::{debug, error, instrument, warn};
use crate::identity::auth::AuthSource;
use crate::identity::manager::Identity;
@ -31,6 +32,7 @@ pub struct CaClient {
pub client: IstioCertificateServiceClient<TlsGrpcChannel>,
pub enable_impersonated_identity: bool,
pub secret_ttl: i64,
ca_headers: Vec<(AsciiMetadataKey, AsciiMetadataValue)>,
}
impl CaClient {
@ -41,6 +43,7 @@ impl CaClient {
auth: AuthSource,
enable_impersonated_identity: bool,
secret_ttl: i64,
ca_headers: Vec<(AsciiMetadataKey, AsciiMetadataValue)>,
) -> Result<CaClient, Error> {
let svc =
tls::grpc_connector(address, auth, cert_provider.fetch_cert(alt_hostname).await?)?;
@ -49,6 +52,7 @@ impl CaClient {
client,
enable_impersonated_identity,
secret_ttl,
ca_headers,
})
}
}
@ -63,7 +67,7 @@ impl CaClient {
let csr = cs.csr;
let private_key = cs.private_key;
let req = IstioCertificateRequest {
let mut req = tonic::Request::new(IstioCertificateRequest {
csr,
validity_duration: self.secret_ttl,
metadata: {
@ -80,14 +84,23 @@ impl CaClient {
None
}
},
};
});
self.ca_headers.iter().for_each(|(k, v)| {
req.metadata_mut().insert(k.clone(), v.clone());
if let Ok(v_str) = v.to_str() {
debug!("CA header added: {}={}", k, v_str);
}
});
let resp = self
.client
.clone()
.create_certificate(req)
.create_certificate(req.into_request())
.await
.map_err(Box::new)?
.into_inner();
let leaf = resp
.cert_chain
.first()
@ -101,12 +114,8 @@ impl CaClient {
};
let certs = tls::WorkloadCertificate::new(&private_key, leaf, chain)?;
// Make sure the certificate actually matches the identity we requested.
if self.enable_impersonated_identity && certs.cert.identity().as_ref() != Some(id) {
error!(
"expected identity {:?}, got {:?}",
id,
certs.cert.identity()
);
if self.enable_impersonated_identity && certs.identity().as_ref() != Some(id) {
error!("expected identity {:?}, got {:?}", id, certs.identity());
return Err(Error::SanError(id.to_owned()));
}
Ok(certs)
@ -246,7 +255,7 @@ pub mod mock {
#[cfg(test)]
mod tests {
use std::iter;
use std::time::Duration;
use matches::assert_matches;
@ -286,10 +295,7 @@ mod tests {
);
let res = test_ca_client_with_response(IstioCertificateResponse {
cert_chain: iter::once(certs.cert)
.chain(certs.chain)
.map(|c| c.as_pem())
.collect(),
cert_chain: certs.full_chain_and_roots(),
})
.await;
assert_matches!(res, Err(Error::SanError(_)));
@ -304,10 +310,7 @@ mod tests {
);
let res = test_ca_client_with_response(IstioCertificateResponse {
cert_chain: iter::once(certs.cert)
.chain(certs.chain)
.map(|c| c.as_pem())
.collect(),
cert_chain: certs.full_chain_and_roots(),
})
.await;
assert_matches!(res, Ok(_));

View File

@ -507,6 +507,7 @@ impl SecretManager {
cfg.auth.clone(),
cfg.proxy_mode == ProxyMode::Shared,
cfg.secret_ttl.as_secs().try_into().unwrap_or(60 * 60 * 24),
cfg.ca_headers.vec.clone(),
)
.await?;
Ok(Self::new_with_client(caclient))

View File

@ -18,7 +18,9 @@ use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::{atomic, Arc};
use std::time::Instant;
use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue, LabelValueEncoder};
use prometheus_client::encoding::{
EncodeLabelSet, EncodeLabelValue, LabelSetEncoder, LabelValueEncoder,
};
use prometheus_client::metrics::counter::{Atomic, Counter};
use prometheus_client::metrics::family::Family;
use prometheus_client::registry::Registry;
@ -95,6 +97,8 @@ pub struct DerivedWorkload {
pub namespace: Option<Strng>,
pub identity: Option<Identity>,
pub cluster_id: Option<Strng>,
pub region: Option<Strng>,
pub zone: Option<Strng>,
}
#[derive(Clone)]
@ -123,6 +127,12 @@ impl CommonTrafficLabels {
self.source_app = w.canonical_name.clone().into();
self.source_version = w.canonical_revision.clone().into();
self.source_cluster = w.cluster_id.to_string().into();
let mut local = self.locality.0.unwrap_or_default();
local.source_region = w.locality.region.clone().into();
local.source_zone = w.locality.zone.clone().into();
self.locality = OptionallyEncode(Some(local));
self
}
@ -137,6 +147,12 @@ impl CommonTrafficLabels {
self.source_cluster = w.cluster_id.clone().into();
// This is the identity from the TLS handshake; this is the most trustworthy source so use it
self.source_principal = w.identity.clone().into();
let mut local = self.locality.0.unwrap_or_default();
local.source_region = w.region.clone().into();
local.source_zone = w.zone.clone().into();
self.locality = OptionallyEncode(Some(local));
self
}
@ -150,6 +166,12 @@ impl CommonTrafficLabels {
self.destination_app = w.canonical_name.clone().into();
self.destination_version = w.canonical_revision.clone().into();
self.destination_cluster = w.cluster_id.to_string().into();
let mut local = self.locality.0.unwrap_or_default();
local.destination_region = w.locality.region.clone().into();
local.destination_zone = w.locality.zone.clone().into();
self.locality = OptionallyEncode(Some(local));
self
}
@ -208,6 +230,30 @@ pub struct CommonTrafficLabels {
request_protocol: RequestProtocol,
response_flags: ResponseFlags,
connection_security_policy: SecurityPolicy,
#[prometheus(flatten)]
locality: OptionallyEncode<LocalityLabels>,
}
/// OptionallyEncode is a wrapper that will optionally encode the entire label set.
/// This differs from something like DefaultedUnknown which handles only the value - this makes the
/// entire label not show up.
#[derive(Clone, Hash, Default, Debug, PartialEq, Eq)]
struct OptionallyEncode<T>(Option<T>);
impl<T: EncodeLabelSet> EncodeLabelSet for OptionallyEncode<T> {
fn encode(&self, encoder: LabelSetEncoder) -> Result<(), std::fmt::Error> {
match &self.0 {
None => Ok(()),
Some(ll) => ll.encode(encoder),
}
}
}
#[derive(Clone, Hash, Default, Debug, PartialEq, Eq, EncodeLabelSet)]
struct LocalityLabels {
source_region: DefaultedUnknown<RichStrng>,
source_zone: DefaultedUnknown<RichStrng>,
destination_region: DefaultedUnknown<RichStrng>,
destination_zone: DefaultedUnknown<RichStrng>,
}
#[derive(Clone, Hash, Default, Debug, PartialEq, Eq, EncodeLabelSet)]

View File

@ -439,12 +439,14 @@ fn build_forwarded(remote_addr: SocketAddr, server: &Option<ServiceDescription>)
}
fn baggage(r: &Request, cluster: String) -> String {
format!("k8s.cluster.name={cluster},k8s.namespace.name={namespace},k8s.{workload_type}.name={workload_name},service.name={name},service.version={version}",
format!("k8s.cluster.name={cluster},k8s.namespace.name={namespace},k8s.{workload_type}.name={workload_name},service.name={name},service.version={version},cloud.region={region},cloud.availability_zone={zone}",
namespace = r.source.namespace,
workload_type = r.source.workload_type,
workload_name = r.source.workload_name,
name = r.source.canonical_name,
version = r.source.canonical_revision,
region = r.source.locality.region,
zone = r.source.locality.zone,
)
}

View File

@ -58,7 +58,7 @@ impl CaServer {
Duration::from_secs(0),
Duration::from_secs(100),
);
let root_cert = RootCert::Static(certs.chain.iter().map(|c| c.as_pem()).join("\n").into());
let root_cert = RootCert::Static(certs.roots.iter().map(|c| c.as_pem()).join("\n").into());
let acceptor = tls::mock::MockServerCertProvider::new(certs);
let mut tls_stream = crate::hyper_util::tls_server(acceptor, listener);
let srv = IstioCertificateServiceServer::new(server);
@ -86,6 +86,7 @@ impl CaServer {
),
true,
60 * 60 * 24,
Vec::new(),
)
.await
.unwrap();

View File

@ -73,7 +73,7 @@ impl AdsServer {
Duration::from_secs(0),
Duration::from_secs(100),
);
let root_cert = RootCert::Static(certs.chain.iter().map(|c| c.as_pem()).join("\n").into());
let root_cert = RootCert::Static(certs.roots.iter().map(|c| c.as_pem()).join("\n").into());
let acceptor = tls::mock::MockServerCertProvider::new(certs);
let listener_addr_string = "https://".to_string() + &server_addr.to_string();
let mut tls_stream = crate::hyper_util::tls_server(acceptor, listener);

src/tls/ca-key2.pem (new file, 28 lines)
View File

@ -0,0 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCLIZGLab2juncQ
yF3RQPXcJmuktVjdTGtNICS2CKcaToYKgYAmp6VPgTXHHB/fwNMsDnQb50szTgEl
LPzGT4YapgWIz9JOFyPsSoXBvraVRBxT20dFD2ARK3ilGaoDkItlu4vL9QTNbgXF
ucYmZkiD2GtLtNcqFNC75tm4IJ09NywzD88IA/8RHSZLy+2yeT6OI1O/3igs66xT
HQTdmqNnqxeckyxtwxUafayfk9W7xGhxHK8pFRUfvnOl/Qm56RMlQfP7FBjg4bHS
wL+FfDKBLItvcwO4i8lQpya0ZsqMTtxGT11nRDH5NZMT1w6kCKTyOECJUq2nZZ9b
VeeoRmdNAgMBAAECggEAE66rx5htpMYp9mSWCxaAwYRo6XvjJqlbK6W+s8bNFvZh
VYak8bL5OqJZkIGcy7tcVGj2CYWCuK8SD+eJmedhng77DPPzeSsiYJjZS8OWUk74
n+9PKYiniz5GWrri946g/cMWn4OZypMEO4jQrJl/LDG3WhYq8y/PKKnbhoYMoH5i
ebv8YLGzzPZm0Vd3JM+wvHkd/CoAvrEWXuhvgxEXyCfpNfStrRbf3Frsk7yRrTx7
KbSINMvZPemRhaBewr1mU6HWsbu2W5sm2hpe1KmABrUFvDq7ad4LcAuQc54zhdbC
WkR86+QSDXhCE+ZlR3TyjfGCcsBYzWnRNVmP+liNEQKBgQC/o82IFHv3AGnWKRC3
+rULXHCLqrAiVaqaW442/OjzjesFbouKzL8V8wKw+ag/4wECIm5r6ogtMIhCOVXc
bQEcGbvhIF5irh/8j0CpaEctiguJTOyy9tShYxJVzOYS44NsDAIyCdQIWYOzeNWP
l7aaRNs1MFf9eD4I5ATqbF5f3QKBgQC521at9CvQTDoS4MrWuV2XcVlAAjz05voh
8p7ergCEY5JLKU3k231zVVKaGns2ert6SyCNXD7xGC/Eo/9zEtG/xzoomNRfYixs
czcNx/yRX/GUOWqG1SDFck5wfbrZ4jTgmhe8B2RG7t8J848dUZRb7eJ0s6gXdCW9
xHprUdRmMQKBgD5XA7obp8PO357qFuUyagh7FqVobgmNQoUZ+WZL2V+5L9XBgyUw
u4xhU+PMIv49UwultbPnREsm+XxJeHPPBchlWqe+RtXk/MTEuO0i3dyjhmMwoeMJ
xluFheZhVAqa9hqEwYYTimT48Y3FZftjB+ShN4nS4xyyK8PqoOq9O+oFAoGAIbjF
YmyiInoiM1isFQevDpJXYkDFtJ3QFqbB4p9popu6aH7HDlYwzeNWSHWzk2/zYj4N
Wvi4xt/fkus6pzNr8UMBr2oDZocWjlrdS1fU4L+qwn0kcfBrsMeLqed2JqBffb0X
v1sL+77Noy2Y8vXhWEiyRQBv6El/q43htGU1h5ECgYBXnJBFtYZ5J1CnFYOVGXD1
Rqp0dYVEJdnwZZIVPyEPiMzpYZjUbuodwcMQyHlJzyPn2Pn60Lx+Ie/mNgkltVtl
si2Di6ZLn9ok120YXRl4hufWGsA8b+cwPo72aIoAFP+K8LMRjHKGMS+XnHkX1N9/
42G8+1ugr/men4HybDQV+w==
-----END PRIVATE KEY-----

View File

@ -17,6 +17,7 @@ use crate::tls::{Error, IdentityVerifier, OutboundConnector};
use base64::engine::general_purpose::STANDARD;
use bytes::Bytes;
use itertools::Itertools;
use std::{cmp, iter};
use rustls::client::Resumption;
use rustls::pki_types::{CertificateDer, PrivateKeyDer};
@ -49,12 +50,14 @@ pub struct Expiration {
pub struct WorkloadCertificate {
/// cert is the leaf certificate
pub cert: Certificate,
/// chain is the entire trust chain, excluding the leaf
/// chain is the entire trust chain, excluding the leaf and root
pub chain: Vec<Certificate>,
pub private_key: PrivateKeyDer<'static>,
pub(in crate::tls) private_key: PrivateKeyDer<'static>,
/// precomputed roots
pub roots: Arc<RootCertStore>,
/// precomputed roots. This is used for verification
root_store: Arc<RootCertStore>,
/// original roots, used for debugging
pub roots: Vec<Certificate>,
}
pub fn identity_from_connection(conn: &server::ServerConnection) -> Option<Identity> {
@ -204,6 +207,25 @@ fn parse_cert(mut cert: Vec<u8>) -> Result<Certificate, Error> {
})
}
fn parse_cert_multi(mut cert: &[u8]) -> Result<Vec<Certificate>, Error> {
let mut reader = std::io::BufReader::new(Cursor::new(&mut cert));
let parsed: Result<Vec<_>, _> = rustls_pemfile::read_all(&mut reader).collect();
parsed
.map_err(|e| Error::CertificateParseError(e.to_string()))?
.into_iter()
.map(|p| {
let Item::X509Certificate(der) = p else {
return Err(Error::CertificateParseError("no certificate".to_string()));
};
let (_, cert) = x509_parser::parse_x509_certificate(&der)?;
Ok(Certificate {
der: der.clone(),
expiry: expiration(cert),
})
})
.collect()
}
fn parse_key(mut key: &[u8]) -> Result<PrivateKeyDer<'static>, Error> {
let mut reader = std::io::BufReader::new(Cursor::new(&mut key));
let parsed = rustls_pemfile::read_one(&mut reader)
@ -218,32 +240,61 @@ fn parse_key(mut key: &[u8]) -> Result<PrivateKeyDer<'static>, Error> {
impl WorkloadCertificate {
pub fn new(key: &[u8], cert: &[u8], chain: Vec<&[u8]>) -> Result<WorkloadCertificate, Error> {
let cert = parse_cert(cert.to_vec())?;
let chain = chain
.into_iter()
// The Istio API does something pretty unhelpful, by providing a single chain of certs.
// The last one is the root. However, there may be multiple roots concatenated in that last cert,
// so we will need to split them.
let Some(raw_root) = chain.last() else {
return Err(Error::InvalidRootCert(
"no root certificate present".to_string(),
));
};
let roots = parse_cert_multi(raw_root)?;
let chain = chain[..cmp::max(0, chain.len() - 1)]
.iter()
.map(|x| x.to_vec())
.map(parse_cert)
.collect::<Result<Vec<_>, _>>()?;
let key: PrivateKeyDer = parse_key(key)?;
let mut roots = RootCertStore::empty();
roots.add_parsable_certificates(chain.iter().last().map(|c| c.der.clone()));
let mut roots_store = RootCertStore::empty();
let (_valid, invalid) =
roots_store.add_parsable_certificates(roots.iter().map(|c| c.der.clone()));
if invalid > 0 {
tracing::warn!("warning: found {invalid} invalid root certs");
}
Ok(WorkloadCertificate {
cert,
chain,
private_key: key,
roots: Arc::new(roots),
roots,
root_store: Arc::new(roots_store),
})
}
pub fn identity(&self) -> Option<Identity> {
self.cert.identity()
}
// TODO: can we precompute some or all of this?
pub(in crate::tls) fn cert_and_intermediates(&self) -> Vec<CertificateDer<'static>> {
pub(in crate::tls) fn cert_and_intermediates_der(&self) -> Vec<CertificateDer<'static>> {
std::iter::once(self.cert.der.clone())
.chain(
self.chain[..self.chain.len() - 1]
.iter()
.map(|x| x.der.clone()),
)
.chain(self.chain.iter().map(|x| x.der.clone()))
.collect()
}
pub fn cert_and_intermediates(&self) -> Vec<Certificate> {
std::iter::once(self.cert.clone())
.chain(self.chain.clone())
.collect()
}
pub fn full_chain_and_roots(&self) -> Vec<String> {
self.cert_and_intermediates()
.into_iter()
.map(|c| c.as_pem())
.chain(iter::once(self.roots.iter().map(|c| c.as_pem()).join("\n")))
.collect()
}
@ -252,7 +303,7 @@ impl WorkloadCertificate {
Identity::Spiffe { trust_domain, .. } => trust_domain,
});
let raw_client_cert_verifier = WebPkiClientVerifier::builder_with_provider(
self.roots.clone(),
self.root_store.clone(),
crate::tls::lib::provider(),
)
.build()?;
@ -263,20 +314,26 @@ impl WorkloadCertificate {
.with_protocol_versions(tls::TLS_VERSIONS)
.expect("server config must be valid")
.with_client_cert_verifier(client_cert_verifier)
.with_single_cert(self.cert_and_intermediates(), self.private_key.clone_key())?;
.with_single_cert(
self.cert_and_intermediates_der(),
self.private_key.clone_key(),
)?;
sc.alpn_protocols = vec![b"h2".into()];
Ok(sc)
}
pub fn outbound_connector(&self, identity: Vec<Identity>) -> Result<OutboundConnector, Error> {
let roots = self.roots.clone();
let roots = self.root_store.clone();
let verifier = IdentityVerifier { roots, identity };
let mut cc = ClientConfig::builder_with_provider(crate::tls::lib::provider())
.with_protocol_versions(tls::TLS_VERSIONS)
.expect("client config must be valid")
.dangerous() // Custom verifier requires "dangerous" opt-in
.with_custom_certificate_verifier(Arc::new(verifier))
.with_client_auth_cert(self.cert_and_intermediates(), self.private_key.clone_key())?;
.with_client_auth_cert(
self.cert_and_intermediates_der(),
self.private_key.clone_key(),
)?;
cc.alpn_protocols = vec![b"h2".into()];
cc.resumption = Resumption::disabled();
cc.enable_sni = false;
@ -337,3 +394,75 @@ fn der_to_pem(der: &[u8], label: &str) -> String {
ans.push_str("-----\n");
ans
}
#[cfg(test)]
mod test {
use crate::identity::Identity;
use crate::test_helpers::helpers;
use crate::tls::mock::{TestIdentity, TEST_ROOT, TEST_ROOT2, TEST_ROOT2_KEY, TEST_ROOT_KEY};
use crate::tls::WorkloadCertificate;
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;
use std::time::SystemTime;
use tokio::io::AsyncReadExt;
use tokio::io::AsyncWriteExt;
use tokio::net::TcpListener;
use tokio::net::TcpStream;
use tokio_rustls::TlsAcceptor;
#[tokio::test]
async fn multi_root() {
helpers::initialize_telemetry();
let id = Identity::from_str("spiffe://td/ns/n/sa/a").unwrap();
// Joined root
let mut joined = TEST_ROOT.to_vec();
joined.push(b'\n');
joined.extend(TEST_ROOT2);
// Generate key+cert signed by root1
let (key, cert) = crate::tls::mock::generate_test_certs_with_root(
&TestIdentity::Identity(id.clone()),
SystemTime::now(),
SystemTime::now() + Duration::from_secs(60),
None,
TEST_ROOT_KEY,
TEST_ROOT,
);
let cert1 =
WorkloadCertificate::new(key.as_bytes(), cert.as_bytes(), vec![&joined]).unwrap();
// Generate key+cert signed by root2
let (key, cert) = crate::tls::mock::generate_test_certs_with_root(
&TestIdentity::Identity(id.clone()),
SystemTime::now(),
SystemTime::now() + Duration::from_secs(60),
None,
TEST_ROOT2_KEY,
TEST_ROOT2,
);
let cert2 =
WorkloadCertificate::new(key.as_bytes(), cert.as_bytes(), vec![&joined]).unwrap();
// Do a simple handshake between them; we should be able to accept the trusted root
let server = cert1.server_config().unwrap();
let tls = TlsAcceptor::from(Arc::new(server));
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
let addr = listener.local_addr().unwrap();
tokio::task::spawn(async move {
let (stream, _) = listener.accept().await.unwrap();
let mut tls = tls.accept(stream).await.unwrap();
let _ = tls.write(b"serv").await.unwrap();
});
let stream = TcpStream::connect(addr).await.unwrap();
let client = cert2.outbound_connector(vec![id]).unwrap();
let mut tls = client.connect(stream).await.unwrap();
let _ = tls.write(b"hi").await.unwrap();
let mut buf = [0u8; 4];
tls.read_exact(&mut buf).await.unwrap();
assert_eq!(&buf, b"serv");
}
}

View File

@ -8,6 +8,11 @@ if [ ! -f ca-key.pem ]; then
openssl genrsa -f4 -out ca-key.pem
openssl req -x509 -new -nodes -key "ca-key.pem" -days 100000 -out "root-cert.pem" -subj "/O=cluster.local"
fi
if [ ! -f ca-key2.pem ]; then
# Only generate if it doesn't already exist, as some tests depend on the existing content of the root cert.
openssl genrsa -f4 -out ca-key2.pem
openssl req -x509 -new -nodes -addext "keyUsage = keyCertSign" -key "ca-key2.pem" -days 100000 -out "root-cert2.pem" -subj "/O=cluster.local"
fi
openssl req -x509 -new -nodes -CA "root-cert.pem" -CAkey "ca-key.pem" -newkey rsa:2048 -keyout "intermediary-key.pem" -days 100000 -out "intermediary-cert.pem" -subj "/O=intermediary.cluster.local"
openssl req -x509 -new -nodes -CA "intermediary-cert.pem" -CAkey "intermediary-key.pem" -newkey rsa:2048 -keyout "istiod-key.pem" -days 100000 -out "istiod-cert.pem" -subj "/O=istiod.cluster.local"

View File

@ -124,7 +124,8 @@ pub mod tests {
let certs = WorkloadCertificate::new(TEST_PKEY, TEST_WORKLOAD_CERT, roots).unwrap();
// 3 certs that should be here are the istiod cert, intermediary cert and the root cert.
assert_eq!(certs.chain.len(), 3);
assert_eq!(certs.chain.len(), 2);
assert_eq!(certs.roots.len(), 1);
assert_eq!(
certs.cert.names(),
vec![

View File

@ -33,6 +33,8 @@ pub const TEST_WORKLOAD_CERT: &[u8] = include_bytes!("cert.pem");
pub const TEST_PKEY: &[u8] = include_bytes!("key.pem");
pub const TEST_ROOT: &[u8] = include_bytes!("root-cert.pem");
pub const TEST_ROOT_KEY: &[u8] = include_bytes!("ca-key.pem");
pub const TEST_ROOT2: &[u8] = include_bytes!("root-cert2.pem");
pub const TEST_ROOT2_KEY: &[u8] = include_bytes!("ca-key2.pem");
/// TestIdentity is an identity used for testing. This extends the Identity with test-only types
#[derive(Debug)]
@ -103,6 +105,24 @@ pub fn generate_test_certs_at(
not_after: SystemTime,
rng: Option<&mut dyn rand::RngCore>,
) -> WorkloadCertificate {
let (key, cert) =
generate_test_certs_with_root(id, not_before, not_after, rng, TEST_ROOT_KEY, TEST_ROOT);
let mut workload =
WorkloadCertificate::new(key.as_bytes(), cert.as_bytes(), vec![TEST_ROOT]).unwrap();
// Certificates do not allow sub-millisecond, but we need this for tests.
workload.cert.expiry.not_before = not_before;
workload.cert.expiry.not_after = not_after;
workload
}
pub fn generate_test_certs_with_root(
id: &TestIdentity,
not_before: SystemTime,
not_after: SystemTime,
rng: Option<&mut dyn rand::RngCore>,
ca_key: &[u8],
ca_cert: &[u8],
) -> (String, String) {
use rcgen::*;
let serial_number = {
let mut data = [0u8; 20];
@ -114,7 +134,6 @@ pub fn generate_test_certs_at(
data[0] &= 0x7f;
data
};
let ca_cert = test_ca();
let mut p = CertificateParams::default();
p.not_before = not_before.into();
p.not_after = not_after.into();
@ -136,16 +155,12 @@ pub fn generate_test_certs_at(
}];
let kp = KeyPair::from_pem(std::str::from_utf8(TEST_PKEY).unwrap()).unwrap();
let ca_kp = KeyPair::from_pem(std::str::from_utf8(TEST_ROOT_KEY).unwrap()).unwrap();
let ca_kp = KeyPair::from_pem(std::str::from_utf8(ca_key).unwrap()).unwrap();
let key = kp.serialize_pem();
let cert = p.signed_by(&kp, &ca_cert, &ca_kp).unwrap();
let ca = test_ca(ca_key, ca_cert);
let cert = p.signed_by(&kp, &ca, &ca_kp).unwrap();
let cert = cert.pem();
let mut workload =
WorkloadCertificate::new(key.as_bytes(), cert.as_bytes(), vec![TEST_ROOT]).unwrap();
// Certificates do not allow sub-millisecond, but we need this for tests.
workload.cert.expiry.not_before = not_before;
workload.cert.expiry.not_after = not_after;
workload
(key, cert)
}
pub fn generate_test_certs(
@ -157,10 +172,9 @@ pub fn generate_test_certs(
generate_test_certs_at(id, not_before, not_before + duration_until_expiry, None)
}
fn test_ca() -> Certificate {
let key = KeyPair::from_pem(std::str::from_utf8(TEST_ROOT_KEY).unwrap()).unwrap();
let ca_param =
CertificateParams::from_ca_cert_pem(std::str::from_utf8(TEST_ROOT).unwrap()).unwrap();
fn test_ca(key: &[u8], cert: &[u8]) -> Certificate {
let key = KeyPair::from_pem(std::str::from_utf8(key).unwrap()).unwrap();
let ca_param = CertificateParams::from_ca_cert_pem(std::str::from_utf8(cert).unwrap()).unwrap();
ca_param.self_signed(&key).unwrap()
}
@ -181,7 +195,7 @@ impl ServerCertProvider for MockServerCertProvider {
.expect("server config must be valid")
.with_no_client_auth()
.with_single_cert(
self.0.cert_and_intermediates(),
self.0.cert_and_intermediates_der(),
self.0.private_key.clone_key(),
)
.unwrap();

src/tls/root-cert2.pem (new file, 19 lines)
View File

@ -0,0 +1,19 @@
-----BEGIN CERTIFICATE-----
MIIDIDCCAgigAwIBAgIUZMEqB4WzwdAYjeaqZ0qzk1mT168wDQYJKoZIhvcNAQEL
BQAwGDEWMBQGA1UECgwNY2x1c3Rlci5sb2NhbDAgFw0yNTA0MTEyMDQ4MzZaGA8y
Mjk5MDEyNTIwNDgzNlowGDEWMBQGA1UECgwNY2x1c3Rlci5sb2NhbDCCASIwDQYJ
KoZIhvcNAQEBBQADggEPADCCAQoCggEBAIshkYtpvaO6dxDIXdFA9dwma6S1WN1M
a00gJLYIpxpOhgqBgCanpU+BNcccH9/A0ywOdBvnSzNOASUs/MZPhhqmBYjP0k4X
I+xKhcG+tpVEHFPbR0UPYBEreKUZqgOQi2W7i8v1BM1uBcW5xiZmSIPYa0u01yoU
0Lvm2bggnT03LDMPzwgD/xEdJkvL7bJ5Po4jU7/eKCzrrFMdBN2ao2erF5yTLG3D
FRp9rJ+T1bvEaHEcrykVFR++c6X9CbnpEyVB8/sUGODhsdLAv4V8MoEsi29zA7iL
yVCnJrRmyoxO3EZPXWdEMfk1kxPXDqQIpPI4QIlSradln1tV56hGZ00CAwEAAaNg
MF4wHQYDVR0OBBYEFBAYwv8Y3PQnA6oSL5W4I8F/WNYvMB8GA1UdIwQYMBaAFBAY
wv8Y3PQnA6oSL5W4I8F/WNYvMA8GA1UdEwEB/wQFMAMBAf8wCwYDVR0PBAQDAgIE
MA0GCSqGSIb3DQEBCwUAA4IBAQBuWx8zxrfwQAYYZ462Kp/082Q+EXiWDp6MO2yx
bGnH03gesNH2audl3wHcWTYkGflgE7Pp70+JOztdAkanmTNn/xDXk1BivCgfP2fE
r9t3SoCkEX0am8LBjrCNYA0QINtz4CjhT1XpBxgbBUBNUeem8FAHStQJdlOiePlw
nnx841hbMZq9mZU7GDogZbbZD42TBcL01djVSC44o8+NbR455NsI6vxO8dZ6AXsl
rExMF70XDkogK4R9lPs2AADsOhH1bZQuHyVTNHCj/T2nFxSGfOItXekyfKVN5ID1
nlt1GD6Kjca9gQYYK1hzUEzePe16ROz3LlWuhx7pd/qsXhw7
-----END CERTIFICATE-----

View File

@ -45,7 +45,8 @@ impl BuildInfo {
build_profile: BUILD_RUST_PROFILE.to_string(),
build_status: BUILD_STATUS.to_string(),
git_tag: BUILD_TAG.to_string(),
istio_version: env::var("ISTIO_VERSION").unwrap_or_else(|_| "unknown".to_string()),
istio_version: env::var("ISTIO_META_ISTIO_VERSION")
.unwrap_or_else(|_| "unknown".to_string()),
}
}
}

View File

@ -26,6 +26,7 @@ use split_iter::Splittable;
use thiserror::Error;
use tokio::sync::mpsc;
use tokio::sync::oneshot;
use tonic::metadata::{AsciiMetadataKey, AsciiMetadataValue};
use tracing::{debug, error, info, info_span, warn, Instrument};
use crate::metrics::{IncrementRecorder, Recorder};
@ -215,6 +216,7 @@ pub struct Config {
/// alt_hostname provides an alternative accepted SAN for the control plane TLS verification
alt_hostname: Option<String>,
xds_headers: Vec<(AsciiMetadataKey, AsciiMetadataValue)>,
}
pub struct State {
@ -262,6 +264,7 @@ impl Config {
on_demand: config.xds_on_demand,
proxy_metadata: config.proxy_metadata.clone(),
alt_hostname: config.alt_xds_hostname.clone(),
xds_headers: config.xds_headers.vec.clone(),
}
}
@ -628,9 +631,18 @@ impl AdsClient {
.await?,
)?;
let mut req = tonic::Request::new(outbound);
self.config.xds_headers.iter().for_each(|(k, v)| {
req.metadata_mut().insert(k.clone(), v.clone());
if let Ok(v_str) = v.to_str() {
debug!("XDS header added: {}={}", k, v_str);
}
});
let ads_connection = AggregatedDiscoveryServiceClient::new(tls_grpc_channel)
.max_decoding_message_size(200 * 1024 * 1024)
.delta_aggregated_resources(tonic::Request::new(outbound))
.delta_aggregated_resources(req)
.await;
let mut response_stream = ads_connection