Compare commits
48 Commits
release/v2...main

SHA1
----------
e8cc4ec47b
b951a6c374
7f6ac15f13
75e9caaeae
02bbb3d617
103c69ca75
4663cc4eb6
03374b9543
4c9ae74450
94572d174d
897c7e85bc
036ca75c00
98e731d841
d5b12ea5e2
a64170bd61
973dfa6f4d
17bff6144a
d385094caa
dce6b61191
28ebc47a6b
4bae7e98f2
b89c4902c6
8a80f1ce95
edc35d6e18
99f322a9a0
627a5aad21
356f80b786
0b3bc61263
2b0e723027
2156c3d5e3
c4cae21e11
89c88caf5c
bb612d3aac
7030cc51ed
af520dfd12
ccf91dfb3e
69cd164da1
feb5f87713
fdd7f218a3
b4e2b7e24f
1b07f277d7
25cf0c7f11
d46e7c0c82
3305a890b0
d850fa6f73
6b323d8457
7f58cd56ed
43e3f630ec
@@ -28,7 +28,7 @@ jobs:
 continue-on-error: true
 steps:
 - run: rustup toolchain install --profile=minimal beta
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
 - run: git config --global --add safe.directory "$PWD" # actions/runner#2033
 - run: just toolchain=beta fetch
 - run: just toolchain=beta build

@@ -23,7 +23,7 @@ jobs:
 timeout-minutes: 5
 runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
 - id: changed
 uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c
 with:

@@ -48,7 +48,7 @@ jobs:
 env:
 CXX: "/usr/bin/clang++-19"
 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
 - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0
 - run: cargo tarpaulin --locked --workspace --exclude=linkerd2-proxy --exclude=linkerd-transport-header --exclude=opencensus-proto --exclude=spire-proto --no-run
 - run: cargo tarpaulin --locked --workspace --exclude=linkerd2-proxy --exclude=linkerd-transport-header --exclude=opencensus-proto --exclude=spire-proto --skip-clean --ignore-tests --no-fail-fast --out=Xml

@@ -30,7 +30,7 @@ jobs:
 container: docker://rust:1.88.0
 steps:
 - run: apt update && apt install -y jo
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
 - run: git config --global --add safe.directory "$PWD" # actions/runner#2033
 - uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c
 id: changed-files

@@ -55,7 +55,7 @@ jobs:
 steps:
 - run: rustup toolchain add nightly
 - run: cargo install cargo-fuzz
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
 - run: git config --global --add safe.directory "$PWD" # actions/runner#2033
 - working-directory: ${{matrix.dir}}
 run: cargo +nightly fetch

@@ -14,7 +14,7 @@ jobs:
 timeout-minutes: 5
 runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
 - uses: DavidAnson/markdownlint-cli2-action@992badcdf24e3b8eb7e87ff9287fe931bcb00c6e
 with:
 globs: "**/*.md"

@@ -28,7 +28,7 @@ jobs:
 continue-on-error: true
 steps:
 - run: rustup toolchain install --profile=minimal nightly
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
 - run: git config --global --add safe.directory "$PWD" # actions/runner#2033
 - run: just toolchain=nightly fetch
 - run: just toolchain=nightly profile=release build

@@ -16,7 +16,7 @@ jobs:
 timeout-minutes: 5
 runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
 - id: build
 uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c
 with:

@@ -77,7 +77,7 @@ jobs:
 runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
 steps:
 - uses: linkerd/dev/actions/setup-tools@v47
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
 - run: just action-lint
 - run: just action-dev-check
 

@@ -91,7 +91,7 @@ jobs:
 timeout-minutes: 20
 steps:
 - run: git config --global --add safe.directory "$PWD" # actions/runner#2033
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
 - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0
 - run: just fetch
 - run: cargo deny --all-features check bans licenses sources

@@ -114,7 +114,7 @@ jobs:
 crate: ${{ fromJson(needs.meta.outputs.cargo_crates) }}
 steps:
 - run: git config --global --add safe.directory "$PWD" # actions/runner#2033
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
 - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0
 - run: just fetch
 - run: just check-crate ${{ matrix.crate }}

@@ -136,7 +136,7 @@ jobs:
 tag=$(linkerd version --client --short)
 echo "linkerd $tag"
 echo "LINKERD_TAG=$tag" >> "$GITHUB_ENV"
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
 - run: just docker
 - run: just k3d-create
 - run: just k3d-load-linkerd

@@ -168,7 +168,7 @@ jobs:
 if: contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled')
 run: exit 1
 
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
 if: needs.meta.outputs.is_dependabot == 'true' && needs.meta.outputs.any_changed == 'true'
 - name: "Merge dependabot changes"
 if: needs.meta.outputs.is_dependabot == 'true' && needs.meta.outputs.any_changed == 'true'

@@ -44,7 +44,7 @@ jobs:
 runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
 timeout-minutes: 5
 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
 - name: Check if the most recent commit is after the last release
 id: recency
 env:

@@ -61,7 +61,7 @@ jobs:
 timeout-minutes: 5
 runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
 if: github.event_name == 'pull_request'
 - id: workflow
 if: github.event_name == 'pull_request'

@@ -170,7 +170,7 @@ jobs:
 
 - name: Configure git
 run: git config --global --add safe.directory "$PWD" # actions/runner#2033
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
 with:
 ref: ${{ needs.meta.outputs.ref }}
 - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0

@@ -204,7 +204,7 @@ jobs:
 git config --global user.name "$GITHUB_USERNAME"
 git config --global user.email "$GITHUB_USERNAME"@users.noreply.github.com
 # Tag the release.
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
 with:
 token: ${{ secrets.LINKERD2_PROXY_GITHUB_TOKEN || github.token }}
 ref: ${{ needs.meta.outputs.ref }}

@@ -16,5 +16,5 @@ jobs:
 runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
 steps:
 - uses: linkerd/dev/actions/setup-tools@v47
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
 - run: just sh-lint

@@ -16,7 +16,7 @@ jobs:
 runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
 container: ghcr.io/linkerd/dev:v47-rust
 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
 - run: git config --global --add safe.directory "$PWD" # actions/runner#2033
 - run: |
 VERSION_REGEX='channel = "([0-9]+\.[0-9]+\.[0-9]+)"'

@@ -38,7 +38,7 @@ jobs:
 runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
 steps:
 - uses: linkerd/dev/actions/setup-tools@v47
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
 - shell: bash
 run: |
 VERSION_REGEX='channel = "([0-9]+\.[0-9]+\.[0-9]+)"'
Cargo.lock: 582 changes (file diff suppressed because it is too large)
@@ -42,8 +42,6 @@ members = [
"linkerd/idle-cache",
"linkerd/io",
"linkerd/meshtls",
"linkerd/meshtls/boring",
"linkerd/meshtls/rustls",
"linkerd/meshtls/verifier",
"linkerd/metrics",
"linkerd/mock/http-body",

@@ -71,6 +69,7 @@ members = [
 "linkerd/reconnect",
 "linkerd/retry",
 "linkerd/router",
+"linkerd/rustls",
 "linkerd/service-profiles",
 "linkerd/signal",
 "linkerd/stack",

@@ -117,8 +116,8 @@ prost-types = { version = "0.13" }
 tokio-rustls = { version = "0.26", default-features = false, features = [
 "logging",
 ] }
-tonic = { version = "0.12", default-features = false }
-tonic-build = { version = "0.12", default-features = false }
+tonic = { version = "0.13", default-features = false }
+tonic-build = { version = "0.13", default-features = false }
 tower = { version = "0.5", default-features = false }
 tower-service = { version = "0.3" }
 tower-test = { version = "0.4" }

@@ -135,4 +134,4 @@ default-features = false
 features = ["tokio", "tracing"]
 
 [workspace.dependencies.linkerd2-proxy-api]
-version = "0.16.0"
+version = "0.17.0"
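The dependency bumps above (tonic and tonic-build 0.12 -> 0.13, linkerd2-proxy-api 0.16.0 -> 0.17.0) are what drive the `tonic::body::BoxBody` -> `tonic::body::Body` renames in the Rust hunks further down. A minimal sketch of that bound change, using a plain boxed error type in place of the workspace's `Error` alias (an assumption for illustration, not taken from this diff):

    // Assumes a tonic 0.13 dependency; `BoxError` stands in for the workspace's
    // `Error` alias and is not part of this diff.
    type BoxError = Box<dyn std::error::Error + Send + Sync>;

    fn assert_grpc_client<S>(_svc: &S)
    where
        // tonic 0.12 spelled this `tonic::body::BoxBody`; 0.13 renames it to `Body`.
        S: tonic::client::GrpcService<tonic::body::Body, Error = BoxError>,
    {
    }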
deny.toml: 15 changes
@@ -23,11 +23,6 @@ allow = [
 private = { ignore = true }
 confidence-threshold = 0.8
 exceptions = [
-{ allow = [
-"ISC",
-"MIT",
-"OpenSSL",
-], name = "ring", version = "*" },
 { allow = [
 "ISC",
 "OpenSSL",

@@ -38,14 +33,6 @@ exceptions = [
 ], name = "aws-lc-fips-sys", version = "*" },
 ]
 
-[[licenses.clarify]]
-name = "ring"
-version = "*"
-expression = "MIT AND ISC AND OpenSSL"
-license-files = [
-{ path = "LICENSE", hash = 0xbd0eed23 },
-]
-
 [bans]
 multiple-versions = "deny"
 # Wildcard dependencies are used for all workspace-local crates.

@@ -55,6 +42,8 @@ deny = [
 { name = "rustls", wrappers = ["tokio-rustls"] },
 # rustls-webpki should be used instead.
 { name = "webpki" },
+# aws-lc-rs should be used instead.
+{ name = "ring" }
 ]
 skip = [
 # `linkerd-trace-context`, `rustls-pemfile` and `tonic` depend on `base64`
@@ -13,7 +13,7 @@
 use futures::future::{self, TryFutureExt};
 use http::StatusCode;
 use linkerd_app_core::{
-metrics::{self as metrics, FmtMetrics},
+metrics::{self as metrics, legacy::FmtMetrics},
 proxy::http::{Body, BoxBody, ClientHandle, Request, Response},
 trace, Error, Result,
 };

@@ -32,7 +32,7 @@ pub use self::readiness::{Latch, Readiness};
 
 #[derive(Clone)]
 pub struct Admin<M> {
-metrics: metrics::Serve<M>,
+metrics: metrics::legacy::Serve<M>,
 tracing: trace::Handle,
 ready: Readiness,
 shutdown_tx: mpsc::UnboundedSender<()>,

@@ -52,7 +52,7 @@ impl<M> Admin<M> {
 tracing: trace::Handle,
 ) -> Self {
 Self {
-metrics: metrics::Serve::new(metrics),
+metrics: metrics::legacy::Serve::new(metrics),
 ready,
 shutdown_tx,
 enable_shutdown,

@@ -2,7 +2,7 @@ use linkerd_app_core::{
 classify,
 config::ServerConfig,
 drain, errors, identity,
-metrics::{self, FmtMetrics},
+metrics::{self, legacy::FmtMetrics},
 proxy::http,
 serve,
 svc::{self, ExtractParam, InsertParam, Param},
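These admin hunks are the first of many that move the text-format metrics traits under a `legacy` module; the prometheus-client (`prom`) types are unaffected. A hedged sketch of what callers now import (the `MyLabels` type is hypothetical, only to show the new trait path):

    // Hypothetical label type; only the `legacy::FmtLabels` path is taken from the diff.
    use linkerd_metrics::legacy::FmtLabels; // was: linkerd_metrics::FmtLabels

    struct MyLabels;

    impl FmtLabels for MyLabels {
        fn fmt_labels(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            write!(f, "direction=\"in\"")
        }
    }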
@@ -13,31 +13,23 @@ independently of the inbound and outbound proxy logic.
"""

[dependencies]
bytes = { workspace = true }
drain = { workspace = true, features = ["retain"] }
http = { workspace = true }
http-body = { workspace = true }
http-body-util = { workspace = true }
hyper = { workspace = true, features = ["http1", "http2"] }
hyper-util = { workspace = true }
futures = { version = "0.3", default-features = false }
ipnet = "2.11"
prometheus-client = { workspace = true }
regex = "1"
serde_json = "1"
thiserror = "2"
tokio = { version = "1", features = ["macros", "sync", "parking_lot"] }
tokio-stream = { version = "0.1", features = ["time"] }
tonic = { workspace = true, default-features = false, features = ["prost"] }
tracing = { workspace = true }
parking_lot = "0.12"
pin-project = "1"

linkerd-addr = { path = "../../addr" }
linkerd-conditional = { path = "../../conditional" }
linkerd-dns = { path = "../../dns" }
linkerd-duplex = { path = "../../duplex" }
linkerd-errno = { path = "../../errno" }
linkerd-error = { path = "../../error" }
linkerd-error-respond = { path = "../../error-respond" }
linkerd-exp-backoff = { path = "../../exp-backoff" }

@@ -64,6 +56,7 @@ linkerd-proxy-tcp = { path = "../../proxy/tcp" }
 linkerd-proxy-transport = { path = "../../proxy/transport" }
 linkerd-reconnect = { path = "../../reconnect" }
 linkerd-router = { path = "../../router" }
+linkerd-rustls = { path = "../../rustls" }
 linkerd-service-profiles = { path = "../../service-profiles" }
 linkerd-stack = { path = "../../stack" }
 linkerd-stack-metrics = { path = "../../stack/metrics" }

@@ -83,5 +76,6 @@ features = ["make", "spawn-ready", "timeout", "util", "limit"]
semver = "1"

[dev-dependencies]
bytes = { workspace = true }
http-body-util = { workspace = true }
linkerd-mock-http-body = { path = "../../mock/http-body" }
quickcheck = { version = "1", default-features = false }
@@ -1,5 +1,4 @@
use crate::profiles;
pub use classify::gate;
use linkerd_error::Error;
use linkerd_proxy_client_policy as client_policy;
use linkerd_proxy_http::{classify, HasH2Reason, ResponseTimeoutError};

@@ -101,7 +101,7 @@ impl Config {
 identity: identity::NewClient,
 ) -> svc::ArcNewService<
 (),
-svc::BoxCloneSyncService<http::Request<tonic::body::BoxBody>, http::Response<RspBody>>,
+svc::BoxCloneSyncService<http::Request<tonic::body::Body>, http::Response<RspBody>>,
 > {
 let addr = self.addr;
 tracing::trace!(%addr, "Building");

@@ -25,6 +25,7 @@ pub mod metrics;
 pub mod proxy;
 pub mod serve;
 pub mod svc;
+pub mod tls_info;
 pub mod transport;
 
 pub use self::build_info::{BuildInfo, BUILD_INFO};

@@ -166,7 +166,7 @@ where
 // === impl Metrics ===
 
 impl Metrics {
-pub fn new(retain_idle: Duration) -> (Self, impl FmtMetrics + Clone + Send + 'static) {
+pub fn new(retain_idle: Duration) -> (Self, impl legacy::FmtMetrics + Clone + Send + 'static) {
 let (control, control_report) = {
 let m = http_metrics::Requests::<ControlLabels, Class>::default();
 let r = m.clone().into_report(retain_idle).with_prefix("control");

@@ -223,6 +223,7 @@ impl Metrics {
 opentelemetry,
 };
 
+use legacy::FmtMetrics as _;
 let report = endpoint_report
 .and_report(profile_route_report)
 .and_report(retry_report)

@@ -248,7 +249,7 @@ impl svc::Param<ControlLabels> for control::ControlAddr {
 }
 }
 
-impl FmtLabels for ControlLabels {
+impl legacy::FmtLabels for ControlLabels {
 fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 let Self { addr, server_id } = self;
 

@@ -281,7 +282,7 @@ impl ProfileRouteLabels {
 }
 }
 
-impl FmtLabels for ProfileRouteLabels {
+impl legacy::FmtLabels for ProfileRouteLabels {
 fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 let Self {
 direction,

@@ -314,7 +315,7 @@ impl From<OutboundEndpointLabels> for EndpointLabels {
 }
 }
 
-impl FmtLabels for EndpointLabels {
+impl legacy::FmtLabels for EndpointLabels {
 fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 match self {
 Self::Inbound(i) => (Direction::In, i).fmt_labels(f),

@@ -323,7 +324,7 @@ impl FmtLabels for EndpointLabels {
 }
 }
 
-impl FmtLabels for InboundEndpointLabels {
+impl legacy::FmtLabels for InboundEndpointLabels {
 fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 let Self {
 tls,

@@ -343,7 +344,7 @@ impl FmtLabels for InboundEndpointLabels {
 }
 }
 
-impl FmtLabels for ServerLabel {
+impl legacy::FmtLabels for ServerLabel {
 fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 let Self(meta, port) = self;
 write!(

@@ -374,7 +375,7 @@ impl prom::EncodeLabelSetMut for ServerLabel {
 }
 }
 
-impl FmtLabels for ServerAuthzLabels {
+impl legacy::FmtLabels for ServerAuthzLabels {
 fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 let Self { server, authz } = self;
 

@@ -389,7 +390,7 @@ impl FmtLabels for ServerAuthzLabels {
 }
 }
 
-impl FmtLabels for RouteLabels {
+impl legacy::FmtLabels for RouteLabels {
 fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 let Self { server, route } = self;
 

@@ -404,7 +405,7 @@ impl FmtLabels for RouteLabels {
 }
 }
 
-impl FmtLabels for RouteAuthzLabels {
+impl legacy::FmtLabels for RouteAuthzLabels {
 fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 let Self { route, authz } = self;
 

@@ -425,7 +426,7 @@ impl svc::Param<OutboundZoneLocality> for OutboundEndpointLabels {
 }
 }
 
-impl FmtLabels for OutboundEndpointLabels {
+impl legacy::FmtLabels for OutboundEndpointLabels {
 fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 let Self {
 server_id,

@@ -462,20 +463,20 @@ impl fmt::Display for Direction {
 }
 }
 
-impl FmtLabels for Direction {
+impl legacy::FmtLabels for Direction {
 fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 write!(f, "direction=\"{self}\"")
 }
 }
 
-impl FmtLabels for Authority<'_> {
+impl legacy::FmtLabels for Authority<'_> {
 fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 let Self(authority) = self;
 write!(f, "authority=\"{authority}\"")
 }
 }
 
-impl FmtLabels for Class {
+impl legacy::FmtLabels for Class {
 fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 let class = |ok: bool| if ok { "success" } else { "failure" };
 

@@ -523,7 +524,7 @@ impl StackLabels {
 }
 }
 
-impl FmtLabels for StackLabels {
+impl legacy::FmtLabels for StackLabels {
 fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 let Self {
 direction,
@@ -0,0 +1,70 @@
+use linkerd_metrics::prom;
+use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue, LabelValueEncoder};
+use std::{
+    fmt::{Error, Write},
+    sync::{Arc, OnceLock},
+};
+
+static TLS_INFO: OnceLock<Arc<TlsInfo>> = OnceLock::new();
+
+#[derive(Clone, Debug, Default, Hash, PartialEq, Eq, EncodeLabelSet)]
+pub struct TlsInfo {
+    tls_suites: MetricValueList,
+    tls_kx_groups: MetricValueList,
+    tls_rand: String,
+    tls_key_provider: String,
+    tls_fips: bool,
+}
+
+#[derive(Clone, Debug, Default, Hash, PartialEq, Eq)]
+struct MetricValueList {
+    values: Vec<&'static str>,
+}
+
+impl FromIterator<&'static str> for MetricValueList {
+    fn from_iter<T: IntoIterator<Item = &'static str>>(iter: T) -> Self {
+        MetricValueList {
+            values: iter.into_iter().collect(),
+        }
+    }
+}
+
+impl EncodeLabelValue for MetricValueList {
+    fn encode(&self, encoder: &mut LabelValueEncoder<'_>) -> Result<(), Error> {
+        for value in &self.values {
+            value.encode(encoder)?;
+            encoder.write_char(',')?;
+        }
+        Ok(())
+    }
+}
+
+pub fn metric() -> prom::Family<TlsInfo, prom::ConstGauge> {
+    let fam = prom::Family::<TlsInfo, prom::ConstGauge>::new_with_constructor(|| {
+        prom::ConstGauge::new(1)
+    });
+
+    let tls_info = TLS_INFO.get_or_init(|| {
+        let provider = linkerd_rustls::get_default_provider();
+
+        let tls_suites = provider
+            .cipher_suites
+            .iter()
+            .flat_map(|cipher_suite| cipher_suite.suite().as_str())
+            .collect::<MetricValueList>();
+        let tls_kx_groups = provider
+            .kx_groups
+            .iter()
+            .flat_map(|suite| suite.name().as_str())
+            .collect::<MetricValueList>();
+        Arc::new(TlsInfo {
+            tls_suites,
+            tls_kx_groups,
+            tls_rand: format!("{:?}", provider.secure_random),
+            tls_key_provider: format!("{:?}", provider.key_provider),
+            tls_fips: provider.fips(),
+        })
+    });
+    let _ = fam.get_or_create(tls_info);
+    fam
+}
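The new `tls_info` module above exposes a constant info-style gauge; a later hunk registers it as `rustls_info` via `registry.register("rustls_info", "Proxy TLS info", tls_info::metric())`. A standalone sketch of the same registration pattern using prometheus-client directly (a bare `ConstGauge` stands in for the `Family<TlsInfo, ConstGauge>` purely to keep the sketch self-contained):

    use prometheus_client::{encoding::text::encode, metrics::gauge::ConstGauge, registry::Registry};

    fn main() {
        let mut registry = Registry::default();
        // The proxy registers a Family<TlsInfo, ConstGauge>; a bare ConstGauge(1)
        // is used here only so the example compiles on its own.
        registry.register("rustls_info", "Proxy TLS info", ConstGauge::new(1i64));

        let mut out = String::new();
        encode(&mut out, &registry).expect("text encoding should not fail");
        println!("{out}");
    }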
@@ -1,7 +1,7 @@
 use crate::metrics::ServerLabel as PolicyServerLabel;
 pub use crate::metrics::{Direction, OutboundEndpointLabels};
 use linkerd_conditional::Conditional;
-use linkerd_metrics::FmtLabels;
+use linkerd_metrics::legacy::FmtLabels;
 use linkerd_tls as tls;
 use std::{fmt, net::SocketAddr};
 

@@ -13,8 +13,7 @@ Configures and runs the inbound proxy
 test-util = [
 "linkerd-app-test",
 "linkerd-idle-cache/test-util",
-"linkerd-meshtls/rustls",
-"linkerd-meshtls-rustls/test-util",
+"linkerd-meshtls/test-util",
 ]
 
 [dependencies]

@@ -25,8 +24,7 @@ linkerd-app-core = { path = "../core" }
 linkerd-app-test = { path = "../test", optional = true }
 linkerd-http-access-log = { path = "../../http/access-log" }
 linkerd-idle-cache = { path = "../../idle-cache" }
-linkerd-meshtls = { path = "../../meshtls", optional = true }
-linkerd-meshtls-rustls = { path = "../../meshtls/rustls", optional = true, default-features = false }
+linkerd-meshtls = { path = "../../meshtls", optional = true, default-features = false }
 linkerd-proxy-client-policy = { path = "../../proxy/client-policy" }
 linkerd-tonic-stream = { path = "../../tonic-stream" }
 linkerd-tonic-watch = { path = "../../tonic-watch" }

@@ -49,7 +47,7 @@ hyper = { workspace = true, features = ["http1", "http2"] }
 linkerd-app-test = { path = "../test" }
 arbitrary = { version = "1", features = ["derive"] }
 libfuzzer-sys = { version = "0.4", features = ["arbitrary-derive"] }
-linkerd-meshtls-rustls = { path = "../../meshtls/rustls", features = [
+linkerd-meshtls = { path = "../../meshtls", features = [
 "test-util",
 ] }
 

@@ -62,8 +60,7 @@ linkerd-http-metrics = { path = "../../http/metrics", features = ["test-util"] }
 linkerd-http-box = { path = "../../http/box" }
 linkerd-idle-cache = { path = "../../idle-cache", features = ["test-util"] }
 linkerd-io = { path = "../../io", features = ["tokio-test"] }
-linkerd-meshtls = { path = "../../meshtls", features = ["rustls"] }
-linkerd-meshtls-rustls = { path = "../../meshtls/rustls", features = [
+linkerd-meshtls = { path = "../../meshtls", features = [
 "test-util",
 ] }
 linkerd-proxy-server-policy = { path = "../../proxy/server-policy", features = [

@@ -18,8 +18,7 @@ linkerd-app-core = { path = "../../core" }
 linkerd-app-inbound = { path = ".." }
 linkerd-app-test = { path = "../../test" }
 linkerd-idle-cache = { path = "../../../idle-cache", features = ["test-util"] }
-linkerd-meshtls = { path = "../../../meshtls", features = ["rustls"] }
-linkerd-meshtls-rustls = { path = "../../../meshtls/rustls", features = [
+linkerd-meshtls = { path = "../../../meshtls", features = [
 "test-util",
 ] }
 linkerd-tracing = { path = "../../../tracing", features = ["ansi"] }
@@ -117,7 +117,7 @@ impl<N> Inbound<N> {
 let identity = rt
 .identity
 .server()
-.with_alpn(vec![transport_header::PROTOCOL.into()])
+.spawn_with_alpn(vec![transport_header::PROTOCOL.into()])
 .expect("TLS credential store must be held");
 
 inner

@@ -113,10 +113,6 @@ impl<S> Inbound<S> {
 &self.runtime.identity
 }
 
-pub fn proxy_metrics(&self) -> &metrics::Proxy {
-&self.runtime.metrics.proxy
-}
-
 /// A helper for gateways to instrument policy checks.
 pub fn authorize_http<N>(
 &self,

@@ -13,7 +13,7 @@ pub(crate) mod error;
 
 pub use linkerd_app_core::metrics::*;
 
-/// Holds outbound proxy metrics.
+/// Holds LEGACY inbound proxy metrics.
 #[derive(Clone, Debug)]
 pub struct InboundMetrics {
 pub http_authz: authz::HttpAuthzMetrics,

@@ -50,7 +50,7 @@ impl InboundMetrics {
 }
 }
 
-impl FmtMetrics for InboundMetrics {
+impl legacy::FmtMetrics for InboundMetrics {
 fn fmt_metrics(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
 self.http_authz.fmt_metrics(f)?;
 self.http_errors.fmt_metrics(f)?;
@@ -1,8 +1,9 @@
 use crate::policy::{AllowPolicy, HttpRoutePermit, Meta, ServerPermit};
 use linkerd_app_core::{
 metrics::{
-metrics, Counter, FmtLabels, FmtMetrics, RouteAuthzLabels, RouteLabels, ServerAuthzLabels,
-ServerLabel, TargetAddr, TlsAccept,
+legacy::{Counter, FmtLabels, FmtMetrics},
+metrics, RouteAuthzLabels, RouteLabels, ServerAuthzLabels, ServerLabel, TargetAddr,
+TlsAccept,
 },
 tls,
 transport::OrigDstAddr,

@@ -8,7 +8,7 @@ use crate::{
 };
 use linkerd_app_core::{
 errors::{FailFastError, LoadShedError},
-metrics::FmtLabels,
+metrics::legacy::FmtLabels,
 tls,
 };
 use std::fmt;

@@ -1,6 +1,9 @@
 use super::ErrorKind;
 use linkerd_app_core::{
-metrics::{metrics, Counter, FmtMetrics, ServerLabel},
+metrics::{
+legacy::{Counter, FmtMetrics},
+metrics, ServerLabel,
+},
 svc::{self, stack::NewMonitor},
 transport::{labels::TargetAddr, OrigDstAddr},
 Error,

@@ -1,6 +1,9 @@
 use super::ErrorKind;
 use linkerd_app_core::{
-metrics::{metrics, Counter, FmtMetrics},
+metrics::{
+legacy::{Counter, FmtMetrics},
+metrics,
+},
 svc::{self, stack::NewMonitor},
 transport::{labels::TargetAddr, OrigDstAddr},
 Error,

@@ -33,7 +33,7 @@ static INVALID_POLICY: once_cell::sync::OnceCell<ServerPolicy> = once_cell::sync
 
 impl<S> Api<S>
 where
-S: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error> + Clone,
+S: tonic::client::GrpcService<tonic::body::Body, Error = Error> + Clone,
 S::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error> + Send + 'static,
 {
 pub(super) fn new(

@@ -57,7 +57,7 @@ where
 
 impl<S> Service<u16> for Api<S>
 where
-S: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
+S: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
 S: Clone + Send + Sync + 'static,
 S::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error> + Send + 'static,
 S::Future: Send + 'static,

@@ -40,7 +40,7 @@ impl Config {
 limits: ReceiveLimits,
 ) -> impl GetPolicy + Clone + Send + Sync + 'static
 where
-C: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
+C: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
 C: Clone + Unpin + Send + Sync + 'static,
 C::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error>,
 C::ResponseBody: Send + 'static,

@@ -74,7 +74,7 @@ impl<S> Store<S> {
 opaque_ports: RangeInclusiveSet<u16>,
 ) -> Self
 where
-S: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
+S: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
 S: Clone + Send + Sync + 'static,
 S::Future: Send,
 S::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error> + Send + 'static,

@@ -138,7 +138,7 @@ impl<S> Store<S> {
 
 impl<S> GetPolicy for Store<S>
 where
-S: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
+S: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
 S: Clone + Send + Sync + 'static,
 S::Future: Send,
 S::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error> + Send + 'static,

@@ -263,7 +263,7 @@ fn orig_dst_addr() -> OrigDstAddr {
 OrigDstAddr(([192, 0, 2, 2], 1000).into())
 }
 
-impl tonic::client::GrpcService<tonic::body::BoxBody> for MockSvc {
+impl tonic::client::GrpcService<tonic::body::Body> for MockSvc {
 type ResponseBody = linkerd_app_core::control::RspBody;
 type Error = Error;
 type Future = futures::future::Pending<Result<http::Response<Self::ResponseBody>, Self::Error>>;

@@ -275,7 +275,7 @@ impl tonic::client::GrpcService<tonic::body::BoxBody> for MockSvc {
 unreachable!()
 }
 
-fn call(&mut self, _req: http::Request<tonic::body::BoxBody>) -> Self::Future {
+fn call(&mut self, _req: http::Request<tonic::body::Body>) -> Self::Future {
 unreachable!()
 }
 }

@@ -27,7 +27,7 @@ impl Inbound<()> {
 limits: ReceiveLimits,
 ) -> impl policy::GetPolicy + Clone + Send + Sync + 'static
 where
-C: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
+C: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
 C: Clone + Unpin + Send + Sync + 'static,
 C::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error>,
 C::ResponseBody: Send + 'static,
@@ -3,9 +3,7 @@ pub use futures::prelude::*;
 use linkerd_app_core::{
 config,
 dns::Suffix,
-drain, exp_backoff,
-identity::rustls,
-metrics,
+drain, exp_backoff, identity, metrics,
 proxy::{
 http::{h1, h2},
 tap,

@@ -98,7 +96,7 @@ pub fn runtime() -> (ProxyRuntime, drain::Signal) {
 let (tap, _) = tap::new();
 let (metrics, _) = metrics::Metrics::new(std::time::Duration::from_secs(10));
 let runtime = ProxyRuntime {
-identity: rustls::creds::default_for_test().1.into(),
+identity: identity::creds::default_for_test().1,
 metrics: metrics.proxy,
 tap,
 span_sink: None,

@@ -28,8 +28,9 @@ ipnet = "2"
 linkerd-app = { path = "..", features = ["allow-loopback"] }
 linkerd-app-core = { path = "../core" }
 linkerd-app-test = { path = "../test" }
-linkerd-meshtls-rustls = { path = "../../meshtls/rustls", features = ["test-util"] }
+linkerd-meshtls = { path = "../../meshtls", features = ["test-util"] }
 linkerd-metrics = { path = "../../metrics", features = ["test_util"] }
+linkerd-rustls = { path = "../../rustls" }
 linkerd-tracing = { path = "../../tracing" }
 maplit = "1"
 parking_lot = "0.12"

@@ -39,7 +40,7 @@ socket2 = "0.6"
 tokio = { version = "1", features = ["io-util", "net", "rt", "macros"] }
 tokio-rustls = { workspace = true }
 tokio-stream = { version = "0.1", features = ["sync"] }
-tonic = { workspace = true, features = ["transport"], default-features = false }
+tonic = { workspace = true, features = ["transport", "router"], default-features = false }
 tower = { workspace = true, default-features = false }
 tracing = { workspace = true }
 

@@ -73,8 +74,5 @@ flate2 = { version = "1", default-features = false, features = [
 ] }
 # Log streaming isn't enabled by default globally, but we want to test it.
 linkerd-app-admin = { path = "../admin", features = ["log-streaming"] }
-# No code from this crate is actually used; only necessary to enable the Rustls
-# implementation.
-linkerd-meshtls = { path = "../../meshtls", features = ["rustls"] }
 linkerd-tracing = { path = "../../tracing", features = ["ansi"] }
 serde_json = "1"
@@ -8,7 +8,7 @@ use std::{
 };
 
 use linkerd2_proxy_api::identity as pb;
-use linkerd_meshtls_rustls::creds::default_provider_for_test;
+use linkerd_rustls::get_default_provider;
 use tokio_rustls::rustls::{self, server::WebPkiClientVerifier};
 use tonic as grpc;
 

@@ -107,7 +107,7 @@ impl Identity {
 assert_ne!(added, 0, "trust anchors must include at least one cert");
 assert_eq!(skipped, 0, "no certs in pemfile should be invalid");
 
-let provider = default_provider_for_test();
+let provider = get_default_provider();
 
 let client_config = rustls::ClientConfig::builder_with_provider(provider.clone())
 .with_safe_default_protocol_versions()

@@ -302,7 +302,7 @@ impl Controller {
 }
 
 pub async fn run(self) -> controller::Listening {
-let svc = grpc::transport::Server::builder()
+let routes = grpc::service::Routes::default()
 .add_service(
 inbound_server_policies_server::InboundServerPoliciesServer::new(Server(Arc::new(
 self.inbound,

@@ -310,9 +310,9 @@ impl Controller {
 )
 .add_service(outbound_policies_server::OutboundPoliciesServer::new(
 Server(Arc::new(self.outbound)),
-))
-.into_service();
-controller::run(RoutesSvc(svc), "support policy controller", None).await
+));
+
+controller::run(RoutesSvc(routes), "support policy controller", None).await
 }
 }
 

@@ -525,7 +525,9 @@ impl Service<Request<hyper::body::Incoming>> for RoutesSvc {
 
 fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
 let Self(routes) = self;
-routes.poll_ready(cx)
+<grpc::service::Routes as Service<Request<UnsyncBoxBody<Bytes, grpc::Status>>>>::poll_ready(
+routes, cx,
+)
 }
 
 fn call(&mut self, req: Request<hyper::body::Incoming>) -> Self::Future {
@@ -13,7 +13,7 @@ Configures and runs the outbound proxy
 default = []
 allow-loopback = []
 test-subscriber = []
-test-util = ["linkerd-app-test", "linkerd-meshtls-rustls/test-util", "dep:http-body"]
+test-util = ["linkerd-app-test", "linkerd-meshtls/test-util", "dep:http-body"]
 
 prometheus-client-rust-242 = [] # TODO
 

@@ -42,7 +42,7 @@ linkerd-http-prom = { path = "../../http/prom" }
 linkerd-http-retry = { path = "../../http/retry" }
 linkerd-http-route = { path = "../../http/route" }
 linkerd-identity = { path = "../../identity" }
-linkerd-meshtls-rustls = { path = "../../meshtls/rustls", optional = true, default-features = false }
+linkerd-meshtls = { path = "../../meshtls", optional = true, default-features = false }
 linkerd-opaq-route = { path = "../../opaq-route" }
 linkerd-proxy-client-policy = { path = "../../proxy/client-policy", features = [
 "proto",

@@ -67,8 +67,7 @@ linkerd-app-test = { path = "../test", features = ["client-policy"] }
 linkerd-http-box = { path = "../../http/box" }
 linkerd-http-prom = { path = "../../http/prom", features = ["test-util"] }
 linkerd-io = { path = "../../io", features = ["tokio-test"] }
-linkerd-meshtls = { path = "../../meshtls", features = ["rustls"] }
-linkerd-meshtls-rustls = { path = "../../meshtls/rustls", features = [
+linkerd-meshtls = { path = "../../meshtls", features = [
 "test-util",
 ] }
 linkerd-mock-http-body = { path = "../../mock/http-body" }

@@ -146,7 +146,7 @@ impl Outbound<()> {
 export_hostname_labels: bool,
 ) -> impl policy::GetPolicy
 where
-C: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
+C: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
 C: Clone + Unpin + Send + Sync + 'static,
 C::ResponseBody: proxy::http::Body<Data = tonic::codegen::Bytes, Error = Error>,
 C::ResponseBody: Send + 'static,

@@ -130,7 +130,7 @@ impl OutboundMetrics {
 }
 }
 
-impl FmtMetrics for OutboundMetrics {
+impl legacy::FmtMetrics for OutboundMetrics {
 fn fmt_metrics(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
 self.http_errors.fmt_metrics(f)?;
 self.tcp_errors.fmt_metrics(f)?;

@@ -243,7 +243,7 @@ impl EncodeLabelSet for RouteRef {
 
 // === impl ConcreteLabels ===
 
-impl FmtLabels for ConcreteLabels {
+impl legacy::FmtLabels for ConcreteLabels {
 fn fmt_labels(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
 let ConcreteLabels(parent, backend) = self;
 

@@ -5,7 +5,7 @@ pub(crate) use self::{http::Http, tcp::Tcp};
 use crate::http::IdentityRequired;
 use linkerd_app_core::{
 errors::{FailFastError, LoadShedError},
-metrics::FmtLabels,
+metrics::legacy::FmtLabels,
 proxy::http::ResponseTimeoutError,
 };
 use std::fmt;
@@ -1,6 +1,9 @@
 use super::ErrorKind;
 use linkerd_app_core::{
-metrics::{metrics, Counter, FmtMetrics},
+metrics::{
+legacy::{Counter, FmtMetrics},
+metrics,
+},
 svc, Error,
 };
 use parking_lot::RwLock;

@@ -1,6 +1,9 @@
 use super::ErrorKind;
 use linkerd_app_core::{
-metrics::{metrics, Counter, FmtMetrics},
+metrics::{
+legacy::{Counter, FmtMetrics},
+metrics,
+},
 svc,
 transport::{labels::TargetAddr, OrigDstAddr},
 Error,

@@ -33,7 +33,7 @@ static INVALID_POLICY: once_cell::sync::OnceCell<ClientPolicy> = once_cell::sync
 
 impl<S> Api<S>
 where
-S: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error> + Clone,
+S: tonic::client::GrpcService<tonic::body::Body, Error = Error> + Clone,
 S::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error> + Send + 'static,
 {
 pub(crate) fn new(

@@ -59,7 +59,7 @@ where
 
 impl<S> Service<Addr> for Api<S>
 where
-S: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
+S: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
 S: Clone + Send + Sync + 'static,
 S::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error> + Send + 'static,
 S::Future: Send + 'static,

@@ -60,7 +60,7 @@ pub(crate) fn runtime() -> (ProxyRuntime, drain::Signal) {
 let (tap, _) = tap::new();
 let (metrics, _) = metrics::Metrics::new(std::time::Duration::from_secs(10));
 let runtime = ProxyRuntime {
-identity: linkerd_meshtls_rustls::creds::default_for_test().1.into(),
+identity: linkerd_meshtls::creds::default_for_test().1,
 metrics: metrics.proxy,
 tap,
 span_sink: None,

@@ -43,7 +43,7 @@ impl Config {
 ) -> Result<
 Dst<
 impl svc::Service<
-http::Request<tonic::body::BoxBody>,
+http::Request<tonic::body::Body>,
 Response = http::Response<control::RspBody>,
 Error = Error,
 Future = impl Send,
@@ -4,7 +4,8 @@ pub use linkerd_app_core::identity::{client, Id};
 use linkerd_app_core::{
 control, dns,
 identity::{
-client::linkerd::Certify, creds, CertMetrics, Credentials, DerX509, Mode, WithCertMetrics,
+client::linkerd::Certify, creds, watch as watch_identity, CertMetrics, Credentials,
+DerX509, WithCertMetrics,
 },
 metrics::{prom, ControlHttp as ClientMetrics},
 Result,

@@ -137,8 +138,7 @@ fn watch(
 watch::Receiver<bool>,
 )> {
 let (tx, ready) = watch::channel(false);
-let (store, receiver) =
-Mode::default().watch(tls.id, tls.server_name, &tls.trust_anchors_pem)?;
+let (store, receiver) = watch_identity(tls.id, tls.server_name, &tls.trust_anchors_pem)?;
 let cred = WithCertMetrics::new(metrics, NotifyReady { store, tx });
 Ok((cred, receiver, ready))
 }

@@ -19,9 +19,10 @@ use linkerd_app_core::{
 config::ServerConfig,
 control::{ControlAddr, Metrics as ControlMetrics},
 dns, drain,
-metrics::{prom, FmtMetrics},
+metrics::{legacy::FmtMetrics, prom},
 serve,
 svc::Param,
+tls_info,
 transport::{addrs::*, listen::Bind},
 Error, ProxyRuntime,
 };
@@ -251,9 +252,6 @@ impl Config {
 export_hostname_labels,
 );
 
-let dst_addr = dst.addr.clone();
-// registry.sub_registry_with_prefix("gateway"),
-
 let gateway = gateway::Gateway::new(gateway, inbound.clone(), outbound.clone()).stack(
 dst.resolve.clone(),
 dst.profiles.clone(),

@@ -304,6 +302,7 @@
 error!(%error, "Failed to register process metrics");
 }
 registry.register("proxy_build_info", "Proxy build info", BUILD_INFO.metric());
+registry.register("rustls_info", "Proxy TLS info", tls_info::metric());
 
 let admin = {
 let identity = identity.receiver().server();

@@ -330,7 +329,7 @@
 
 Ok(App {
 admin,
-dst: dst_addr,
+dst: dst.addr,
 drain: drain_tx,
 identity,
 inbound_addr,

@@ -46,7 +46,7 @@ impl Config {
 ) -> Result<
 Policy<
 impl svc::Service<
-http::Request<tonic::body::BoxBody>,
+http::Request<tonic::body::Body>,
 Response = http::Response<control::RspBody>,
 Error = Error,
 Future = impl Send,
@@ -6,7 +6,7 @@ use linkerd_opencensus::{self as opencensus, metrics, proto};
 use std::{collections::HashMap, time::SystemTime};
 use tokio::sync::mpsc;
 use tokio_stream::wrappers::ReceiverStream;
-use tonic::{body::BoxBody, client::GrpcService};
+use tonic::{body::Body as TonicBody, client::GrpcService};
 use tracing::Instrument;
 
 pub(super) fn create_collector<S>(

@@ -18,7 +18,7 @@ pub(super) fn create_collector<S>(
 legacy_metrics: metrics::Registry,
 ) -> EnabledCollector
 where
-S: GrpcService<BoxBody> + Clone + Send + 'static,
+S: GrpcService<TonicBody> + Clone + Send + 'static,
 S::Error: Into<Error>,
 S::Future: Send,
 S::ResponseBody: Body<Data = tonic::codegen::Bytes> + Send + 'static,

@@ -15,7 +15,7 @@ use std::{
 };
 use tokio::sync::mpsc;
 use tokio_stream::wrappers::ReceiverStream;
-use tonic::{body::BoxBody, client::GrpcService};
+use tonic::{body::Body as TonicBody, client::GrpcService};
 use tracing::Instrument;
 
 pub(super) struct OtelCollectorAttributes {

@@ -31,7 +31,7 @@ pub(super) fn create_collector<S>(
 legacy_metrics: metrics::Registry,
 ) -> EnabledCollector
 where
-S: GrpcService<BoxBody> + Clone + Send + 'static,
+S: GrpcService<TonicBody> + Clone + Send + 'static,
 S::Error: Into<Error>,
 S::Future: Send,
 S::ResponseBody: Body<Data = tonic::codegen::Bytes> + Send + 'static,

@@ -11,7 +11,6 @@ ahash = "0.8"
linkerd-stack = { path = "../stack" }
parking_lot = "0.12"
rand = { version = "0.9", features = ["small_rng"] }
tokio = { version = "1", features = ["macros"] }
tracing = { workspace = true }

[dev-dependencies]
@@ -7,14 +7,15 @@ edition = { workspace = true }
publish = { workspace = true }

[dependencies]
futures = { version = "0.3", default-features = false }
hickory-resolver = "0.25.2"
linkerd-dns-name = { path = "./name" }
linkerd-error = { path = "../error" }
prometheus-client = { workspace = true }
thiserror = "2"
tokio = { version = "1", features = ["rt", "sync", "time"] }
tracing = { workspace = true }

[dev-dependencies]
linkerd-error = { path = "../error" }

[lints.rust]
unexpected_cfgs = { level = "warn", check-cfg = ['cfg(fuzzing)'] }

@@ -15,10 +15,10 @@ tokio = { version = "1", default-features = false }
tracing = { workspace = true }

linkerd-error = { path = "../../error" }
linkerd-http-box = { path = "../../http/box" }
linkerd-stack = { path = "../../stack" }

[dev-dependencies]
tokio-test = "0.4"
tower-test = { workspace = true }
linkerd-http-box = { path = "../../http/box" }
linkerd-tracing = { path = "../../tracing", features = ["ansi"] }
@@ -2,7 +2,7 @@ use super::{ClassifyEos, ClassifyResponse};
 use futures::{prelude::*, ready};
 use http_body::Frame;
 use linkerd_error::Error;
-use linkerd_stack::{layer, ExtractParam, NewService, Service};
+use linkerd_stack::Service;
 use pin_project::{pin_project, pinned_drop};
 use std::{
 fmt::Debug,

@@ -12,18 +12,6 @@ use std::{
 };
 use tokio::sync::mpsc;
 
-/// Constructs new [`BroadcastClassification`] services.
-///
-/// `X` is an [`ExtractParam`] implementation that extracts a [`Tx`] from each
-/// target. The [`Tx`] is used to broadcast the classification of each response
-/// from the constructed [`BroadcastClassification`] service.
-#[derive(Debug)]
-pub struct NewBroadcastClassification<C, X, N> {
-inner: N,
-extract: X,
-_marker: PhantomData<fn() -> C>,
-}
-
 /// A HTTP `Service` that applies a [`ClassifyResponse`] to each response, and
 /// broadcasts the classification over a [`mpsc`] channel.
 #[derive(Debug)]

@@ -33,14 +21,6 @@ pub struct BroadcastClassification<C: ClassifyResponse, S> {
 _marker: PhantomData<fn() -> C>,
 }
 
-/// A handle to a [`mpsc`] channel over which response classifications are
-/// broadcasted.
-///
-/// This is extracted from a target value by [`NewBroadcastClassification`] when
-/// constructing a [`BroadcastClassification`] service.
-#[derive(Clone, Debug)]
-pub struct Tx<C>(pub mpsc::Sender<C>);
-
 #[pin_project]
 pub struct ResponseFuture<C: ClassifyResponse, B, F> {
 #[pin]

@@ -62,59 +42,6 @@ struct State<C, T> {
 tx: mpsc::Sender<T>,
 }
 
-// === impl NewBroadcastClassification ===
-
-impl<C, X: Clone, N> NewBroadcastClassification<C, X, N> {
-pub fn new(extract: X, inner: N) -> Self {
-Self {
-inner,
-extract,
-_marker: PhantomData,
-}
-}
-
-/// Returns a [`layer::Layer`] that constructs `NewBroadcastClassification`
-/// [`NewService`]s, using the provided [`ExtractParam`] implementation to
-/// extract a classification [`Tx`] from the target.
-pub fn layer_via(extract: X) -> impl layer::Layer<N, Service = Self> + Clone {
-layer::mk(move |inner| Self::new(extract.clone(), inner))
-}
-}
-
-impl<C, N> NewBroadcastClassification<C, (), N> {
-/// Returns a [`layer::Layer`] that constructs `NewBroadcastClassification`
-/// [`NewService`]s when the target type implements
-/// [`linkerd_stack::Param`]`<`[`Tx`]`>`.
-pub fn layer() -> impl layer::Layer<N, Service = Self> + Clone {
-Self::layer_via(())
-}
-}
-
-impl<T, C, X, N> NewService<T> for NewBroadcastClassification<C, X, N>
-where
-C: ClassifyResponse,
-X: ExtractParam<Tx<C::Class>, T>,
-N: NewService<T>,
-{
-type Service = BroadcastClassification<C, N::Service>;
-
-fn new_service(&self, target: T) -> Self::Service {
-let Tx(tx) = self.extract.extract_param(&target);
-let inner = self.inner.new_service(target);
-BroadcastClassification::new(tx, inner)
-}
-}
-
-impl<C, X: Clone, N: Clone> Clone for NewBroadcastClassification<C, X, N> {
-fn clone(&self) -> Self {
-Self {
-inner: self.inner.clone(),
-extract: self.extract.clone(),
-_marker: PhantomData,
-}
-}
-}
-
 // === impl BroadcastClassification ===
 
 impl<C: ClassifyResponse, S> BroadcastClassification<C, S> {
@@ -4,12 +4,12 @@
 use linkerd_error::Error;
 
 pub use self::{
-channel::{BroadcastClassification, NewBroadcastClassification, Tx},
+channel::BroadcastClassification,
 gate::{NewClassifyGate, NewClassifyGateSet},
 insert::{InsertClassifyResponse, NewInsertClassifyResponse},
 };
 
-pub mod channel;
+mod channel;
 pub mod gate;
 mod insert;
 
@@ -10,11 +10,9 @@ publish = { workspace = true }
test-util = []

[dependencies]
bytes = { workspace = true }
futures = { version = "0.3", default-features = false }
http = { workspace = true }
http-body = { workspace = true }
hyper = { workspace = true, features = ["http1", "http2"] }
parking_lot = "0.12"
pin-project = "1"
tokio = { version = "1", features = ["time"] }
@@ -2,7 +2,7 @@
 #![forbid(unsafe_code)]
 
 pub use self::{requests::Requests, retries::Retries};
-use linkerd_metrics::SharedStore;
+use linkerd_metrics::legacy::SharedStore;
 use parking_lot::Mutex;
 use std::{fmt, hash::Hash, time::Duration};
 

@@ -4,7 +4,10 @@ mod service;
 pub use self::service::{NewHttpMetrics, ResponseBody};
 use super::Report;
 use linkerd_http_classify::ClassifyResponse;
-use linkerd_metrics::{latency, Counter, FmtMetrics, Histogram, LastUpdate, NewMetrics};
+use linkerd_metrics::{
+latency,
+legacy::{Counter, FmtMetrics, Histogram, LastUpdate, NewMetrics},
+};
 use linkerd_stack::{self as svc, layer};
 use std::{collections::HashMap, fmt::Debug, hash::Hash};
 use tokio::time::{Duration, Instant};

@@ -146,7 +149,7 @@ impl ClassMetrics {
 mod tests {
 #[test]
 fn expiry() {
-use linkerd_metrics::FmtLabels;
+use linkerd_metrics::legacy::FmtLabels;
 use std::fmt;
 use tokio::time::{Duration, Instant};
 

@@ -1,7 +1,8 @@
 use super::{ClassMetrics, Metrics, StatusMetrics};
 use crate::{Prefixed, Report};
 use linkerd_metrics::{
-latency, Counter, FmtLabels, FmtMetric, FmtMetrics, Histogram, Metric, Store,
+latency,
+legacy::{Counter, FmtLabels, FmtMetric, FmtMetrics, Histogram, Metric, Store},
 };
 use parking_lot::Mutex;
 use std::{fmt, hash::Hash};

@@ -3,7 +3,7 @@ use futures::{ready, TryFuture};
 use http_body::{Body, Frame};
 use linkerd_error::Error;
 use linkerd_http_classify::{ClassifyEos, ClassifyResponse};
-use linkerd_metrics::NewMetrics;
+use linkerd_metrics::legacy::NewMetrics;
 use linkerd_stack::Proxy;
 use parking_lot::Mutex;
 use pin_project::{pin_project, pinned_drop};

@@ -1,5 +1,5 @@
 use super::{Prefixed, Registry, Report};
-use linkerd_metrics::{Counter, FmtLabels, FmtMetric, FmtMetrics, LastUpdate, Metric};
+use linkerd_metrics::legacy::{Counter, FmtLabels, FmtMetric, FmtMetrics, LastUpdate, Metric};
 use parking_lot::Mutex;
 use std::{fmt, hash::Hash, sync::Arc};
 use tokio::time::{Duration, Instant};
@@ -17,7 +17,6 @@ bytes = { workspace = true }
futures = { version = "0.3", default-features = false }
http = { workspace = true }
http-body = { workspace = true }
parking_lot = "0.12"
pin-project = "1"
prometheus-client = { workspace = true }
thiserror = "2"

@@ -14,7 +14,6 @@ http-body-util = { workspace = true }
http = { workspace = true }
parking_lot = "0.12"
pin-project = "1"
tokio = { version = "1", features = ["macros", "rt"] }
tower = { workspace = true, features = ["retry"] }
tracing = { workspace = true }
thiserror = "2"

@@ -26,7 +25,6 @@ linkerd-metrics = { path = "../../metrics" }
linkerd-stack = { path = "../../stack" }

[dev-dependencies]
hyper = { workspace = true }
linkerd-tracing = { path = "../../tracing", features = ["ansi"] }
linkerd-mock-http-body = { path = "../../mock/http-body" }
tokio = { version = "1", features = ["macros", "rt"] }

@@ -21,6 +21,3 @@ url = "2"
workspace = true
features = ["http-route", "grpc-route"]
optional = true

[dev-dependencies]
maplit = "1"

@@ -10,7 +10,6 @@ Facilities for HTTP/1 upgrades.
"""

[dependencies]
bytes = { workspace = true }
drain = { workspace = true }
futures = { version = "0.3", default-features = false }
http = { workspace = true }

@@ -10,8 +10,6 @@ publish = { workspace = true }
test-util = []

[dependencies]
futures = { version = "0.3", default-features = false }
linkerd-error = { path = "../error" }
linkerd-stack = { path = "../stack" }
parking_lot = "0.12"
tokio = { version = "1", default-features = false, features = [

@@ -28,4 +26,3 @@ tokio = { version = "1", default-features = false, features = [
"test-util",
"time",
] }
linkerd-tracing = { path = "../tracing", features = ["ansi"] }
@@ -7,27 +7,27 @@ edition = "2018"
publish = { workspace = true }

[features]
rustls = ["linkerd-meshtls-rustls", "__has_any_tls_impls"]
rustls-aws-lc = ["rustls", "linkerd-meshtls-rustls/aws-lc"]
rustls-aws-lc-fips = ["rustls-aws-lc", "linkerd-meshtls-rustls/aws-lc-fips"]
rustls-ring = ["rustls", "linkerd-meshtls-rustls/ring"]
boring = ["linkerd-meshtls-boring", "__has_any_tls_impls"]
boring-fips = ["boring", "linkerd-meshtls-boring/fips"]
# Enabled if *any* TLS impl is enabled.
__has_any_tls_impls = []
rustls-aws-lc-fips = ["tokio-rustls/fips"]
test-util = ["linkerd-tls-test-util"]

[dependencies]
futures = { version = "0.3", default-features = false }
pin-project = "1"
rustls-pemfile = "2.2"
rustls-webpki = { version = "0.103.4", default-features = false, features = ["std", "aws-lc-rs"] }
thiserror = "2"
tokio = { version = "1", features = ["macros", "rt", "sync"] }
tokio-rustls = { workspace = true, features = ["aws-lc-rs"] }
tracing = { workspace = true }

linkerd-dns-name = { path = "../dns/name" }
linkerd-error = { path = "../error" }
linkerd-identity = { path = "../identity" }
linkerd-io = { path = "../io" }
linkerd-meshtls-boring = { path = "boring", optional = true }
linkerd-meshtls-rustls = { path = "rustls", optional = true, default-features = false }
linkerd-meshtls-verifier = { path = "verifier" }
linkerd-rustls = { path = "../rustls" }
linkerd-stack = { path = "../stack" }
linkerd-tls = { path = "../tls" }
linkerd-tls-test-util = { path = "../tls/test-util", optional = true }

[dev-dependencies]
tokio = { version = "1", features = ["macros", "net", "rt-multi-thread"] }

@@ -37,4 +37,4 @@ rcgen = { version = "0.14.3", default-features = false, features = ["crypto", "p
 linkerd-conditional = { path = "../conditional" }
 linkerd-proxy-transport = { path = "../proxy/transport" }
 linkerd-tls-test-util = { path = "../tls/test-util" }
-linkerd-tracing = { path = "../tracing", features = ["ansi"] }
+linkerd-tracing = { path = "../tracing", features = ["ansi"] }
@ -1,30 +0,0 @@
|
|||
[package]
|
||||
name = "linkerd-meshtls-boring"
|
||||
version = { workspace = true }
|
||||
authors = { workspace = true }
|
||||
license = { workspace = true }
|
||||
edition = "2018"
|
||||
publish = { workspace = true }
|
||||
|
||||
[dependencies]
|
||||
boring = "4"
|
||||
futures = { version = "0.3", default-features = false }
|
||||
hex = "0.4" # used for debug logging
|
||||
tokio = { version = "1", features = ["macros", "sync"] }
|
||||
tokio-boring = "4"
|
||||
tracing = { workspace = true }
|
||||
|
||||
linkerd-dns-name = { path = "../../dns/name" }
|
||||
linkerd-error = { path = "../../error" }
|
||||
linkerd-identity = { path = "../../identity" }
|
||||
linkerd-io = { path = "../../io" }
|
||||
linkerd-meshtls-verifier = { path = "../verifier" }
|
||||
linkerd-stack = { path = "../../stack" }
|
||||
linkerd-tls = { path = "../../tls" }
|
||||
|
||||
[features]
|
||||
fips = ["boring/fips"]
|
||||
|
||||
[dev-dependencies]
|
||||
linkerd-tls-test-util = { path = "../../tls/test-util" }
|
||||
linkerd-meshtls = { path = "../../meshtls" }
|
|
@ -1,185 +0,0 @@
|
|||
use crate::creds::CredsRx;
|
||||
use linkerd_identity as id;
|
||||
use linkerd_io as io;
|
||||
use linkerd_meshtls_verifier as verifier;
|
||||
use linkerd_stack::{NewService, Service};
|
||||
use linkerd_tls::{client::AlpnProtocols, ClientTls, NegotiatedProtocolRef, ServerName};
|
||||
use std::{future::Future, pin::Pin, sync::Arc, task::Context};
|
||||
use tracing::{debug, trace};
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct NewClient(CredsRx);
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct Connect {
|
||||
rx: CredsRx,
|
||||
alpn: Option<Arc<[Vec<u8>]>>,
|
||||
id: id::Id,
|
||||
server: ServerName,
|
||||
}
|
||||
|
||||
pub type ConnectFuture<I> = Pin<Box<dyn Future<Output = io::Result<ClientIo<I>>> + Send>>;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct ClientIo<I>(tokio_boring::SslStream<I>);
|
||||
|
||||
// === impl NewClient ===
|
||||
|
||||
impl NewClient {
|
||||
pub(crate) fn new(rx: CredsRx) -> Self {
|
||||
Self(rx)
|
||||
}
|
||||
}
|
||||
|
||||
impl NewService<ClientTls> for NewClient {
|
||||
type Service = Connect;
|
||||
|
||||
fn new_service(&self, target: ClientTls) -> Self::Service {
|
||||
Connect::new(target, self.0.clone())
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for NewClient {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("NewClient").finish()
|
||||
}
|
||||
}
|
||||
|
||||
// === impl Connect ===
|
||||
|
||||
impl Connect {
|
||||
pub(crate) fn new(client_tls: ClientTls, rx: CredsRx) -> Self {
|
||||
Self {
|
||||
rx,
|
||||
alpn: client_tls.alpn.map(|AlpnProtocols(ps)| ps.into()),
|
||||
server: client_tls.server_name,
|
||||
id: client_tls.server_id.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<I> Service<I> for Connect
|
||||
where
|
||||
I: io::AsyncRead + io::AsyncWrite + Send + Unpin + 'static,
|
||||
{
|
||||
type Response = ClientIo<I>;
|
||||
type Error = io::Error;
|
||||
type Future = ConnectFuture<I>;
|
||||
|
||||
fn poll_ready(&mut self, _cx: &mut Context<'_>) -> io::Poll<()> {
|
||||
io::Poll::Ready(Ok(()))
|
||||
}
|
||||
|
||||
fn call(&mut self, io: I) -> Self::Future {
|
||||
let server_name = self.server.clone();
|
||||
let server_id = self.id.clone();
|
||||
let connector = self
|
||||
.rx
|
||||
.borrow()
|
||||
.connector(self.alpn.as_deref().unwrap_or(&[]));
|
||||
Box::pin(async move {
|
||||
let config = connector
|
||||
.map_err(io::Error::other)?
|
||||
.configure()
|
||||
.map_err(io::Error::other)?;
|
||||
|
||||
// Establish a TLS connection to the server using the provided
|
||||
// `server_name` as an SNI value to the server.
|
||||
//
|
||||
// Hostname verification is DISABLED, as we do not require that the
|
||||
// peer's certificate actually matches the `server_name`. Instead,
|
||||
// the `server_id` is used to perform the appropriate form of
|
||||
// verification after the session is established.
|
||||
let io = tokio_boring::connect(config.verify_hostname(false), server_name.as_str(), io)
|
||||
.await
|
||||
.map_err(|e| match e.as_io_error() {
|
||||
// TODO(ver) boring should let us take ownership of the error directly.
|
||||
Some(ioe) => io::Error::new(ioe.kind(), ioe.to_string()),
|
||||
// XXX(ver) to use the boring error directly here we have to
|
||||
// constrain the socket on Sync + std::fmt::Debug, which is
|
||||
// a pain.
|
||||
None => io::Error::other("unexpected TLS handshake error"),
|
||||
})?;
|
||||
|
||||
// Servers must present a peer certificate. We extract the x509 cert
|
||||
// and verify it manually against the `server_id`.
|
||||
let cert = io
|
||||
.ssl()
|
||||
.peer_certificate()
|
||||
.ok_or_else(|| io::Error::other("could not extract peer cert"))?;
|
||||
let cert_der = id::DerX509(cert.to_der()?);
|
||||
verifier::verify_id(&cert_der, &server_id)?;
|
||||
|
||||
debug!(
|
||||
tls = io.ssl().version_str(),
|
||||
client.cert = ?io.ssl().certificate().and_then(super::fingerprint),
|
||||
peer.cert = ?io.ssl().peer_certificate().as_deref().and_then(super::fingerprint),
|
||||
alpn = ?io.ssl().selected_alpn_protocol(),
|
||||
"Initiated TLS connection"
|
||||
);
|
||||
trace!(peer.id = %server_id, peer.name = %server_name);
|
||||
Ok(ClientIo(io))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// === impl ClientIo ===
|
||||
|
||||
impl<I: io::AsyncRead + io::AsyncWrite + Unpin> io::AsyncRead for ClientIo<I> {
|
||||
#[inline]
|
||||
fn poll_read(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
buf: &mut io::ReadBuf<'_>,
|
||||
) -> io::Poll<()> {
|
||||
Pin::new(&mut self.0).poll_read(cx, buf)
|
||||
}
|
||||
}
|
||||
|
||||
impl<I: io::AsyncRead + io::AsyncWrite + Unpin> io::AsyncWrite for ClientIo<I> {
|
||||
#[inline]
|
||||
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
|
||||
Pin::new(&mut self.0).poll_flush(cx)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
|
||||
Pin::new(&mut self.0).poll_shutdown(cx)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> io::Poll<usize> {
|
||||
Pin::new(&mut self.0).poll_write(cx, buf)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn poll_write_vectored(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
bufs: &[io::IoSlice<'_>],
|
||||
) -> io::Poll<usize> {
|
||||
Pin::new(&mut self.0).poll_write_vectored(cx, bufs)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn is_write_vectored(&self) -> bool {
|
||||
self.0.is_write_vectored()
|
||||
}
|
||||
}
|
||||
|
||||
impl<I> ClientIo<I> {
|
||||
#[inline]
|
||||
pub fn negotiated_protocol(&self) -> Option<NegotiatedProtocolRef<'_>> {
|
||||
self.0
|
||||
.ssl()
|
||||
.selected_alpn_protocol()
|
||||
.map(NegotiatedProtocolRef)
|
||||
}
|
||||
}
|
||||
|
||||
impl<I: io::PeerAddr> io::PeerAddr for ClientIo<I> {
|
||||
#[inline]
|
||||
fn peer_addr(&self) -> io::Result<std::net::SocketAddr> {
|
||||
self.0.get_ref().peer_addr()
|
||||
}
|
||||
}
|
|
@ -1,209 +0,0 @@
|
|||
mod receiver;
|
||||
mod store;
|
||||
|
||||
pub use self::{receiver::Receiver, store::Store};
|
||||
use boring::{
|
||||
pkey::{PKey, Private},
|
||||
ssl,
|
||||
x509::{store::X509StoreBuilder, X509},
|
||||
};
|
||||
use linkerd_dns_name as dns;
|
||||
use linkerd_error::Result;
|
||||
use linkerd_identity as id;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::watch;
|
||||
|
||||
pub fn watch(
|
||||
local_id: id::Id,
|
||||
server_name: dns::Name,
|
||||
roots_pem: &str,
|
||||
) -> Result<(Store, Receiver)> {
|
||||
let creds = {
|
||||
let roots = X509::stack_from_pem(roots_pem.as_bytes())?;
|
||||
Arc::new(BaseCreds { roots })
|
||||
};
|
||||
|
||||
let (tx, rx) = watch::channel(Creds::from(creds.clone()));
|
||||
let rx = Receiver::new(local_id.clone(), server_name, rx);
|
||||
let store = Store::new(creds, local_id, tx);
|
||||
|
||||
Ok((store, rx))
|
||||
}
|
||||
|
||||
pub(crate) struct Creds {
|
||||
base: Arc<BaseCreds>,
|
||||
certs: Option<Certs>,
|
||||
}
|
||||
|
||||
struct BaseCreds {
|
||||
roots: Vec<X509>,
|
||||
}
|
||||
|
||||
struct Certs {
|
||||
leaf: X509,
|
||||
intermediates: Vec<X509>,
|
||||
key: PKey<Private>,
|
||||
}
|
||||
|
||||
pub(crate) type CredsRx = watch::Receiver<Creds>;
|
||||
|
||||
type CredsTx = watch::Sender<Creds>;
|
||||
|
||||
// === impl Creds ===
|
||||
|
||||
impl From<Arc<BaseCreds>> for Creds {
|
||||
fn from(base: Arc<BaseCreds>) -> Self {
|
||||
Self { base, certs: None }
|
||||
}
|
||||
}
|
||||
|
||||
impl Creds {
|
||||
// TODO(ver) Specify certificate types, signing algorithms, cipher suites..
|
||||
pub(crate) fn acceptor(&self, alpn_protocols: &[Vec<u8>]) -> Result<ssl::SslAcceptor> {
|
||||
// mozilla_intermediate_v5 is the only variant that enables TLSv1.3, so we use that.
|
||||
let mut conn = ssl::SslAcceptor::mozilla_intermediate_v5(ssl::SslMethod::tls_server())?;
|
||||
|
||||
// Force use of TLSv1.3.
|
||||
conn.set_options(ssl::SslOptions::NO_TLSV1_2);
|
||||
conn.clear_options(ssl::SslOptions::NO_TLSV1_3);
|
||||
|
||||
let roots = self.root_store()?;
|
||||
tracing::debug!(
|
||||
roots = ?self
|
||||
.base
|
||||
.roots
|
||||
.iter()
|
||||
.filter_map(|c| super::fingerprint(c))
|
||||
.collect::<Vec<_>>(),
|
||||
"Configuring acceptor roots",
|
||||
);
|
||||
conn.set_cert_store(roots);
|
||||
|
||||
// Ensure that client certificates are validated when present.
|
||||
conn.set_verify(ssl::SslVerifyMode::PEER);
|
||||
|
||||
if let Some(certs) = &self.certs {
|
||||
tracing::debug!(
|
||||
cert = ?super::fingerprint(&certs.leaf),
|
||||
"Configuring acceptor certificate",
|
||||
);
|
||||
conn.set_private_key(&certs.key)?;
|
||||
conn.set_certificate(&certs.leaf)?;
|
||||
conn.check_private_key()?;
|
||||
for c in &certs.intermediates {
|
||||
conn.add_extra_chain_cert(c.to_owned())?;
|
||||
}
|
||||
}
|
||||
|
||||
if !alpn_protocols.is_empty() {
|
||||
let p = serialize_alpn(alpn_protocols)?;
|
||||
conn.set_alpn_protos(&p)?;
|
||||
}
|
||||
|
||||
Ok(conn.build())
|
||||
}
|
||||
|
||||
// TODO(ver) Specify certificate types, signing algorithms, cipher suites..
|
||||
pub(crate) fn connector(&self, alpn_protocols: &[Vec<u8>]) -> Result<ssl::SslConnector> {
|
||||
// XXX(ver) This function reads from the environment and/or the filesystem. This likely is
|
||||
// at best wasteful and at worst unsafe (if another thread were to mutate these environment
|
||||
// variables simultaneously, for instance). Unfortunately, the boring APIs don't really give
|
||||
// us an alternative AFAICT.
|
||||
let mut conn = ssl::SslConnector::builder(ssl::SslMethod::tls_client())?;
|
||||
|
||||
// Explicitly enable use of TLSv1.3
|
||||
conn.set_options(ssl::SslOptions::NO_TLSV1 | ssl::SslOptions::NO_TLSV1_1);
|
||||
// XXX(ver) if we disable use of TLSv1.2, connections just hang.
|
||||
//conn.set_options(ssl::SslOptions::NO_TLSV1_2);
|
||||
conn.clear_options(ssl::SslOptions::NO_TLSV1_3);
|
||||
|
||||
tracing::debug!(
|
||||
roots = ?self
|
||||
.base
|
||||
.roots
|
||||
.iter()
|
||||
.filter_map(|c| super::fingerprint(c))
|
||||
.collect::<Vec<_>>(),
|
||||
"Configuring connector roots",
|
||||
);
|
||||
let roots = self.root_store()?;
|
||||
conn.set_cert_store(roots);
|
||||
|
||||
if let Some(certs) = &self.certs {
|
||||
tracing::debug!(
|
||||
cert = ?super::fingerprint(&certs.leaf),
|
||||
intermediates = %certs.intermediates.len(),
|
||||
"Configuring connector certificate",
|
||||
);
|
||||
conn.set_private_key(&certs.key)?;
|
||||
conn.set_certificate(&certs.leaf)?;
|
||||
conn.check_private_key()?;
|
||||
for c in &certs.intermediates {
|
||||
conn.add_extra_chain_cert(c.to_owned())?;
|
||||
}
|
||||
}
|
||||
|
||||
if !alpn_protocols.is_empty() {
|
||||
let p = serialize_alpn(alpn_protocols)?;
|
||||
conn.set_alpn_protos(&p)?;
|
||||
}
|
||||
|
||||
Ok(conn.build())
|
||||
}
|
||||
|
||||
fn root_store(&self) -> Result<boring::x509::store::X509Store> {
|
||||
let mut store = X509StoreBuilder::new()?;
|
||||
for c in &self.base.roots {
|
||||
store.add_cert(c.to_owned())?;
|
||||
}
|
||||
|
||||
Ok(store.build())
|
||||
}
|
||||
}
|
||||
|
||||
/// Encodes a list of ALPN protocols into a slice of bytes.
|
||||
///
|
||||
/// `boring` requires that the list of protocols be encoded in the wire format.
|
||||
fn serialize_alpn(protocols: &[Vec<u8>]) -> Result<Vec<u8>> {
|
||||
// Allocate a buffer to hold the encoded protocols.
|
||||
let mut bytes = {
|
||||
// One additional byte for each protocol's length prefix.
|
||||
let cap = protocols.len() + protocols.iter().map(Vec::len).sum::<usize>();
|
||||
Vec::with_capacity(cap)
|
||||
};
|
||||
|
||||
// Encode each protocol as a length-prefixed string.
|
||||
for p in protocols {
|
||||
if p.is_empty() {
|
||||
continue;
|
||||
}
|
||||
if p.len() > 255 {
|
||||
return Err("ALPN protocols must be less than 256 bytes".into());
|
||||
}
|
||||
bytes.push(p.len() as u8);
|
||||
bytes.extend(p);
|
||||
}
|
||||
|
||||
Ok(bytes)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[test]
|
||||
fn test_serialize_alpn() {
|
||||
assert_eq!(serialize_alpn(&[b"h2".to_vec()]).unwrap(), b"\x02h2");
|
||||
assert_eq!(
|
||||
serialize_alpn(&[b"h2".to_vec(), b"http/1.1".to_vec()]).unwrap(),
|
||||
b"\x02h2\x08http/1.1"
|
||||
);
|
||||
assert_eq!(
|
||||
serialize_alpn(&[b"h2".to_vec(), b"http/1.1".to_vec()]).unwrap(),
|
||||
b"\x02h2\x08http/1.1"
|
||||
);
|
||||
assert_eq!(
|
||||
serialize_alpn(&[b"h2".to_vec(), vec![], b"http/1.1".to_vec()]).unwrap(),
|
||||
b"\x02h2\x08http/1.1"
|
||||
);
|
||||
|
||||
assert!(serialize_alpn(&[(0..255).collect()]).is_ok());
|
||||
assert!(serialize_alpn(&[(0..=255).collect()]).is_err());
|
||||
}
|
|
@ -1,45 +0,0 @@
|
|||
use super::CredsRx;
|
||||
use crate::{NewClient, Server};
|
||||
use linkerd_dns_name as dns;
|
||||
use linkerd_identity as id;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct Receiver {
|
||||
id: id::Id,
|
||||
name: dns::Name,
|
||||
rx: CredsRx,
|
||||
}
|
||||
|
||||
impl Receiver {
|
||||
pub(crate) fn new(id: id::Id, name: dns::Name, rx: CredsRx) -> Self {
|
||||
Self { id, name, rx }
|
||||
}
|
||||
|
||||
/// Returns the local identity.
|
||||
pub fn local_id(&self) -> &id::Id {
|
||||
&self.id
|
||||
}
|
||||
|
||||
/// Returns the mTLS Server Name.
|
||||
pub fn server_name(&self) -> &dns::Name {
|
||||
&self.name
|
||||
}
|
||||
|
||||
/// Returns a `NewClient` that can be used to establish TLS on client connections.
|
||||
pub fn new_client(&self) -> NewClient {
|
||||
NewClient::new(self.rx.clone())
|
||||
}
|
||||
|
||||
/// Returns a `Server` that can be used to terminate TLS on server connections.
|
||||
pub fn server(&self) -> Server {
|
||||
Server::new(self.name.clone(), self.rx.clone())
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for Receiver {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("Receiver")
|
||||
.field("name", &self.name)
|
||||
.finish()
|
||||
}
|
||||
}
|
|
@ -1,72 +0,0 @@
|
|||
use super::{BaseCreds, Certs, Creds, CredsTx};
|
||||
use boring::pkey::PKey;
|
||||
use boring::x509::{X509StoreContext, X509};
|
||||
use linkerd_error::Result;
|
||||
use linkerd_identity as id;
|
||||
use linkerd_meshtls_verifier as verifier;
|
||||
use std::sync::Arc;
|
||||
|
||||
pub struct Store {
|
||||
creds: Arc<BaseCreds>,
|
||||
id: id::Id,
|
||||
tx: CredsTx,
|
||||
}
|
||||
|
||||
// === impl Store ===
|
||||
|
||||
impl Store {
|
||||
pub(super) fn new(creds: Arc<BaseCreds>, id: id::Id, tx: CredsTx) -> Self {
|
||||
Self { creds, id, tx }
|
||||
}
|
||||
}
|
||||
|
||||
impl id::Credentials for Store {
|
||||
/// Publishes TLS client and server configurations using
|
||||
fn set_certificate(
|
||||
&mut self,
|
||||
id::DerX509(leaf_der): id::DerX509,
|
||||
intermediates: Vec<id::DerX509>,
|
||||
key_pkcs8: Vec<u8>,
|
||||
_expiry: std::time::SystemTime,
|
||||
) -> Result<()> {
|
||||
let leaf = X509::from_der(&leaf_der)?;
|
||||
|
||||
verifier::verify_id(&leaf_der, &self.id)?;
|
||||
|
||||
let intermediates = intermediates
|
||||
.into_iter()
|
||||
.map(|id::DerX509(der)| X509::from_der(&der).map_err(Into::into))
|
||||
.collect::<Result<Vec<_>>>()?;
|
||||
|
||||
let key = PKey::private_key_from_pkcs8(&key_pkcs8)?;
|
||||
let creds = Creds {
|
||||
base: self.creds.clone(),
|
||||
certs: Some(Certs {
|
||||
leaf,
|
||||
intermediates,
|
||||
key,
|
||||
}),
|
||||
};
|
||||
|
||||
let mut context = X509StoreContext::new()?;
|
||||
let roots = creds.root_store()?;
|
||||
|
||||
let mut chain = boring::stack::Stack::new()?;
|
||||
for i in &creds.certs.as_ref().unwrap().intermediates {
|
||||
chain.push(i.to_owned())?;
|
||||
}
|
||||
let init = {
|
||||
let leaf = &creds.certs.as_ref().unwrap().leaf;
|
||||
context.init(&roots, leaf, &chain, |c| c.verify_cert())?
|
||||
};
|
||||
if !init {
|
||||
return Err("certificate could not be validated against the trust chain".into());
|
||||
}
|
||||
|
||||
// If receivers are dropped, we don't return an error (as this would likely cause the
|
||||
// updater to retry more aggressively). It's fine to silently ignore these errors.
|
||||
let _ = self.tx.send(creds);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
|
@ -1,40 +0,0 @@
|
|||
#![deny(rust_2018_idioms, clippy::disallowed_methods, clippy::disallowed_types)]
|
||||
#![forbid(unsafe_code)]
|
||||
|
||||
//! This crate provides an implementation of _meshtls_ backed by `boringssl` (as
|
||||
//! provided by <https://github.com/cloudflare/boring>).
|
||||
//!
|
||||
//! There are several caveats with the current implementation:
|
||||
//!
|
||||
//! In its current form, this crate is compatible with the `meshtls-rustls`
|
||||
//! implementation, which requires of ECDSA-P256-SHA256 keys & signature
|
||||
//! algorithms. This crate doesn't actually constrain the algorithms beyond the
|
||||
//! Mozilla's 'intermediate' (v5) [defaults][defaults]. But, the goal for
|
||||
//! supporting `boring` is to provide a FIPS 140-2 compliant mode. There's a
|
||||
//! [PR][fips-pr] that implements this, but code changes will likely be required
|
||||
//! to enable this once it's merged/released.
|
||||
//!
|
||||
//! A new SSL context is created for each connection. This is probably
|
||||
//! unnecessary, but it's simpler for now. We can revisit this if needed.
|
||||
//!
|
||||
//! This module is not enabled by default. See the `linkerd-meshtls` and
|
||||
//! `linkerd2-proxy` crates for more information.
|
||||
//!
|
||||
//! [defaults]: https://wiki.mozilla.org/Security/Server_Side_TLS
|
||||
//! [fips-pr]: https://github.com/cloudflare/boring/pull/52
|
||||
|
||||
mod client;
|
||||
pub mod creds;
|
||||
mod server;
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
|
||||
pub use self::{
|
||||
client::{ClientIo, Connect, ConnectFuture, NewClient},
|
||||
server::{Server, ServerIo, TerminateFuture},
|
||||
};
|
||||
|
||||
fn fingerprint(c: &boring::x509::X509Ref) -> Option<String> {
|
||||
let digest = c.digest(boring::hash::MessageDigest::sha256()).ok()?;
|
||||
Some(hex::encode(digest)[0..8].to_string())
|
||||
}
|
|
@ -1,180 +0,0 @@
|
|||
use crate::creds::CredsRx;
|
||||
use linkerd_dns_name as dns;
|
||||
use linkerd_io as io;
|
||||
use linkerd_meshtls_verifier as verifier;
|
||||
use linkerd_stack::{Param, Service};
|
||||
use linkerd_tls::{ClientId, NegotiatedProtocol, ServerName, ServerTls};
|
||||
use std::{future::Future, pin::Pin, sync::Arc, task::Context};
|
||||
use tracing::debug;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct Server {
|
||||
name: dns::Name,
|
||||
rx: CredsRx,
|
||||
alpn: Option<Arc<[Vec<u8>]>>,
|
||||
}
|
||||
|
||||
pub type TerminateFuture<I> =
|
||||
Pin<Box<dyn Future<Output = io::Result<(ServerTls, ServerIo<I>)>> + Send>>;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct ServerIo<I>(tokio_boring::SslStream<I>);
|
||||
|
||||
// === impl Server ===
|
||||
|
||||
impl Server {
|
||||
pub(crate) fn new(name: dns::Name, rx: CredsRx) -> Self {
|
||||
Self {
|
||||
name,
|
||||
rx,
|
||||
alpn: None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn with_alpn(mut self, alpn_protocols: Vec<Vec<u8>>) -> Self {
|
||||
self.alpn = if alpn_protocols.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(alpn_protocols.into())
|
||||
};
|
||||
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl Param<ServerName> for Server {
|
||||
fn param(&self) -> ServerName {
|
||||
ServerName(self.name.clone())
|
||||
}
|
||||
}
|
||||
|
||||
impl<I> Service<I> for Server
|
||||
where
|
||||
I: io::AsyncRead + io::AsyncWrite + Send + Unpin + 'static,
|
||||
{
|
||||
type Response = (ServerTls, ServerIo<I>);
|
||||
type Error = std::io::Error;
|
||||
type Future = TerminateFuture<I>;
|
||||
|
||||
#[inline]
|
||||
fn poll_ready(&mut self, _cx: &mut Context<'_>) -> io::Poll<()> {
|
||||
io::Poll::Ready(Ok(()))
|
||||
}
|
||||
|
||||
fn call(&mut self, io: I) -> Self::Future {
|
||||
// TODO(ver) we should avoid creating a new context for each connection.
|
||||
let acceptor = self
|
||||
.rx
|
||||
.borrow()
|
||||
.acceptor(self.alpn.as_deref().unwrap_or(&[]));
|
||||
Box::pin(async move {
|
||||
let acc = acceptor.map_err(io::Error::other)?;
|
||||
let io = tokio_boring::accept(&acc, io)
|
||||
.await
|
||||
.map(ServerIo)
|
||||
.map_err(|e| match e.as_io_error() {
|
||||
Some(ioe) => io::Error::new(ioe.kind(), ioe.to_string()),
|
||||
// XXX(ver) to use the boring error directly here we have to constraint the
|
||||
// socket on Sync + std::fmt::Debug, which is a pain.
|
||||
None => io::Error::other("unexpected TLS handshake error"),
|
||||
})?;
|
||||
|
||||
let client_id = io.client_identity();
|
||||
let negotiated_protocol = io.negotiated_protocol();
|
||||
|
||||
debug!(
|
||||
tls = io.0.ssl().version_str(),
|
||||
srv.cert = ?io.0.ssl().certificate().and_then(super::fingerprint),
|
||||
peer.cert = ?io.0.ssl().peer_certificate().as_deref().and_then(super::fingerprint),
|
||||
client.id = ?client_id,
|
||||
alpn = ?negotiated_protocol,
|
||||
"Accepted TLS connection"
|
||||
);
|
||||
let tls = ServerTls::Established {
|
||||
client_id,
|
||||
negotiated_protocol,
|
||||
};
|
||||
Ok((tls, io))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// === impl ServerIo ===
|
||||
|
||||
impl<I> ServerIo<I> {
|
||||
#[inline]
|
||||
fn negotiated_protocol(&self) -> Option<NegotiatedProtocol> {
|
||||
self.0
|
||||
.ssl()
|
||||
.selected_alpn_protocol()
|
||||
.map(|p| NegotiatedProtocol(p.to_vec()))
|
||||
}
|
||||
|
||||
fn client_identity(&self) -> Option<ClientId> {
|
||||
match self.0.ssl().peer_certificate() {
|
||||
Some(cert) => {
|
||||
let der = cert
|
||||
.to_der()
|
||||
.map_err(
|
||||
|error| tracing::warn!(%error, "Failed to encode client end cert to der"),
|
||||
)
|
||||
.ok()?;
|
||||
|
||||
verifier::client_identity(&der).map(ClientId)
|
||||
}
|
||||
None => {
|
||||
debug!("Connection missing peer certificate");
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<I: io::AsyncRead + io::AsyncWrite + Unpin> io::AsyncRead for ServerIo<I> {
|
||||
#[inline]
|
||||
fn poll_read(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
buf: &mut io::ReadBuf<'_>,
|
||||
) -> io::Poll<()> {
|
||||
Pin::new(&mut self.0).poll_read(cx, buf)
|
||||
}
|
||||
}
|
||||
|
||||
impl<I: io::AsyncRead + io::AsyncWrite + Unpin> io::AsyncWrite for ServerIo<I> {
|
||||
#[inline]
|
||||
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
|
||||
Pin::new(&mut self.0).poll_flush(cx)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
|
||||
Pin::new(&mut self.0).poll_shutdown(cx)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> io::Poll<usize> {
|
||||
Pin::new(&mut self.0).poll_write(cx, buf)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn poll_write_vectored(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
bufs: &[io::IoSlice<'_>],
|
||||
) -> io::Poll<usize> {
|
||||
Pin::new(&mut self.0).poll_write_vectored(cx, bufs)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn is_write_vectored(&self) -> bool {
|
||||
self.0.is_write_vectored()
|
||||
}
|
||||
}
|
||||
|
||||
impl<I: io::PeerAddr> io::PeerAddr for ServerIo<I> {
|
||||
#[inline]
|
||||
fn peer_addr(&self) -> io::Result<std::net::SocketAddr> {
|
||||
self.0.get_ref().peer_addr()
|
||||
}
|
||||
}
|
|
@ -1,35 +0,0 @@
|
|||
[package]
|
||||
name = "linkerd-meshtls-rustls"
|
||||
version = { workspace = true }
|
||||
authors = { workspace = true }
|
||||
license = { workspace = true }
|
||||
edition = "2018"
|
||||
publish = { workspace = true }
|
||||
|
||||
[features]
|
||||
default = ["aws-lc"]
|
||||
ring = ["tokio-rustls/ring", "rustls-webpki/ring"]
|
||||
aws-lc = ["tokio-rustls/aws-lc-rs", "rustls-webpki/aws-lc-rs"]
|
||||
aws-lc-fips = ["aws-lc", "tokio-rustls/fips"]
|
||||
test-util = ["linkerd-tls-test-util"]
|
||||
|
||||
[dependencies]
|
||||
futures = { version = "0.3", default-features = false }
|
||||
rustls-pemfile = "2.2"
|
||||
rustls-webpki = { version = "0.103.4", default-features = false, features = ["std"] }
|
||||
thiserror = "2"
|
||||
tokio = { version = "1", features = ["macros", "rt", "sync"] }
|
||||
tokio-rustls = { workspace = true }
|
||||
tracing = { workspace = true }
|
||||
|
||||
linkerd-dns-name = { path = "../../dns/name" }
|
||||
linkerd-error = { path = "../../error" }
|
||||
linkerd-io = { path = "../../io" }
|
||||
linkerd-identity = { path = "../../identity" }
|
||||
linkerd-stack = { path = "../../stack" }
|
||||
linkerd-tls = { path = "../../tls" }
|
||||
linkerd-tls-test-util = { path = "../../tls/test-util", optional = true }
|
||||
linkerd-meshtls-verifier = { path = "../verifier" }
|
||||
|
||||
[dev-dependencies]
|
||||
linkerd-tls-test-util = { path = "../../tls/test-util" }
|
|
@ -1,16 +0,0 @@
|
|||
#[cfg(all(feature = "aws-lc", feature = "ring"))]
|
||||
compile_error!(
|
||||
"Multiple rustls backends enabled. Enabled one of the \"ring\" or \"aws-lc\" features"
|
||||
);
|
||||
#[cfg(not(any(feature = "aws-lc", feature = "ring")))]
|
||||
compile_error!("No rustls backend enabled. Enabled one of the \"ring\" or \"aws-lc\" features");
|
||||
|
||||
#[cfg(feature = "aws-lc")]
|
||||
mod aws_lc;
|
||||
#[cfg(feature = "ring")]
|
||||
mod ring;
|
||||
|
||||
#[cfg(feature = "aws-lc")]
|
||||
pub use aws_lc::{default_provider, SUPPORTED_SIG_ALGS, TLS_SUPPORTED_CIPHERSUITES};
|
||||
#[cfg(feature = "ring")]
|
||||
pub use ring::{default_provider, SUPPORTED_SIG_ALGS, TLS_SUPPORTED_CIPHERSUITES};
|
|
@ -1,73 +0,0 @@
|
|||
pub use aws_lc_rs::default_provider;
|
||||
use tokio_rustls::rustls::{
|
||||
self,
|
||||
crypto::{aws_lc_rs, WebPkiSupportedAlgorithms},
|
||||
};
|
||||
|
||||
#[cfg(not(feature = "aws-lc-fips"))]
|
||||
pub static TLS_SUPPORTED_CIPHERSUITES: &[rustls::SupportedCipherSuite] = &[
|
||||
aws_lc_rs::cipher_suite::TLS13_AES_128_GCM_SHA256,
|
||||
aws_lc_rs::cipher_suite::TLS13_AES_256_GCM_SHA384,
|
||||
aws_lc_rs::cipher_suite::TLS13_CHACHA20_POLY1305_SHA256,
|
||||
];
|
||||
// Prefer aes-256-gcm if fips is enabled
|
||||
#[cfg(feature = "aws-lc-fips")]
|
||||
pub static TLS_SUPPORTED_CIPHERSUITES: &[rustls::SupportedCipherSuite] = &[
|
||||
aws_lc_rs::cipher_suite::TLS13_AES_256_GCM_SHA384,
|
||||
aws_lc_rs::cipher_suite::TLS13_AES_128_GCM_SHA256,
|
||||
];
|
||||
pub static SUPPORTED_SIG_ALGS: &WebPkiSupportedAlgorithms = &WebPkiSupportedAlgorithms {
|
||||
all: &[
|
||||
webpki::aws_lc_rs::ECDSA_P256_SHA256,
|
||||
webpki::aws_lc_rs::ECDSA_P256_SHA384,
|
||||
webpki::aws_lc_rs::ECDSA_P384_SHA256,
|
||||
webpki::aws_lc_rs::ECDSA_P384_SHA384,
|
||||
webpki::aws_lc_rs::ECDSA_P521_SHA256,
|
||||
webpki::aws_lc_rs::ECDSA_P521_SHA384,
|
||||
webpki::aws_lc_rs::ECDSA_P521_SHA512,
|
||||
webpki::aws_lc_rs::ED25519,
|
||||
webpki::aws_lc_rs::RSA_PKCS1_2048_8192_SHA256,
|
||||
webpki::aws_lc_rs::RSA_PKCS1_2048_8192_SHA384,
|
||||
webpki::aws_lc_rs::RSA_PKCS1_2048_8192_SHA512,
|
||||
webpki::aws_lc_rs::RSA_PKCS1_3072_8192_SHA384,
|
||||
],
|
||||
mapping: &[
|
||||
// Note: for TLS1.2 the curve is not fixed by SignatureScheme. For TLS1.3 it is.
|
||||
(
|
||||
rustls::SignatureScheme::ECDSA_NISTP384_SHA384,
|
||||
&[
|
||||
webpki::aws_lc_rs::ECDSA_P384_SHA384,
|
||||
webpki::aws_lc_rs::ECDSA_P256_SHA384,
|
||||
webpki::aws_lc_rs::ECDSA_P521_SHA384,
|
||||
],
|
||||
),
|
||||
(
|
||||
rustls::SignatureScheme::ECDSA_NISTP256_SHA256,
|
||||
&[
|
||||
webpki::aws_lc_rs::ECDSA_P256_SHA256,
|
||||
webpki::aws_lc_rs::ECDSA_P384_SHA256,
|
||||
webpki::aws_lc_rs::ECDSA_P521_SHA256,
|
||||
],
|
||||
),
|
||||
(
|
||||
rustls::SignatureScheme::ECDSA_NISTP521_SHA512,
|
||||
&[webpki::aws_lc_rs::ECDSA_P521_SHA512],
|
||||
),
|
||||
(
|
||||
rustls::SignatureScheme::ED25519,
|
||||
&[webpki::aws_lc_rs::ED25519],
|
||||
),
|
||||
(
|
||||
rustls::SignatureScheme::RSA_PKCS1_SHA512,
|
||||
&[webpki::aws_lc_rs::RSA_PKCS1_2048_8192_SHA512],
|
||||
),
|
||||
(
|
||||
rustls::SignatureScheme::RSA_PKCS1_SHA384,
|
||||
&[webpki::aws_lc_rs::RSA_PKCS1_2048_8192_SHA384],
|
||||
),
|
||||
(
|
||||
rustls::SignatureScheme::RSA_PKCS1_SHA256,
|
||||
&[webpki::aws_lc_rs::RSA_PKCS1_2048_8192_SHA256],
|
||||
),
|
||||
],
|
||||
};
|
|
@ -1,55 +0,0 @@
|
|||
pub use ring::default_provider;
|
||||
use tokio_rustls::rustls::{
|
||||
self,
|
||||
crypto::{ring, WebPkiSupportedAlgorithms},
|
||||
};
|
||||
|
||||
pub static TLS_SUPPORTED_CIPHERSUITES: &[rustls::SupportedCipherSuite] = &[
|
||||
ring::cipher_suite::TLS13_AES_128_GCM_SHA256,
|
||||
ring::cipher_suite::TLS13_AES_256_GCM_SHA384,
|
||||
ring::cipher_suite::TLS13_CHACHA20_POLY1305_SHA256,
|
||||
];
|
||||
// A subset of the algorithms supported by rustls+ring, imported from
|
||||
// https://github.com/rustls/rustls/blob/v/0.23.21/rustls/src/crypto/ring/mod.rs#L107
|
||||
pub static SUPPORTED_SIG_ALGS: &WebPkiSupportedAlgorithms = &WebPkiSupportedAlgorithms {
|
||||
all: &[
|
||||
webpki::ring::ECDSA_P256_SHA256,
|
||||
webpki::ring::ECDSA_P256_SHA384,
|
||||
webpki::ring::ECDSA_P384_SHA256,
|
||||
webpki::ring::ECDSA_P384_SHA384,
|
||||
webpki::ring::ED25519,
|
||||
webpki::ring::RSA_PKCS1_2048_8192_SHA256,
|
||||
webpki::ring::RSA_PKCS1_2048_8192_SHA384,
|
||||
webpki::ring::RSA_PKCS1_2048_8192_SHA512,
|
||||
webpki::ring::RSA_PKCS1_3072_8192_SHA384,
|
||||
],
|
||||
mapping: &[
|
||||
(
|
||||
rustls::SignatureScheme::ECDSA_NISTP384_SHA384,
|
||||
&[
|
||||
webpki::ring::ECDSA_P384_SHA384,
|
||||
webpki::ring::ECDSA_P256_SHA384,
|
||||
],
|
||||
),
|
||||
(
|
||||
rustls::SignatureScheme::ECDSA_NISTP256_SHA256,
|
||||
&[
|
||||
webpki::ring::ECDSA_P256_SHA256,
|
||||
webpki::ring::ECDSA_P384_SHA256,
|
||||
],
|
||||
),
|
||||
(rustls::SignatureScheme::ED25519, &[webpki::ring::ED25519]),
|
||||
(
|
||||
rustls::SignatureScheme::RSA_PKCS1_SHA512,
|
||||
&[webpki::ring::RSA_PKCS1_2048_8192_SHA512],
|
||||
),
|
||||
(
|
||||
rustls::SignatureScheme::RSA_PKCS1_SHA384,
|
||||
&[webpki::ring::RSA_PKCS1_2048_8192_SHA384],
|
||||
),
|
||||
(
|
||||
rustls::SignatureScheme::RSA_PKCS1_SHA256,
|
||||
&[webpki::ring::RSA_PKCS1_2048_8192_SHA256],
|
||||
),
|
||||
],
|
||||
};
|
|
@ -1,184 +0,0 @@
|
|||
use futures::prelude::*;
|
||||
use linkerd_identity as id;
|
||||
use linkerd_io as io;
|
||||
use linkerd_meshtls_verifier as verifier;
|
||||
use linkerd_stack::{NewService, Service};
|
||||
use linkerd_tls::{client::AlpnProtocols, ClientTls, NegotiatedProtocolRef};
|
||||
use std::{convert::TryFrom, pin::Pin, sync::Arc, task::Context};
|
||||
use tokio::sync::watch;
|
||||
use tokio_rustls::rustls::{self, pki_types::CertificateDer, ClientConfig};
|
||||
|
||||
/// A `NewService` that produces `Connect` services from a dynamic TLS configuration.
|
||||
#[derive(Clone)]
|
||||
pub struct NewClient {
|
||||
config: watch::Receiver<Arc<ClientConfig>>,
|
||||
}
|
||||
|
||||
/// A `Service` that initiates client-side TLS connections.
|
||||
#[derive(Clone)]
|
||||
pub struct Connect {
|
||||
server_id: id::Id,
|
||||
server_name: rustls::pki_types::ServerName<'static>,
|
||||
config: Arc<ClientConfig>,
|
||||
}
|
||||
|
||||
pub type ConnectFuture<I> = Pin<Box<dyn Future<Output = io::Result<ClientIo<I>>> + Send>>;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct ClientIo<I>(tokio_rustls::client::TlsStream<I>);
|
||||
|
||||
// === impl NewClient ===
|
||||
|
||||
impl NewClient {
|
||||
pub(crate) fn new(config: watch::Receiver<Arc<ClientConfig>>) -> Self {
|
||||
Self { config }
|
||||
}
|
||||
}
|
||||
|
||||
impl NewService<ClientTls> for NewClient {
|
||||
type Service = Connect;
|
||||
|
||||
fn new_service(&self, target: ClientTls) -> Self::Service {
|
||||
Connect::new(target, (*self.config.borrow()).clone())
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for NewClient {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("NewClient").finish()
|
||||
}
|
||||
}
|
||||
|
||||
// === impl Connect ===
|
||||
|
||||
impl Connect {
|
||||
pub(crate) fn new(client_tls: ClientTls, config: Arc<ClientConfig>) -> Self {
|
||||
// If ALPN protocols are configured by the endpoint, we have to clone the entire
|
||||
// configuration and set the protocols. If there are no ALPN options, clone the Arc'd base
|
||||
// configuration without extra allocation.
|
||||
//
|
||||
// TODO it would be better to avoid cloning the whole TLS config per-connection, but the
|
||||
// Rustls API doesn't give us a lot of options.
|
||||
let config = match client_tls.alpn {
|
||||
None => config,
|
||||
Some(AlpnProtocols(protocols)) => {
|
||||
let mut c = (*config).clone();
|
||||
c.alpn_protocols = protocols;
|
||||
Arc::new(c)
|
||||
}
|
||||
};
|
||||
|
||||
let server_name =
|
||||
rustls::pki_types::ServerName::try_from(client_tls.server_name.to_string())
|
||||
.expect("identity must be a valid DNS name");
|
||||
|
||||
Self {
|
||||
server_id: client_tls.server_id.into(),
|
||||
server_name,
|
||||
config,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn extract_cert(c: &rustls::ClientConnection) -> io::Result<&CertificateDer<'_>> {
|
||||
match c.peer_certificates().and_then(|certs| certs.first()) {
|
||||
Some(leaf_cert) => io::Result::Ok(leaf_cert),
|
||||
None => Err(io::Error::other("missing tls end cert")),
|
||||
}
|
||||
}
|
||||
|
||||
impl<I> Service<I> for Connect
|
||||
where
|
||||
I: io::AsyncRead + io::AsyncWrite + Send + Unpin + 'static,
|
||||
{
|
||||
type Response = ClientIo<I>;
|
||||
type Error = io::Error;
|
||||
type Future = ConnectFuture<I>;
|
||||
|
||||
fn poll_ready(&mut self, _cx: &mut Context<'_>) -> io::Poll<()> {
|
||||
io::Poll::Ready(Ok(()))
|
||||
}
|
||||
|
||||
fn call(&mut self, io: I) -> Self::Future {
|
||||
let server_id = self.server_id.clone();
|
||||
Box::pin(
|
||||
// Connect to the server, sending the `server_name` SNI in the
|
||||
// client handshake. The provided config should use the
|
||||
// `AnySanVerifier` to ignore the server certificate's DNS SANs.
|
||||
// Instead, we extract the server's leaf certificate after the
|
||||
// handshake and verify that it matches the provided `server_id``.
|
||||
tokio_rustls::TlsConnector::from(self.config.clone())
|
||||
// XXX(eliza): it's a bummer that the server name has to be cloned here...
|
||||
.connect(self.server_name.clone(), io)
|
||||
.map(move |s| {
|
||||
let s = s?;
|
||||
let (_, conn) = s.get_ref();
|
||||
let end_cert = extract_cert(conn)?;
|
||||
verifier::verify_id(end_cert, &server_id)?;
|
||||
Ok(ClientIo(s))
|
||||
}),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// === impl ClientIo ===
|
||||
|
||||
impl<I: io::AsyncRead + io::AsyncWrite + Unpin> io::AsyncRead for ClientIo<I> {
|
||||
#[inline]
|
||||
fn poll_read(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
buf: &mut io::ReadBuf<'_>,
|
||||
) -> io::Poll<()> {
|
||||
Pin::new(&mut self.0).poll_read(cx, buf)
|
||||
}
|
||||
}
|
||||
|
||||
impl<I: io::AsyncRead + io::AsyncWrite + Unpin> io::AsyncWrite for ClientIo<I> {
|
||||
#[inline]
|
||||
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
|
||||
Pin::new(&mut self.0).poll_flush(cx)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
|
||||
Pin::new(&mut self.0).poll_shutdown(cx)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> io::Poll<usize> {
|
||||
Pin::new(&mut self.0).poll_write(cx, buf)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn poll_write_vectored(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
bufs: &[io::IoSlice<'_>],
|
||||
) -> io::Poll<usize> {
|
||||
Pin::new(&mut self.0).poll_write_vectored(cx, bufs)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn is_write_vectored(&self) -> bool {
|
||||
self.0.is_write_vectored()
|
||||
}
|
||||
}
|
||||
|
||||
impl<I> ClientIo<I> {
|
||||
#[inline]
|
||||
pub fn negotiated_protocol(&self) -> Option<NegotiatedProtocolRef<'_>> {
|
||||
self.0
|
||||
.get_ref()
|
||||
.1
|
||||
.alpn_protocol()
|
||||
.map(NegotiatedProtocolRef)
|
||||
}
|
||||
}
|
||||
|
||||
impl<I: io::PeerAddr> io::PeerAddr for ClientIo<I> {
|
||||
#[inline]
|
||||
fn peer_addr(&self) -> io::Result<std::net::SocketAddr> {
|
||||
self.0.get_ref().0.peer_addr()
|
||||
}
|
||||
}
|
|
@ -1,134 +0,0 @@
|
|||
mod receiver;
|
||||
mod store;
|
||||
pub(crate) mod verify;
|
||||
|
||||
use crate::backend;
|
||||
|
||||
pub use self::{receiver::Receiver, store::Store};
|
||||
use linkerd_dns_name as dns;
|
||||
use linkerd_error::Result;
|
||||
use linkerd_identity as id;
|
||||
use std::sync::Arc;
|
||||
use thiserror::Error;
|
||||
use tokio::sync::watch;
|
||||
use tokio_rustls::rustls::{self, crypto::CryptoProvider};
|
||||
use tracing::warn;
|
||||
|
||||
#[derive(Debug, Error)]
|
||||
#[error("invalid trust roots")]
|
||||
pub struct InvalidTrustRoots(());
|
||||
|
||||
pub fn watch(
|
||||
local_id: id::Id,
|
||||
server_name: dns::Name,
|
||||
roots_pem: &str,
|
||||
) -> Result<(Store, Receiver)> {
|
||||
let mut roots = rustls::RootCertStore::empty();
|
||||
let certs = match rustls_pemfile::certs(&mut std::io::Cursor::new(roots_pem))
|
||||
.collect::<Result<Vec<_>, _>>()
|
||||
{
|
||||
Err(error) => {
|
||||
warn!(%error, "invalid trust anchors file");
|
||||
return Err(error.into());
|
||||
}
|
||||
Ok(certs) if certs.is_empty() => {
|
||||
warn!("no valid certs in trust anchors file");
|
||||
return Err("no trust roots in PEM file".into());
|
||||
}
|
||||
Ok(certs) => certs,
|
||||
};
|
||||
|
||||
let (added, skipped) = roots.add_parsable_certificates(certs);
|
||||
if skipped != 0 {
|
||||
warn!("Skipped {} invalid trust anchors", skipped);
|
||||
}
|
||||
if added == 0 {
|
||||
return Err("no trust roots loaded".into());
|
||||
}
|
||||
|
||||
// XXX: Rustls's built-in verifiers don't let us tweak things as fully as we'd like (e.g.
|
||||
// controlling the set of trusted signature algorithms), but they provide good enough
|
||||
// defaults for now.
|
||||
// TODO: lock down the verification further.
|
||||
let server_cert_verifier = Arc::new(verify::AnySanVerifier::new(roots.clone()));
|
||||
|
||||
let (client_tx, client_rx) = {
|
||||
// Since we don't have a certificate yet, build a client configuration
|
||||
// that doesn't attempt client authentication. Once we get a
|
||||
// certificate, the `Store` will publish a new configuration with a
|
||||
// client certificate resolver.
|
||||
let mut c =
|
||||
store::client_config_builder(server_cert_verifier.clone()).with_no_client_auth();
|
||||
|
||||
// Disable session resumption for the time-being until resumption is
|
||||
// more tested.
|
||||
c.resumption = rustls::client::Resumption::disabled();
|
||||
|
||||
watch::channel(Arc::new(c))
|
||||
};
|
||||
let (server_tx, server_rx) = {
|
||||
// Since we don't have a certificate yet, use an empty cert resolver so
|
||||
// that handshaking always fails. Once we get a certificate, the `Store`
|
||||
// will publish a new configuration with a server certificate resolver.
|
||||
let empty_resolver = Arc::new(rustls::server::ResolvesServerCertUsingSni::new());
|
||||
watch::channel(store::server_config(roots.clone(), empty_resolver))
|
||||
};
|
||||
|
||||
let rx = Receiver::new(local_id.clone(), server_name.clone(), client_rx, server_rx);
|
||||
let store = Store::new(
|
||||
roots,
|
||||
server_cert_verifier,
|
||||
local_id,
|
||||
server_name,
|
||||
client_tx,
|
||||
server_tx,
|
||||
);
|
||||
|
||||
Ok((store, rx))
|
||||
}
|
||||
|
||||
fn default_provider() -> Arc<CryptoProvider> {
|
||||
if let Some(provider) = CryptoProvider::get_default() {
|
||||
return Arc::clone(provider);
|
||||
}
|
||||
|
||||
let mut provider = backend::default_provider();
|
||||
provider.cipher_suites = params::TLS_SUPPORTED_CIPHERSUITES.to_vec();
|
||||
// Ignore install errors. This is the only place we install a provider, so if we raced with
|
||||
// another thread to set the provider it will be functionally the same as this provider.
|
||||
let _ = provider.install_default();
|
||||
Arc::clone(CryptoProvider::get_default().expect("Just installed a default"))
|
||||
}
|
||||
|
||||
#[cfg(feature = "test-util")]
|
||||
pub fn default_provider_for_test() -> Arc<CryptoProvider> {
|
||||
default_provider()
|
||||
}
|
||||
|
||||
#[cfg(feature = "test-util")]
|
||||
pub fn for_test(ent: &linkerd_tls_test_util::Entity) -> (Store, Receiver) {
|
||||
watch(
|
||||
ent.name.parse().expect("id must be valid"),
|
||||
ent.name.parse().expect("name must be valid"),
|
||||
std::str::from_utf8(ent.trust_anchors).expect("roots must be PEM"),
|
||||
)
|
||||
.expect("credentials must be valid")
|
||||
}
|
||||
|
||||
#[cfg(feature = "test-util")]
|
||||
pub fn default_for_test() -> (Store, Receiver) {
|
||||
for_test(&linkerd_tls_test_util::FOO_NS1)
|
||||
}
|
||||
|
||||
mod params {
|
||||
use crate::backend;
|
||||
use tokio_rustls::rustls::{self, crypto::WebPkiSupportedAlgorithms};
|
||||
|
||||
// These must be kept in sync:
|
||||
pub const SIGNATURE_ALG_RUSTLS_SCHEME: rustls::SignatureScheme =
|
||||
rustls::SignatureScheme::ECDSA_NISTP256_SHA256;
|
||||
pub static SUPPORTED_SIG_ALGS: &WebPkiSupportedAlgorithms = backend::SUPPORTED_SIG_ALGS;
|
||||
pub static TLS_VERSIONS: &[&rustls::SupportedProtocolVersion] = &[&rustls::version::TLS13];
|
||||
pub static TLS_SUPPORTED_CIPHERSUITES: &[rustls::SupportedCipherSuite] =
|
||||
backend::TLS_SUPPORTED_CIPHERSUITES;
|
||||
}
|
|
@ -1,14 +0,0 @@
|
|||
#![deny(rust_2018_idioms, clippy::disallowed_methods, clippy::disallowed_types)]
|
||||
#![forbid(unsafe_code)]
|
||||
|
||||
mod backend;
|
||||
mod client;
|
||||
pub mod creds;
|
||||
mod server;
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
|
||||
pub use self::{
|
||||
client::{ClientIo, Connect, ConnectFuture, NewClient},
|
||||
server::{Server, ServerIo, TerminateFuture},
|
||||
};
|
|
@ -1,197 +0,0 @@
|
|||
use futures::prelude::*;
|
||||
use linkerd_dns_name as dns;
|
||||
use linkerd_io as io;
|
||||
use linkerd_meshtls_verifier as verifier;
|
||||
use linkerd_stack::{Param, Service};
|
||||
use linkerd_tls::{ClientId, NegotiatedProtocol, NegotiatedProtocolRef, ServerName, ServerTls};
|
||||
use std::{pin::Pin, sync::Arc, task::Context};
|
||||
use thiserror::Error;
|
||||
use tokio::sync::watch;
|
||||
use tokio_rustls::rustls::{pki_types::CertificateDer, ServerConfig};
|
||||
use tracing::debug;
|
||||
|
||||
/// A Service that terminates TLS connections using a dynamically updated server configuration.
|
||||
#[derive(Clone)]
|
||||
pub struct Server {
|
||||
name: dns::Name,
|
||||
rx: watch::Receiver<Arc<ServerConfig>>,
|
||||
}
|
||||
|
||||
pub type TerminateFuture<I> = futures::future::MapOk<
|
||||
tokio_rustls::Accept<I>,
|
||||
fn(tokio_rustls::server::TlsStream<I>) -> (ServerTls, ServerIo<I>),
|
||||
>;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct ServerIo<I>(tokio_rustls::server::TlsStream<I>);
|
||||
|
||||
#[derive(Debug, Error)]
|
||||
#[error("credential store lost")]
|
||||
pub struct LostStore(());
|
||||
|
||||
impl Server {
|
||||
pub(crate) fn new(name: dns::Name, rx: watch::Receiver<Arc<ServerConfig>>) -> Self {
|
||||
Self { name, rx }
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) fn config(&self) -> Arc<ServerConfig> {
|
||||
(*self.rx.borrow()).clone()
|
||||
}
|
||||
|
||||
/// Spawns a background task that watches for TLS configuration updates and creates an augmented
|
||||
/// configuration with the provided ALPN protocols. The returned server uses this ALPN-aware
|
||||
/// configuration.
|
||||
pub fn spawn_with_alpn(self, alpn_protocols: Vec<Vec<u8>>) -> Result<Self, LostStore> {
|
||||
if alpn_protocols.is_empty() {
|
||||
return Ok(self);
|
||||
}
|
||||
|
||||
let mut orig_rx = self.rx;
|
||||
|
||||
let mut c = (**orig_rx.borrow_and_update()).clone();
|
||||
c.alpn_protocols.clone_from(&alpn_protocols);
|
||||
let (tx, rx) = watch::channel(c.into());
|
||||
|
||||
// Spawn a background task that watches the optional server configuration and publishes it
|
||||
// as a reliable channel, including any ALPN overrides.
|
||||
//
|
||||
// The background task completes when the original sender is closed or when all receivers
|
||||
// are dropped.
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
tokio::select! {
|
||||
_ = tx.closed() => {
|
||||
debug!("ALPN TLS config receivers dropped");
|
||||
return;
|
||||
}
|
||||
res = orig_rx.changed() => {
|
||||
if res.is_err() {
|
||||
debug!("TLS config sender closed");
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut c = (*orig_rx.borrow().clone()).clone();
|
||||
c.alpn_protocols.clone_from(&alpn_protocols);
|
||||
let _ = tx.send(c.into());
|
||||
}
|
||||
});
|
||||
|
||||
Ok(Self::new(self.name, rx))
|
||||
}
|
||||
}
|
||||
|
||||
impl Param<ServerName> for Server {
|
||||
fn param(&self) -> ServerName {
|
||||
ServerName(self.name.clone())
|
||||
}
|
||||
}
|
||||
|
||||
impl<I> Service<I> for Server
|
||||
where
|
||||
I: io::AsyncRead + io::AsyncWrite + Send + Unpin,
|
||||
{
|
||||
type Response = (ServerTls, ServerIo<I>);
|
||||
type Error = std::io::Error;
|
||||
type Future = TerminateFuture<I>;
|
||||
|
||||
#[inline]
|
||||
fn poll_ready(&mut self, _cx: &mut Context<'_>) -> io::Poll<()> {
|
||||
io::Poll::Ready(Ok(()))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn call(&mut self, io: I) -> Self::Future {
|
||||
tokio_rustls::TlsAcceptor::from((*self.rx.borrow()).clone())
|
||||
.accept(io)
|
||||
.map_ok(|io| {
|
||||
// Determine the peer's identity, if it exist.
|
||||
let client_id = client_identity(&io);
|
||||
|
||||
let negotiated_protocol = io
|
||||
.get_ref()
|
||||
.1
|
||||
.alpn_protocol()
|
||||
.map(|b| NegotiatedProtocol(b.into()));
|
||||
|
||||
debug!(client.id = ?client_id, alpn = ?negotiated_protocol, "Accepted TLS connection");
|
||||
let tls = ServerTls::Established {
|
||||
client_id,
|
||||
negotiated_protocol,
|
||||
};
|
||||
(tls, ServerIo(io))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn client_identity<I>(tls: &tokio_rustls::server::TlsStream<I>) -> Option<ClientId> {
|
||||
let (_io, session) = tls.get_ref();
|
||||
let certs = session.peer_certificates()?;
|
||||
let c = certs.first().map(CertificateDer::as_ref)?;
|
||||
|
||||
verifier::client_identity(c).map(ClientId)
|
||||
}
|
||||
|
||||
// === impl ServerIo ===
|
||||
|
||||
impl<I: io::AsyncRead + io::AsyncWrite + Unpin> io::AsyncRead for ServerIo<I> {
|
||||
#[inline]
|
||||
fn poll_read(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
buf: &mut io::ReadBuf<'_>,
|
||||
) -> io::Poll<()> {
|
||||
Pin::new(&mut self.0).poll_read(cx, buf)
|
||||
}
|
||||
}
|
||||
|
||||
impl<I: io::AsyncRead + io::AsyncWrite + Unpin> io::AsyncWrite for ServerIo<I> {
|
||||
#[inline]
|
||||
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
|
||||
Pin::new(&mut self.0).poll_flush(cx)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
|
||||
Pin::new(&mut self.0).poll_shutdown(cx)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> io::Poll<usize> {
|
||||
Pin::new(&mut self.0).poll_write(cx, buf)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn poll_write_vectored(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
bufs: &[io::IoSlice<'_>],
|
||||
) -> io::Poll<usize> {
|
||||
Pin::new(&mut self.0).poll_write_vectored(cx, bufs)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn is_write_vectored(&self) -> bool {
|
||||
self.0.is_write_vectored()
|
||||
}
|
||||
}
|
||||
|
||||
impl<I> ServerIo<I> {
|
||||
#[inline]
|
||||
pub fn negotiated_protocol(&self) -> Option<NegotiatedProtocolRef<'_>> {
|
||||
self.0
|
||||
.get_ref()
|
||||
.1
|
||||
.alpn_protocol()
|
||||
.map(NegotiatedProtocolRef)
|
||||
}
|
||||
}
|
||||
|
||||
impl<I: io::PeerAddr> io::PeerAddr for ServerIo<I> {
|
||||
#[inline]
|
||||
fn peer_addr(&self) -> io::Result<std::net::SocketAddr> {
|
||||
self.0.get_ref().0.peer_addr()
|
||||
}
|
||||
}
|
|
@ -1,50 +0,0 @@
|
|||
use linkerd_identity::{Credentials, DerX509};
|
||||
use linkerd_tls_test_util::*;
|
||||
use std::time::{Duration, SystemTime};
|
||||
|
||||
fn load(ent: &Entity) -> crate::creds::Store {
|
||||
let roots_pem = std::str::from_utf8(ent.trust_anchors).expect("valid PEM");
|
||||
let (store, _) = crate::creds::watch(
|
||||
ent.name.parse().unwrap(),
|
||||
ent.name.parse().unwrap(),
|
||||
roots_pem,
|
||||
)
|
||||
.expect("credentials must be readable");
|
||||
store
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_construct_client_and_server_config_from_valid_settings() {
|
||||
assert!(load(&FOO_NS1)
|
||||
.set_certificate(
|
||||
DerX509(FOO_NS1.crt.to_vec()),
|
||||
vec![],
|
||||
FOO_NS1.key.to_vec(),
|
||||
SystemTime::now() + Duration::from_secs(1000)
|
||||
)
|
||||
.is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn recognize_ca_did_not_issue_cert() {
|
||||
assert!(load(&FOO_NS1_CA2)
|
||||
.set_certificate(
|
||||
DerX509(FOO_NS1.crt.to_vec()),
|
||||
vec![],
|
||||
FOO_NS1.key.to_vec(),
|
||||
SystemTime::now() + Duration::from_secs(1000)
|
||||
)
|
||||
.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn recognize_cert_is_not_valid_for_identity() {
|
||||
assert!(load(&BAR_NS1)
|
||||
.set_certificate(
|
||||
DerX509(FOO_NS1.crt.to_vec()),
|
||||
vec![],
|
||||
FOO_NS1.key.to_vec(),
|
||||
SystemTime::now() + Duration::from_secs(1000)
|
||||
)
|
||||
.is_err());
|
||||
}
|
|
@ -1,92 +1,93 @@
|
|||
use futures::prelude::*;
|
||||
use linkerd_identity as id;
|
||||
use linkerd_io as io;
|
||||
use linkerd_meshtls_verifier as verifier;
|
||||
use linkerd_stack::{NewService, Service};
|
||||
use linkerd_tls::{ClientTls, NegotiatedProtocol};
use std::{
    future::Future,
    pin::Pin,
    task::{Context, Poll},
};

#[cfg(feature = "boring")]
use crate::boring;

#[cfg(feature = "rustls")]
use crate::rustls;

#[cfg(not(feature = "__has_any_tls_impls"))]
use std::marker::PhantomData;

#[derive(Clone, Debug)]
pub enum NewClient {
    #[cfg(feature = "boring")]
    Boring(boring::NewClient),

    #[cfg(feature = "rustls")]
    Rustls(rustls::NewClient),

    #[cfg(not(feature = "__has_any_tls_impls"))]
    NoTls,
}
use linkerd_tls::{client::AlpnProtocols, ClientTls, NegotiatedProtocol, NegotiatedProtocolRef};
use std::{convert::TryFrom, pin::Pin, sync::Arc, task::Context};
use tokio::sync::watch;
use tokio_rustls::rustls::{self, pki_types::CertificateDer, ClientConfig};

/// A `NewService` that produces `Connect` services from a dynamic TLS configuration.
#[derive(Clone)]
pub enum Connect {
    #[cfg(feature = "boring")]
    Boring(boring::Connect),

    #[cfg(feature = "rustls")]
    Rustls(rustls::Connect),

    #[cfg(not(feature = "__has_any_tls_impls"))]
    NoTls,
pub struct NewClient {
    config: watch::Receiver<Arc<ClientConfig>>,
}

#[pin_project::pin_project(project = ConnectFutureProj)]
pub enum ConnectFuture<I> {
    #[cfg(feature = "boring")]
    Boring(#[pin] boring::ConnectFuture<I>),

    #[cfg(feature = "rustls")]
    Rustls(#[pin] rustls::ConnectFuture<I>),

    #[cfg(not(feature = "__has_any_tls_impls"))]
    NoTls(PhantomData<fn(I)>),
/// A `Service` that initiates client-side TLS connections.
#[derive(Clone)]
pub struct Connect {
    server_id: id::Id,
    server_name: rustls::pki_types::ServerName<'static>,
    config: Arc<ClientConfig>,
}

#[pin_project::pin_project(project = ClientIoProj)]
pub type ConnectFuture<I> =
    Pin<Box<dyn Future<Output = io::Result<(ClientIo<I>, Option<NegotiatedProtocol>)>> + Send>>;

#[derive(Debug)]
pub enum ClientIo<I> {
    #[cfg(feature = "boring")]
    Boring(#[pin] boring::ClientIo<I>),

    #[cfg(feature = "rustls")]
    Rustls(#[pin] rustls::ClientIo<I>),

    #[cfg(not(feature = "__has_any_tls_impls"))]
    NoTls(PhantomData<fn(I)>),
}
pub struct ClientIo<I>(tokio_rustls::client::TlsStream<I>);

// === impl NewClient ===

impl NewClient {
    pub(crate) fn new(config: watch::Receiver<Arc<ClientConfig>>) -> Self {
        Self { config }
    }
}

impl NewService<ClientTls> for NewClient {
    type Service = Connect;

    #[inline]
    fn new_service(&self, target: ClientTls) -> Self::Service {
        match self {
            #[cfg(feature = "boring")]
            Self::Boring(new_client) => Connect::Boring(new_client.new_service(target)),
        Connect::new(target, (*self.config.borrow()).clone())
    }
}

            #[cfg(feature = "rustls")]
            Self::Rustls(new_client) => Connect::Rustls(new_client.new_service(target)),

            #[cfg(not(feature = "__has_any_tls_impls"))]
            _ => crate::no_tls!(target),
        }
impl std::fmt::Debug for NewClient {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("NewClient").finish()
    }
}

// === impl Connect ===

impl Connect {
    pub(crate) fn new(client_tls: ClientTls, config: Arc<ClientConfig>) -> Self {
        // If ALPN protocols are configured by the endpoint, we have to clone the entire
        // configuration and set the protocols. If there are no ALPN options, clone the Arc'd base
        // configuration without extra allocation.
        //
        // TODO it would be better to avoid cloning the whole TLS config per-connection, but the
        // Rustls API doesn't give us a lot of options.
        let config = match client_tls.alpn {
            None => config,
            Some(AlpnProtocols(protocols)) => {
                let mut c = (*config).clone();
                c.alpn_protocols = protocols;
                Arc::new(c)
            }
        };

        let server_name =
            rustls::pki_types::ServerName::try_from(client_tls.server_name.to_string())
                .expect("identity must be a valid DNS name");

        Self {
            server_id: client_tls.server_id.into(),
            server_name,
            config,
        }
    }
}

fn extract_cert(c: &rustls::ClientConnection) -> io::Result<&CertificateDer<'_>> {
    match c.peer_certificates().and_then(|certs| certs.first()) {
        Some(leaf_cert) => io::Result::Ok(leaf_cert),
        None => Err(io::Error::other("missing tls end cert")),
    }
}

impl<I> Service<I> for Connect
where
    I: io::AsyncRead + io::AsyncWrite + Send + Unpin + 'static,

@@ -95,67 +96,31 @@ where
    type Error = io::Error;
    type Future = ConnectFuture<I>;

    #[inline]
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        match self {
            #[cfg(feature = "boring")]
            Self::Boring(connect) => <boring::Connect as Service<I>>::poll_ready(connect, cx),

            #[cfg(feature = "rustls")]
            Self::Rustls(connect) => <rustls::Connect as Service<I>>::poll_ready(connect, cx),

            #[cfg(not(feature = "__has_any_tls_impls"))]
            _ => crate::no_tls!(cx),
        }
    fn poll_ready(&mut self, _cx: &mut Context<'_>) -> io::Poll<()> {
        io::Poll::Ready(Ok(()))
    }

    #[inline]
    fn call(&mut self, io: I) -> Self::Future {
        match self {
            #[cfg(feature = "boring")]
            Self::Boring(connect) => ConnectFuture::Boring(connect.call(io)),

            #[cfg(feature = "rustls")]
            Self::Rustls(connect) => ConnectFuture::Rustls(connect.call(io)),

            #[cfg(not(feature = "__has_any_tls_impls"))]
            _ => crate::no_tls!(io),
        }
    }
}

// === impl ConnectFuture ===

impl<I> Future for ConnectFuture<I>
where
    I: io::AsyncRead + io::AsyncWrite + Unpin,
{
    type Output = io::Result<(ClientIo<I>, Option<NegotiatedProtocol>)>;

    #[inline]
    fn poll(self: std::pin::Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        match self.project() {
            #[cfg(feature = "boring")]
            ConnectFutureProj::Boring(f) => {
                let res = futures::ready!(f.poll(cx));
                Poll::Ready(res.map(|io| {
        let server_id = self.server_id.clone();
        Box::pin(
            // Connect to the server, sending the `server_name` SNI in the
            // client handshake. The provided config should use the
            // `AnySanVerifier` to ignore the server certificate's DNS SANs.
            // Instead, we extract the server's leaf certificate after the
            // handshake and verify that it matches the provided `server_id`.
            tokio_rustls::TlsConnector::from(self.config.clone())
                // XXX(eliza): it's a bummer that the server name has to be cloned here...
                .connect(self.server_name.clone(), io)
                .map(move |s| {
                    let s = s?;
                    let (_, conn) = s.get_ref();
                    let end_cert = extract_cert(conn)?;
                    verifier::verify_id(end_cert, &server_id)?;
                    let io = ClientIo(s);
                    let np = io.negotiated_protocol().map(|np| np.to_owned());
                    (ClientIo::Boring(io), np)
                }))
            }

            #[cfg(feature = "rustls")]
            ConnectFutureProj::Rustls(f) => {
                let res = futures::ready!(f.poll(cx));
                Poll::Ready(res.map(|io| {
                    let np = io.negotiated_protocol().map(|np| np.to_owned());
                    (ClientIo::Rustls(io), np)
                }))
            }

            #[cfg(not(feature = "__has_any_tls_impls"))]
            _ => crate::no_tls!(cx),
        }
                    Ok((io, np))
                }),
        )
    }
}

@@ -164,104 +129,59 @@ where
impl<I: io::AsyncRead + io::AsyncWrite + Unpin> io::AsyncRead for ClientIo<I> {
    #[inline]
    fn poll_read(
        self: Pin<&mut Self>,
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut io::ReadBuf<'_>,
    ) -> io::Poll<()> {
        match self.project() {
            #[cfg(feature = "boring")]
            ClientIoProj::Boring(io) => io.poll_read(cx, buf),

            #[cfg(feature = "rustls")]
            ClientIoProj::Rustls(io) => io.poll_read(cx, buf),
            #[cfg(not(feature = "__has_any_tls_impls"))]
            _ => crate::no_tls!(cx, buf),
        }
        Pin::new(&mut self.0).poll_read(cx, buf)
    }
}

impl<I: io::AsyncRead + io::AsyncWrite + Unpin> io::AsyncWrite for ClientIo<I> {
    #[inline]
    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
        match self.project() {
            #[cfg(feature = "boring")]
            ClientIoProj::Boring(io) => io.poll_flush(cx),

            #[cfg(feature = "rustls")]
            ClientIoProj::Rustls(io) => io.poll_flush(cx),
            #[cfg(not(feature = "__has_any_tls_impls"))]
            _ => crate::no_tls!(cx),
        }
    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
        Pin::new(&mut self.0).poll_flush(cx)
    }

    #[inline]
    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
        match self.project() {
            #[cfg(feature = "boring")]
            ClientIoProj::Boring(io) => io.poll_shutdown(cx),

            #[cfg(feature = "rustls")]
            ClientIoProj::Rustls(io) => io.poll_shutdown(cx),
            #[cfg(not(feature = "__has_any_tls_impls"))]
            _ => crate::no_tls!(cx),
        }
    fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
        Pin::new(&mut self.0).poll_shutdown(cx)
    }

    #[inline]
    fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> io::Poll<usize> {
        match self.project() {
            #[cfg(feature = "boring")]
            ClientIoProj::Boring(io) => io.poll_write(cx, buf),

            #[cfg(feature = "rustls")]
            ClientIoProj::Rustls(io) => io.poll_write(cx, buf),
            #[cfg(not(feature = "__has_any_tls_impls"))]
            _ => crate::no_tls!(cx, buf),
        }
    fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> io::Poll<usize> {
        Pin::new(&mut self.0).poll_write(cx, buf)
    }

    #[inline]
    fn poll_write_vectored(
        self: Pin<&mut Self>,
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        bufs: &[io::IoSlice<'_>],
    ) -> Poll<Result<usize, std::io::Error>> {
        match self.project() {
            #[cfg(feature = "boring")]
            ClientIoProj::Boring(io) => io.poll_write_vectored(cx, bufs),

            #[cfg(feature = "rustls")]
            ClientIoProj::Rustls(io) => io.poll_write_vectored(cx, bufs),
            #[cfg(not(feature = "__has_any_tls_impls"))]
            _ => crate::no_tls!(cx, bufs),
        }
    ) -> io::Poll<usize> {
        Pin::new(&mut self.0).poll_write_vectored(cx, bufs)
    }

    #[inline]
    fn is_write_vectored(&self) -> bool {
        match self {
            #[cfg(feature = "boring")]
            Self::Boring(io) => io.is_write_vectored(),
        self.0.is_write_vectored()
    }
}

            #[cfg(feature = "rustls")]
            Self::Rustls(io) => io.is_write_vectored(),
            #[cfg(not(feature = "__has_any_tls_impls"))]
            _ => crate::no_tls!(),
        }
impl<I> ClientIo<I> {
    #[inline]
    pub fn negotiated_protocol(&self) -> Option<NegotiatedProtocolRef<'_>> {
        self.0
            .get_ref()
            .1
            .alpn_protocol()
            .map(NegotiatedProtocolRef)
    }
}

impl<I: io::PeerAddr> io::PeerAddr for ClientIo<I> {
    #[inline]
    fn peer_addr(&self) -> io::Result<std::net::SocketAddr> {
        match self {
            #[cfg(feature = "boring")]
            Self::Boring(io) => io.peer_addr(),

            #[cfg(feature = "rustls")]
            Self::Rustls(io) => io.peer_addr(),
            #[cfg(not(feature = "__has_any_tls_impls"))]
            _ => crate::no_tls!(),
        }
        self.0.get_ref().0.peer_addr()
    }
}
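A minimal sketch (not part of the diff) of the clone-on-write handling used by `Connect::new` above, with a stand-in `Config` type instead of rustls's `ClientConfig`: the shared `Arc` is reused when no ALPN override is requested and deep-cloned only when the endpoint supplies protocols.

// Sketch only; `Config` is a hypothetical stand-in for the real client config.
use std::sync::Arc;

#[derive(Clone)]
struct Config {
    alpn_protocols: Vec<Vec<u8>>,
}

fn with_alpn(base: Arc<Config>, alpn: Option<Vec<Vec<u8>>>) -> Arc<Config> {
    match alpn {
        // No override: share the existing allocation.
        None => base,
        // Override: clone the config once and set the protocols on the copy.
        Some(protocols) => {
            let mut c = (*base).clone();
            c.alpn_protocols = protocols;
            Arc::new(c)
        }
    }
}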
@@ -1,122 +1,101 @@
use std::time::SystemTime;
mod receiver;
mod store;
pub(crate) mod verify;

use crate::{NewClient, Server};
pub use self::{receiver::Receiver, store::Store};
use linkerd_dns_name as dns;
use linkerd_error::Result;
use linkerd_identity::{Credentials, DerX509, Id};
use linkerd_identity as id;
use std::sync::Arc;
use thiserror::Error;
use tokio::sync::watch;
use tokio_rustls::rustls::{self};
use tracing::warn;

#[cfg(feature = "boring")]
pub use crate::boring;
#[derive(Debug, Error)]
#[error("invalid trust roots")]
pub struct InvalidTrustRoots(());

#[cfg(feature = "rustls")]
pub use crate::rustls;

pub enum Store {
    #[cfg(feature = "boring")]
    Boring(boring::creds::Store),

    #[cfg(feature = "rustls")]
    Rustls(rustls::creds::Store),
    #[cfg(not(feature = "__has_any_tls_impls"))]
    NoTls,
}

#[derive(Clone, Debug)]
pub enum Receiver {
    #[cfg(feature = "boring")]
    Boring(boring::creds::Receiver),

    #[cfg(feature = "rustls")]
    Rustls(rustls::creds::Receiver),
    #[cfg(not(feature = "__has_any_tls_impls"))]
    NoTls,
}

// === impl Store ===

impl Credentials for Store {
    fn set_certificate(
        &mut self,
        leaf: DerX509,
        chain: Vec<DerX509>,
        key: Vec<u8>,
        exp: SystemTime,
    ) -> Result<()> {
        match self {
            #[cfg(feature = "boring")]
            Self::Boring(store) => store.set_certificate(leaf, chain, key, exp),

            #[cfg(feature = "rustls")]
            Self::Rustls(store) => store.set_certificate(leaf, chain, key, exp),

            #[cfg(not(feature = "__has_any_tls_impls"))]
            _ => crate::no_tls!(leaf, chain, key, exp),
pub fn watch(
    local_id: id::Id,
    server_name: dns::Name,
    roots_pem: &str,
) -> Result<(Store, Receiver)> {
    let mut roots = rustls::RootCertStore::empty();
    let certs = match rustls_pemfile::certs(&mut std::io::Cursor::new(roots_pem))
        .collect::<Result<Vec<_>, _>>()
    {
        Err(error) => {
            warn!(%error, "invalid trust anchors file");
            return Err(error.into());
        }
        Ok(certs) if certs.is_empty() => {
            warn!("no valid certs in trust anchors file");
            return Err("no trust roots in PEM file".into());
        }
        Ok(certs) => certs,
    };

    let (added, skipped) = roots.add_parsable_certificates(certs);
    if skipped != 0 {
        warn!("Skipped {} invalid trust anchors", skipped);
    }
    if added == 0 {
        return Err("no trust roots loaded".into());
    }

    // XXX: Rustls's built-in verifiers don't let us tweak things as fully as we'd like (e.g.
    // controlling the set of trusted signature algorithms), but they provide good enough
    // defaults for now.
    // TODO: lock down the verification further.
    let server_cert_verifier = Arc::new(verify::AnySanVerifier::new(roots.clone()));

    let (client_tx, client_rx) = {
        // Since we don't have a certificate yet, build a client configuration
        // that doesn't attempt client authentication. Once we get a
        // certificate, the `Store` will publish a new configuration with a
        // client certificate resolver.
        let mut c =
            store::client_config_builder(server_cert_verifier.clone()).with_no_client_auth();

        // Disable session resumption for the time-being until resumption is
        // more tested.
        c.resumption = rustls::client::Resumption::disabled();

        watch::channel(Arc::new(c))
    };
    let (server_tx, server_rx) = {
        // Since we don't have a certificate yet, use an empty cert resolver so
        // that handshaking always fails. Once we get a certificate, the `Store`
        // will publish a new configuration with a server certificate resolver.
        let empty_resolver = Arc::new(rustls::server::ResolvesServerCertUsingSni::new());
        watch::channel(store::server_config(roots.clone(), empty_resolver))
    };

    let rx = Receiver::new(local_id.clone(), server_name.clone(), client_rx, server_rx);
    let store = Store::new(
        roots,
        server_cert_verifier,
        local_id,
        server_name,
        client_tx,
        server_tx,
    );

    Ok((store, rx))
}

// === impl Receiver ===

#[cfg(feature = "boring")]
impl From<boring::creds::Receiver> for Receiver {
    fn from(rx: boring::creds::Receiver) -> Self {
        Self::Boring(rx)
    }
#[cfg(feature = "test-util")]
pub fn for_test(ent: &linkerd_tls_test_util::Entity) -> (Store, Receiver) {
    watch(
        ent.name.parse().expect("id must be valid"),
        ent.name.parse().expect("name must be valid"),
        std::str::from_utf8(ent.trust_anchors).expect("roots must be PEM"),
    )
    .expect("credentials must be valid")
}

#[cfg(feature = "rustls")]
impl From<rustls::creds::Receiver> for Receiver {
    fn from(rx: rustls::creds::Receiver) -> Self {
        Self::Rustls(rx)
    }
}

impl Receiver {
    pub fn local_id(&self) -> &Id {
        match self {
            #[cfg(feature = "boring")]
            Self::Boring(receiver) => receiver.local_id(),

            #[cfg(feature = "rustls")]
            Self::Rustls(receiver) => receiver.local_id(),
            #[cfg(not(feature = "__has_any_tls_impls"))]
            _ => crate::no_tls!(),
        }
    }

    pub fn server_name(&self) -> &dns::Name {
        match self {
            #[cfg(feature = "boring")]
            Self::Boring(receiver) => receiver.server_name(),

            #[cfg(feature = "rustls")]
            Self::Rustls(receiver) => receiver.server_name(),
            #[cfg(not(feature = "__has_any_tls_impls"))]
            _ => crate::no_tls!(),
        }
    }

    pub fn new_client(&self) -> NewClient {
        match self {
            #[cfg(feature = "boring")]
            Self::Boring(receiver) => NewClient::Boring(receiver.new_client()),

            #[cfg(feature = "rustls")]
            Self::Rustls(receiver) => NewClient::Rustls(receiver.new_client()),
            #[cfg(not(feature = "__has_any_tls_impls"))]
            _ => crate::no_tls!(),
        }
    }

    pub fn server(&self) -> Server {
        match self {
            #[cfg(feature = "boring")]
            Self::Boring(receiver) => Server::Boring(receiver.server()),

            #[cfg(feature = "rustls")]
            Self::Rustls(receiver) => Server::Rustls(receiver.server()),
            #[cfg(not(feature = "__has_any_tls_impls"))]
            _ => crate::no_tls!(),
        }
    }
#[cfg(feature = "test-util")]
pub fn default_for_test() -> (Store, Receiver) {
    for_test(&linkerd_tls_test_util::FOO_NS1)
}

@@ -63,7 +63,6 @@ impl std::fmt::Debug for Receiver {
#[cfg(test)]
mod tests {
    use super::*;
    use crate::creds::default_provider;

    /// Returns the simplest default rustls server config.
    ///
@@ -71,7 +70,7 @@ mod tests {
    /// incoming handshakes, but that doesn't matter for these tests, where we
    /// don't actually do any TLS.
    fn empty_server_config() -> rustls::ServerConfig {
        rustls::ServerConfig::builder_with_provider(default_provider())
        rustls::ServerConfig::builder_with_provider(linkerd_rustls::get_default_provider())
            .with_protocol_versions(rustls::ALL_VERSIONS)
            .expect("client config must be valid")
            .with_client_cert_verifier(Arc::new(rustls::server::NoClientAuth))
@@ -84,7 +83,7 @@ mod tests {
    /// it doesn't trust any root certificates. However, that doesn't actually
    /// matter for these tests, which don't actually do TLS.
    fn empty_client_config() -> rustls::ClientConfig {
        rustls::ClientConfig::builder_with_provider(default_provider())
        rustls::ClientConfig::builder_with_provider(linkerd_rustls::get_default_provider())
            .with_protocol_versions(rustls::ALL_VERSIONS)
            .expect("client config must be valid")
            .with_root_certificates(rustls::RootCertStore::empty())
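A minimal sketch (not part of the diff) of how the trust-anchor loading in `creds::watch` above behaves, assuming the rustls 0.23 and rustls-pemfile 2.x APIs used elsewhere in this diff; the function name is local to this example.

use tokio_rustls::rustls::RootCertStore;

fn load_roots(roots_pem: &str) -> Result<RootCertStore, Box<dyn std::error::Error>> {
    // Parse every certificate in the PEM bundle, failing on malformed input.
    let certs = rustls_pemfile::certs(&mut std::io::Cursor::new(roots_pem))
        .collect::<Result<Vec<_>, _>>()?;
    if certs.is_empty() {
        return Err("no trust roots in PEM file".into());
    }
    // Add the parsed certificates; rustls reports how many were usable.
    let mut roots = RootCertStore::empty();
    let (added, skipped) = roots.add_parsable_certificates(certs);
    if skipped != 0 {
        eprintln!("skipped {skipped} invalid trust anchors");
    }
    if added == 0 {
        return Err("no trust roots loaded".into());
    }
    Ok(roots)
}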
@@ -1,4 +1,3 @@
use super::{default_provider, params::*};
use linkerd_dns_name as dns;
use linkerd_error::Result;
use linkerd_identity as id;
@@ -28,8 +27,8 @@ struct CertResolver(Arc<rustls::sign::CertifiedKey>);
pub(super) fn client_config_builder(
    cert_verifier: Arc<dyn rustls::client::danger::ServerCertVerifier>,
) -> rustls::ConfigBuilder<rustls::ClientConfig, rustls::client::WantsClientCert> {
    rustls::ClientConfig::builder_with_provider(default_provider())
        .with_protocol_versions(TLS_VERSIONS)
    rustls::ClientConfig::builder_with_provider(linkerd_rustls::get_default_provider())
        .with_protocol_versions(linkerd_rustls::TLS_VERSIONS)
        .expect("client config must be valid")
        // XXX: Rustls's built-in verifiers don't let us tweak things as fully
        // as we'd like (e.g. controlling the set of trusted signature
@@ -55,7 +54,7 @@ pub(super) fn server_config(
    // controlling the set of trusted signature algorithms), but they provide good enough
    // defaults for now.
    // TODO: lock down the verification further.
    let provider = default_provider();
    let provider = linkerd_rustls::get_default_provider();

    let client_cert_verifier =
        WebPkiClientVerifier::builder_with_provider(Arc::new(roots), provider.clone())
@@ -64,7 +63,7 @@ pub(super) fn server_config(
        .expect("server verifier must be valid");

    rustls::ServerConfig::builder_with_provider(provider)
        .with_protocol_versions(TLS_VERSIONS)
        .with_protocol_versions(linkerd_rustls::TLS_VERSIONS)
        .expect("server config must be valid")
        .with_client_cert_verifier(client_cert_verifier)
        .with_cert_resolver(resolver)
@@ -172,7 +171,7 @@ impl CertResolver {
        &self,
        sigschemes: &[rustls::SignatureScheme],
    ) -> Option<Arc<rustls::sign::CertifiedKey>> {
        if !sigschemes.contains(&SIGNATURE_ALG_RUSTLS_SCHEME) {
        if !sigschemes.contains(&linkerd_rustls::SIGNATURE_ALG_RUSTLS_SCHEME) {
            debug!("Signature scheme not supported -> no certificate");
            return None;
        }
@@ -1,4 +1,4 @@
use crate::creds::params::SUPPORTED_SIG_ALGS;
use linkerd_rustls::SUPPORTED_SIG_ALGS;
use std::{convert::TryFrom, sync::Arc};
use tokio_rustls::rustls::{
    self,
@@ -15,132 +15,11 @@
mod client;
pub mod creds;
mod server;
#[cfg(test)]
mod tests;

pub use self::{
    client::{ClientIo, Connect, ConnectFuture, NewClient},
    creds::watch,
    server::{Server, ServerIo, TerminateFuture},
};
use linkerd_dns_name as dns;
use linkerd_error::{Error, Result};
use linkerd_identity as id;
use std::str::FromStr;

#[cfg(feature = "boring")]
pub use linkerd_meshtls_boring as boring;

#[cfg(feature = "rustls")]
pub use linkerd_meshtls_rustls as rustls;

#[derive(Copy, Clone, Debug)]
pub enum Mode {
    #[cfg(feature = "boring")]
    Boring,

    #[cfg(feature = "rustls")]
    Rustls,

    #[cfg(not(feature = "__has_any_tls_impls"))]
    NoTls,
}

#[cfg(not(feature = "__has_any_tls_impls"))]
#[macro_export]
macro_rules! no_tls {
    ($($field:ident),*) => {
        {
            $(
                let _ = $field;
            )*
            unreachable!("compiled without any TLS implementations enabled!");
        }
    };
}

// === impl Mode ===

#[cfg(feature = "rustls")]
impl Default for Mode {
    fn default() -> Self {
        Self::Rustls
    }
}

// FIXME(ver) We should have a way to opt into boring by configuration when both are enabled.
#[cfg(all(feature = "boring", not(feature = "rustls")))]
impl Default for Mode {
    fn default() -> Self {
        Self::Boring
    }
}

#[cfg(not(feature = "__has_any_tls_impls"))]
impl Default for Mode {
    fn default() -> Self {
        Self::NoTls
    }
}

impl Mode {
    pub fn watch(
        self,
        local_id: id::Id,
        server_name: dns::Name,
        roots_pem: &str,
    ) -> Result<(creds::Store, creds::Receiver)> {
        match self {
            #[cfg(feature = "boring")]
            Self::Boring => {
                let (store, receiver) = boring::creds::watch(local_id, server_name, roots_pem)?;
                Ok((
                    creds::Store::Boring(store),
                    creds::Receiver::Boring(receiver),
                ))
            }

            #[cfg(feature = "rustls")]
            Self::Rustls => {
                let (store, receiver) = rustls::creds::watch(local_id, server_name, roots_pem)?;
                Ok((
                    creds::Store::Rustls(store),
                    creds::Receiver::Rustls(receiver),
                ))
            }

            #[cfg(not(feature = "__has_any_tls_impls"))]
            _ => no_tls!(local_id, server_name, roots_pem),
        }
    }
}

impl FromStr for Mode {
    type Err = Error;

    fn from_str(s: &str) -> Result<Self> {
        #[cfg(feature = "boring")]
        if s.eq_ignore_ascii_case("boring") {
            return Ok(Self::Boring);
        }

        #[cfg(feature = "rustls")]
        if s.eq_ignore_ascii_case("rustls") {
            return Ok(Self::Rustls);
        }

        Err(format!("unknown TLS backend: {s}").into())
    }
}

impl std::fmt::Display for Mode {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            #[cfg(feature = "boring")]
            Self::Boring => "boring".fmt(f),

            #[cfg(feature = "rustls")]
            Self::Rustls => "rustls".fmt(f),

            #[cfg(not(feature = "__has_any_tls_impls"))]
            _ => no_tls!(f),
        }
    }
}
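A minimal usage sketch (not part of the diff) of the `Mode` FromStr/Default impls being removed above, assuming a build of linkerd_meshtls with the "rustls" feature; the LINKERD2_PROXY_TLS_BACKEND variable name is invented for this example.

use linkerd_meshtls::Mode;

fn tls_mode_from_env() -> Mode {
    std::env::var("LINKERD2_PROXY_TLS_BACKEND")
        .ok()
        .and_then(|s| s.parse::<Mode>().ok()) // "boring" or "rustls"
        .unwrap_or_default() // falls back to the feature-determined default
}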
@@ -1,159 +1,137 @@
use linkerd_error::Result;
use futures::prelude::*;
use linkerd_dns_name as dns;
use linkerd_io as io;
use linkerd_meshtls_verifier as verifier;
use linkerd_stack::{Param, Service};
use linkerd_tls::{ServerName, ServerTls};
use std::{
    future::Future,
    pin::Pin,
    task::{Context, Poll},
};

#[cfg(feature = "boring")]
use crate::boring;

#[cfg(feature = "rustls")]
use crate::rustls;

#[cfg(not(feature = "__has_any_tls_impls"))]
use std::marker::PhantomData;
use linkerd_tls::{ClientId, NegotiatedProtocol, NegotiatedProtocolRef, ServerName, ServerTls};
use std::{pin::Pin, sync::Arc, task::Context};
use thiserror::Error;
use tokio::sync::watch;
use tokio_rustls::rustls::{pki_types::CertificateDer, ServerConfig};
use tracing::debug;

/// A Service that terminates TLS connections using a dynamically updated server configuration.
#[derive(Clone)]
pub enum Server {
    #[cfg(feature = "boring")]
    Boring(boring::Server),

    #[cfg(feature = "rustls")]
    Rustls(rustls::Server),

    #[cfg(not(feature = "__has_any_tls_impls"))]
    NoTls,
pub struct Server {
    name: dns::Name,
    rx: watch::Receiver<Arc<ServerConfig>>,
}

#[pin_project::pin_project(project = TerminateFutureProj)]
pub enum TerminateFuture<I> {
    #[cfg(feature = "boring")]
    Boring(#[pin] boring::TerminateFuture<I>),
pub type TerminateFuture<I> = futures::future::MapOk<
    tokio_rustls::Accept<I>,
    fn(tokio_rustls::server::TlsStream<I>) -> (ServerTls, ServerIo<I>),
>;

    #[cfg(feature = "rustls")]
    Rustls(#[pin] rustls::TerminateFuture<I>),

    #[cfg(not(feature = "__has_any_tls_impls"))]
    NoTls(PhantomData<fn(I)>),
}

#[pin_project::pin_project(project = ServerIoProj)]
#[derive(Debug)]
pub enum ServerIo<I> {
    #[cfg(feature = "boring")]
    Boring(#[pin] boring::ServerIo<I>),
pub struct ServerIo<I>(tokio_rustls::server::TlsStream<I>);

    #[cfg(feature = "rustls")]
    Rustls(#[pin] rustls::ServerIo<I>),
#[derive(Debug, Error)]
#[error("credential store lost")]
pub struct LostStore(());

    #[cfg(not(feature = "__has_any_tls_impls"))]
    NoTls(PhantomData<fn(I)>),
}
impl Server {
    pub(crate) fn new(name: dns::Name, rx: watch::Receiver<Arc<ServerConfig>>) -> Self {
        Self { name, rx }
    }

// === impl Server ===
    #[cfg(test)]
    pub(crate) fn config(&self) -> Arc<ServerConfig> {
        (*self.rx.borrow()).clone()
    }

impl Param<ServerName> for Server {
    #[inline]
    fn param(&self) -> ServerName {
        match self {
            #[cfg(feature = "boring")]
            Self::Boring(srv) => srv.param(),

            #[cfg(feature = "rustls")]
            Self::Rustls(srv) => srv.param(),

            #[cfg(not(feature = "__has_any_tls_impls"))]
            _ => crate::no_tls!(),
    /// Spawns a background task that watches for TLS configuration updates and creates an augmented
    /// configuration with the provided ALPN protocols. The returned server uses this ALPN-aware
    /// configuration.
    pub fn spawn_with_alpn(self, alpn_protocols: Vec<Vec<u8>>) -> Result<Self, LostStore> {
        if alpn_protocols.is_empty() {
            return Ok(self);
        }

        let mut orig_rx = self.rx;

        let mut c = (**orig_rx.borrow_and_update()).clone();
        c.alpn_protocols.clone_from(&alpn_protocols);
        let (tx, rx) = watch::channel(c.into());

        // Spawn a background task that watches the optional server configuration and publishes it
        // as a reliable channel, including any ALPN overrides.
        //
        // The background task completes when the original sender is closed or when all receivers
        // are dropped.
        tokio::spawn(async move {
            loop {
                tokio::select! {
                    _ = tx.closed() => {
                        debug!("ALPN TLS config receivers dropped");
                        return;
                    }
                    res = orig_rx.changed() => {
                        if res.is_err() {
                            debug!("TLS config sender closed");
                            return;
                        }
                    }
                }

                let mut c = (*orig_rx.borrow().clone()).clone();
                c.alpn_protocols.clone_from(&alpn_protocols);
                let _ = tx.send(c.into());
            }
        });

        Ok(Self::new(self.name, rx))
    }
}

impl Server {
    pub fn with_alpn(self, alpn_protocols: Vec<Vec<u8>>) -> Result<Self> {
        match self {
            #[cfg(feature = "boring")]
            Self::Boring(srv) => Ok(Self::Boring(srv.with_alpn(alpn_protocols))),

            #[cfg(feature = "rustls")]
            Self::Rustls(srv) => srv
                .spawn_with_alpn(alpn_protocols)
                .map(Self::Rustls)
                .map_err(Into::into),

            #[cfg(not(feature = "__has_any_tls_impls"))]
            _ => crate::no_tls!(alpn_protocols),
        }
impl Param<ServerName> for Server {
    fn param(&self) -> ServerName {
        ServerName(self.name.clone())
    }
}

impl<I> Service<I> for Server
where
    I: io::AsyncRead + io::AsyncWrite + Send + Unpin + 'static,
    I: io::AsyncRead + io::AsyncWrite + Send + Unpin,
{
    type Response = (ServerTls, ServerIo<I>);
    type Error = io::Error;
    type Error = std::io::Error;
    type Future = TerminateFuture<I>;

    #[inline]
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        match self {
            #[cfg(feature = "boring")]
            Self::Boring(svc) => <boring::Server as Service<I>>::poll_ready(svc, cx),

            #[cfg(feature = "rustls")]
            Self::Rustls(svc) => <rustls::Server as Service<I>>::poll_ready(svc, cx),

            #[cfg(not(feature = "__has_any_tls_impls"))]
            _ => crate::no_tls!(cx),
        }
    fn poll_ready(&mut self, _cx: &mut Context<'_>) -> io::Poll<()> {
        io::Poll::Ready(Ok(()))
    }

    #[inline]
    fn call(&mut self, io: I) -> Self::Future {
        match self {
            #[cfg(feature = "boring")]
            Self::Boring(svc) => TerminateFuture::Boring(svc.call(io)),
        tokio_rustls::TlsAcceptor::from((*self.rx.borrow()).clone())
            .accept(io)
            .map_ok(|io| {
                // Determine the peer's identity, if it exists.
                let client_id = client_identity(&io);

            #[cfg(feature = "rustls")]
            Self::Rustls(svc) => TerminateFuture::Rustls(svc.call(io)),
                let negotiated_protocol = io
                    .get_ref()
                    .1
                    .alpn_protocol()
                    .map(|b| NegotiatedProtocol(b.into()));

            #[cfg(not(feature = "__has_any_tls_impls"))]
            _ => crate::no_tls!(io),
        }
                debug!(client.id = ?client_id, alpn = ?negotiated_protocol, "Accepted TLS connection");
                let tls = ServerTls::Established {
                    client_id,
                    negotiated_protocol,
                };
                (tls, ServerIo(io))
            })
    }
}

// === impl TerminateFuture ===
fn client_identity<I>(tls: &tokio_rustls::server::TlsStream<I>) -> Option<ClientId> {
    let (_io, session) = tls.get_ref();
    let certs = session.peer_certificates()?;
    let c = certs.first().map(CertificateDer::as_ref)?;

impl<I> Future for TerminateFuture<I>
where
    I: io::AsyncRead + io::AsyncWrite + Unpin,
{
    type Output = io::Result<(ServerTls, ServerIo<I>)>;

    #[inline]
    fn poll(self: std::pin::Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        match self.project() {
            #[cfg(feature = "boring")]
            TerminateFutureProj::Boring(f) => {
                let res = futures::ready!(f.poll(cx));
                Poll::Ready(res.map(|(tls, io)| (tls, ServerIo::Boring(io))))
            }

            #[cfg(feature = "rustls")]
            TerminateFutureProj::Rustls(f) => {
                let res = futures::ready!(f.poll(cx));
                Poll::Ready(res.map(|(tls, io)| (tls, ServerIo::Rustls(io))))
            }

            #[cfg(not(feature = "__has_any_tls_impls"))]
            _ => crate::no_tls!(cx),
        }
    }
    verifier::client_identity(c).map(ClientId)
}

// === impl ServerIo ===

@@ -161,105 +139,59 @@ where
impl<I: io::AsyncRead + io::AsyncWrite + Unpin> io::AsyncRead for ServerIo<I> {
    #[inline]
    fn poll_read(
        self: Pin<&mut Self>,
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut io::ReadBuf<'_>,
    ) -> io::Poll<()> {
        match self.project() {
            #[cfg(feature = "boring")]
            ServerIoProj::Boring(io) => io.poll_read(cx, buf),

            #[cfg(feature = "rustls")]
            ServerIoProj::Rustls(io) => io.poll_read(cx, buf),

            #[cfg(not(feature = "__has_any_tls_impls"))]
            _ => crate::no_tls!(cx, buf),
        }
        Pin::new(&mut self.0).poll_read(cx, buf)
    }
}

impl<I: io::AsyncRead + io::AsyncWrite + Unpin> io::AsyncWrite for ServerIo<I> {
    #[inline]
    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
        match self.project() {
            #[cfg(feature = "boring")]
            ServerIoProj::Boring(io) => io.poll_flush(cx),

            #[cfg(feature = "rustls")]
            ServerIoProj::Rustls(io) => io.poll_flush(cx),
            #[cfg(not(feature = "__has_any_tls_impls"))]
            _ => crate::no_tls!(cx),
        }
    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
        Pin::new(&mut self.0).poll_flush(cx)
    }

    #[inline]
    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
        match self.project() {
            #[cfg(feature = "boring")]
            ServerIoProj::Boring(io) => io.poll_shutdown(cx),

            #[cfg(feature = "rustls")]
            ServerIoProj::Rustls(io) => io.poll_shutdown(cx),
            #[cfg(not(feature = "__has_any_tls_impls"))]
            _ => crate::no_tls!(cx),
        }
    fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
        Pin::new(&mut self.0).poll_shutdown(cx)
    }

    #[inline]
    fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> io::Poll<usize> {
        match self.project() {
            #[cfg(feature = "boring")]
            ServerIoProj::Boring(io) => io.poll_write(cx, buf),

            #[cfg(feature = "rustls")]
            ServerIoProj::Rustls(io) => io.poll_write(cx, buf),
            #[cfg(not(feature = "__has_any_tls_impls"))]
            _ => crate::no_tls!(cx, buf),
        }
    fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> io::Poll<usize> {
        Pin::new(&mut self.0).poll_write(cx, buf)
    }

    #[inline]
    fn poll_write_vectored(
        self: Pin<&mut Self>,
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        bufs: &[io::IoSlice<'_>],
    ) -> Poll<Result<usize, std::io::Error>> {
        match self.project() {
            #[cfg(feature = "boring")]
            ServerIoProj::Boring(io) => io.poll_write_vectored(cx, bufs),

            #[cfg(feature = "rustls")]
            ServerIoProj::Rustls(io) => io.poll_write_vectored(cx, bufs),
            #[cfg(not(feature = "__has_any_tls_impls"))]
            _ => crate::no_tls!(cx, bufs),
        }
    ) -> io::Poll<usize> {
        Pin::new(&mut self.0).poll_write_vectored(cx, bufs)
    }

    #[inline]
    fn is_write_vectored(&self) -> bool {
        match self {
            #[cfg(feature = "boring")]
            Self::Boring(io) => io.is_write_vectored(),
        self.0.is_write_vectored()
    }
}

            #[cfg(feature = "rustls")]
            Self::Rustls(io) => io.is_write_vectored(),
            #[cfg(not(feature = "__has_any_tls_impls"))]
            _ => crate::no_tls!(),
        }
impl<I> ServerIo<I> {
    #[inline]
    pub fn negotiated_protocol(&self) -> Option<NegotiatedProtocolRef<'_>> {
        self.0
            .get_ref()
            .1
            .alpn_protocol()
            .map(NegotiatedProtocolRef)
    }
}

impl<I: io::PeerAddr> io::PeerAddr for ServerIo<I> {
    #[inline]
    fn peer_addr(&self) -> io::Result<std::net::SocketAddr> {
        match self {
            #[cfg(feature = "boring")]
            Self::Boring(io) => io.peer_addr(),

            #[cfg(feature = "rustls")]
            Self::Rustls(io) => io.peer_addr(),
            #[cfg(not(feature = "__has_any_tls_impls"))]
            _ => crate::no_tls!(),
        }
        self.0.get_ref().0.peer_addr()
    }
}
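A minimal sketch (not part of the diff) of the watch-channel republishing pattern used by `spawn_with_alpn` above, reduced to a plain `String` configuration so it runs with tokio alone: a background task mirrors the upstream channel, applies an override to each published value, and exits when either side goes away. The names are local to this example; call it from inside a tokio runtime.

use std::sync::Arc;
use tokio::sync::watch;

fn spawn_override(
    mut upstream: watch::Receiver<Arc<String>>,
    suffix: String,
) -> watch::Receiver<Arc<String>> {
    // Publish an initial value derived from the current upstream config.
    let initial = Arc::new(format!("{}{}", upstream.borrow_and_update(), suffix));
    let (tx, rx) = watch::channel(initial);
    tokio::spawn(async move {
        loop {
            tokio::select! {
                // All derived receivers were dropped; stop mirroring.
                _ = tx.closed() => return,
                // The upstream sender was dropped; stop mirroring.
                res = upstream.changed() => if res.is_err() { return },
            }
            // Re-derive and publish whenever the upstream config changes.
            let next = Arc::new(format!("{}{}", upstream.borrow(), suffix));
            let _ = tx.send(next);
        }
    });
    rx
}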
@@ -1,27 +0,0 @@
#![cfg(feature = "boring")]
#![deny(rust_2018_idioms, clippy::disallowed_methods, clippy::disallowed_types)]
#![forbid(unsafe_code)]

mod util;

use linkerd_meshtls::Mode;

#[test]
fn fails_processing_cert_when_wrong_id_configured() {
    util::fails_processing_cert_when_wrong_id_configured(Mode::Boring);
}

#[tokio::test(flavor = "current_thread")]
async fn plaintext() {
    util::plaintext(Mode::Boring).await;
}

#[tokio::test(flavor = "current_thread")]
async fn proxy_to_proxy_tls_works() {
    util::proxy_to_proxy_tls_works(Mode::Boring).await;
}

#[tokio::test(flavor = "current_thread")]
async fn proxy_to_proxy_tls_pass_through_when_identity_does_not_match() {
    util::proxy_to_proxy_tls_pass_through_when_identity_does_not_match(Mode::Boring).await;
}
@@ -1,27 +1,24 @@
#![cfg(feature = "rustls")]
#![deny(rust_2018_idioms, clippy::disallowed_methods, clippy::disallowed_types)]
#![forbid(unsafe_code)]

mod util;

use linkerd_meshtls::Mode;

#[test]
fn fails_processing_cert_when_wrong_id_configured() {
    util::fails_processing_cert_when_wrong_id_configured(Mode::Rustls);
    util::fails_processing_cert_when_wrong_id_configured();
}

#[tokio::test(flavor = "current_thread")]
async fn plaintext() {
    util::plaintext(Mode::Rustls).await;
    util::plaintext().await;
}

#[tokio::test(flavor = "current_thread")]
async fn proxy_to_proxy_tls_works() {
    util::proxy_to_proxy_tls_works(Mode::Rustls).await;
    util::proxy_to_proxy_tls_works().await;
}

#[tokio::test(flavor = "current_thread")]
async fn proxy_to_proxy_tls_pass_through_when_identity_does_not_match() {
    util::proxy_to_proxy_tls_pass_through_when_identity_does_not_match(Mode::Rustls).await;
    util::proxy_to_proxy_tls_pass_through_when_identity_does_not_match().await;
}
@@ -7,7 +7,7 @@ use linkerd_dns_name::Name;
use linkerd_error::Infallible;
use linkerd_identity::{Credentials, DerX509, Id};
use linkerd_io::{self as io, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
use linkerd_meshtls as meshtls;
use linkerd_meshtls::{self as meshtls, watch};
use linkerd_proxy_transport::{
    addrs::*,
    listen::{Addrs, Bind, BindTcp},
@@ -52,15 +52,13 @@ fn generate_cert_with_name(subject_alt_names: Vec<SanType>) -> (Vec<u8>, Vec<u8>
    )
}

pub fn fails_processing_cert_when_wrong_id_configured(mode: meshtls::Mode) {
pub fn fails_processing_cert_when_wrong_id_configured() {
    let server_name = Name::from_str("system.local").expect("should parse");
    let id = Id::Dns(server_name.clone());

    let (cert, key, roots) =
        generate_cert_with_name(vec![SanType::URI("spiffe://system/local".parse().unwrap())]);
    let (mut store, _) = mode
        .watch(id, server_name.clone(), &roots)
        .expect("should construct");
    let (mut store, _) = watch(id, server_name.clone(), &roots).expect("should construct");

    let err = store
        .set_certificate(DerX509(cert), vec![], key, SystemTime::now())
@@ -69,9 +67,9 @@ pub fn fails_processing_cert_when_wrong_id_configured(mode: meshtls::Mode) {
    assert_eq!("certificate does not match TLS identity", format!("{err}"),);
}

pub async fn plaintext(mode: meshtls::Mode) {
    let (_foo, _, server_tls) = load(mode, &test_util::FOO_NS1);
    let (_bar, client_tls, _) = load(mode, &test_util::BAR_NS1);
pub async fn plaintext() {
    let (_foo, _, server_tls) = load(&test_util::FOO_NS1);
    let (_bar, client_tls, _) = load(&test_util::BAR_NS1);
    let (client_result, server_result) = run_test(
        client_tls,
        Conditional::None(tls::NoClientTls::NotProvidedByServiceDiscovery),
@@ -94,9 +92,9 @@ pub async fn plaintext(mode: meshtls::Mode) {
    assert_eq!(&server_result.result.expect("ping")[..], PING);
}

pub async fn proxy_to_proxy_tls_works(mode: meshtls::Mode) {
    let (_foo, _, server_tls) = load(mode, &test_util::FOO_NS1);
    let (_bar, client_tls, _) = load(mode, &test_util::BAR_NS1);
pub async fn proxy_to_proxy_tls_works() {
    let (_foo, _, server_tls) = load(&test_util::FOO_NS1);
    let (_bar, client_tls, _) = load(&test_util::BAR_NS1);
    let server_id = tls::ServerId(test_util::FOO_NS1.id.parse().unwrap());
    let server_name = tls::ServerName(test_util::FOO_NS1.name.parse().unwrap());
    let (client_result, server_result) = run_test(
@@ -125,12 +123,12 @@ pub async fn proxy_to_proxy_tls_works(mode: meshtls::Mode) {
    assert_eq!(&server_result.result.expect("ping")[..], PING);
}

pub async fn proxy_to_proxy_tls_pass_through_when_identity_does_not_match(mode: meshtls::Mode) {
    let (_foo, _, server_tls) = load(mode, &test_util::FOO_NS1);
pub async fn proxy_to_proxy_tls_pass_through_when_identity_does_not_match() {
    let (_foo, _, server_tls) = load(&test_util::FOO_NS1);

    // Misuse the client's identity instead of the server's identity. Any
    // identity other than `server_tls.server_identity` would work.
    let (_bar, client_tls, _) = load(mode, &test_util::BAR_NS1);
    let (_bar, client_tls, _) = load(&test_util::BAR_NS1);
    let server_id = test_util::BAR_NS1.id.parse::<tls::ServerId>().unwrap();
    let server_name = test_util::BAR_NS1.name.parse::<tls::ServerName>().unwrap();

@@ -161,18 +159,14 @@ type ServerConn<T, I> = (
    io::EitherIo<meshtls::ServerIo<tls::server::DetectIo<I>>, tls::server::DetectIo<I>>,
);

fn load(
    mode: meshtls::Mode,
    ent: &test_util::Entity,
) -> (meshtls::creds::Store, meshtls::NewClient, meshtls::Server) {
fn load(ent: &test_util::Entity) -> (meshtls::creds::Store, meshtls::NewClient, meshtls::Server) {
    let roots_pem = std::str::from_utf8(ent.trust_anchors).expect("valid PEM");
    let (mut store, rx) = mode
        .watch(
            ent.name.parse().unwrap(),
            ent.name.parse().unwrap(),
            roots_pem,
        )
        .expect("credentials must be readable");
    let (mut store, rx) = watch(
        ent.name.parse().unwrap(),
        ent.name.parse().unwrap(),
        roots_pem,
    )
    .expect("credentials must be readable");

    store
        .set_certificate(
@@ -285,7 +279,7 @@ where

    let tls = Some(client_server_id.clone());
    let client = async move {
        let conn = tls::Client::layer(client_tls)
        let conn = tls::Client::<meshtls::NewClient, ConnectTcp>::layer(client_tls)
            .layer(ConnectTcp::new(Keepalive(None), UserTimeout(None)))
            .oneshot(Target(server_addr.into(), client_server_id))
            .await;
@@ -16,9 +16,7 @@ test_util = []
bytes = { workspace = true }
deflate = { version = "1", features = ["gzip"] }
http = { workspace = true }
http-body = { workspace = true }
http-body-util = { workspace = true }
hyper = { workspace = true, features = ["http1", "http2"] }
kubert-prometheus-process = { version = "0.2", optional = true }
parking_lot = "0.12"
prometheus-client = { workspace = true }
@@ -1,6 +1,6 @@
use super::{
    fmt::{FmtLabels, FmtMetric},
    Factor,
    legacy::Factor,
};
use std::fmt::{self, Display};
use std::sync::atomic::{AtomicU64, Ordering};