Compare commits

...

84 Commits

Author SHA1 Message Date
katelyn martin e8cc4ec47b
nit(app): remove frivolous code (#4094)
this commit removes a piece of code that has been commented out.

it additionally removes a variable binding that is not needed. `dst` is
not moved, so we do not need to bind the address of the destination
service to a variable, nor do we need to clone it.
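
a rough sketch of the shape of this simplification, using hypothetical stand-in types rather than the proxy's actual code:

```rust
// illustrative only: `Target` and `route` are hypothetical stand-ins for the
// proxy's actual types; this just shows the shape of the simplification.
use std::net::SocketAddr;

struct Target {
    addr: SocketAddr,
}

fn route(addr: &SocketAddr) {
    println!("routing to {addr}");
}

fn main() {
    let dst = Target {
        addr: "127.0.0.1:8080".parse().unwrap(),
    };

    // before: an unneeded binding and clone of the destination address.
    // let addr = dst.addr.clone();
    // route(&addr);

    // after: `dst` is never moved, so its address can be borrowed directly.
    route(&dst.addr);
}
```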

Signed-off-by: katelyn martin <kate@buoyant.io>
2025-08-20 11:31:09 -04:00
dependabot[bot] b951a6c374
build(deps): bump tempfile from 3.20.0 to 3.21.0 (#4093)
Bumps [tempfile](https://github.com/Stebalien/tempfile) from 3.20.0 to 3.21.0.
- [Changelog](https://github.com/Stebalien/tempfile/blob/master/CHANGELOG.md)
- [Commits](https://github.com/Stebalien/tempfile/commits)

---
updated-dependencies:
- dependency-name: tempfile
  dependency-version: 3.21.0
  dependency-type: indirect
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-20 10:13:45 -04:00
dependabot[bot] 7f6ac15f13
build(deps): bump cfg-if from 1.0.1 to 1.0.3 (#4092)
Bumps [cfg-if](https://github.com/rust-lang/cfg-if) from 1.0.1 to 1.0.3.
- [Release notes](https://github.com/rust-lang/cfg-if/releases)
- [Changelog](https://github.com/rust-lang/cfg-if/blob/main/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/cfg-if/compare/v1.0.1...v1.0.3)

---
updated-dependencies:
- dependency-name: cfg-if
  dependency-version: 1.0.3
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-20 10:13:04 -04:00
dependabot[bot] 75e9caaeae
build(deps): bump thiserror from 2.0.15 to 2.0.16 (#4091)
Bumps [thiserror](https://github.com/dtolnay/thiserror) from 2.0.15 to 2.0.16.
- [Release notes](https://github.com/dtolnay/thiserror/releases)
- [Commits](https://github.com/dtolnay/thiserror/compare/2.0.15...2.0.16)

---
updated-dependencies:
- dependency-name: thiserror
  dependency-version: 2.0.16
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-20 10:12:43 -04:00
dependabot[bot] 02bbb3d617
build(deps): bump prettyplease from 0.2.36 to 0.2.37 (#4090)
Bumps [prettyplease](https://github.com/dtolnay/prettyplease) from 0.2.36 to 0.2.37.
- [Release notes](https://github.com/dtolnay/prettyplease/releases)
- [Commits](https://github.com/dtolnay/prettyplease/compare/0.2.36...0.2.37)

---
updated-dependencies:
- dependency-name: prettyplease
  dependency-version: 0.2.37
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-19 15:40:53 -04:00
dependabot[bot] 103c69ca75
build(deps): bump serde_json from 1.0.142 to 1.0.143 (#4088)
Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.142 to 1.0.143.
- [Release notes](https://github.com/serde-rs/json/releases)
- [Commits](https://github.com/serde-rs/json/compare/v1.0.142...v1.0.143)

---
updated-dependencies:
- dependency-name: serde_json
  dependency-version: 1.0.143
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-19 15:40:39 -04:00
dependabot[bot] 4663cc4eb6
build(deps): bump tinyvec from 1.9.0 to 1.10.0 (#4087)
Bumps [tinyvec](https://github.com/Lokathor/tinyvec) from 1.9.0 to 1.10.0.
- [Changelog](https://github.com/Lokathor/tinyvec/blob/main/CHANGELOG.md)
- [Commits](https://github.com/Lokathor/tinyvec/compare/v1.9.0...v1.10.0)

---
updated-dependencies:
- dependency-name: tinyvec
  dependency-version: 1.10.0
  dependency-type: indirect
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-19 15:40:31 -04:00
dependabot[bot] 03374b9543
build(deps): bump hyper from 1.6.0 to 1.7.0 (#4089)
Bumps [hyper](https://github.com/hyperium/hyper) from 1.6.0 to 1.7.0.
- [Release notes](https://github.com/hyperium/hyper/releases)
- [Changelog](https://github.com/hyperium/hyper/blob/master/CHANGELOG.md)
- [Commits](https://github.com/hyperium/hyper/compare/v1.6.0...v1.7.0)

---
updated-dependencies:
- dependency-name: hyper
  dependency-version: 1.7.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-19 15:40:03 -04:00
Scott Fleener 4c9ae74450 chore(metrics): Use `linkerd-rustls` for crypto provider metrics
Now that the `rustls` initialization/configuration has been decoupled from `linkerd-meshtls`, we can get the provider directly from there. This handles the uninitialized case better, which should be less of a problem now that we always directly initialize the provider in main.

Signed-off-by: Scott Fleener <scott@buoyant.io>
2025-08-19 14:06:32 -04:00
katelyn martin 94572d174d
refactor(http/classify): remove unused classification middleware (#4085)
`NewBroadcastClassification<C, X, N>` is not used.

`BroadcastClassification<C, S>` is only used by the `gate` submodule in
this crate.

this commit removes `NewBroadcastClassification`, since it is unused.
this commit demotes `channel` to an internal submodule, since it has no
external users.

the reëxport of `BroadcastClassification` is unused, though it is left
intact because it _is_ exposed by `NewClassifyGateSet`'s implementation
of `NewService<T>`.

Signed-off-by: katelyn martin <kate@buoyant.io>
2025-08-19 09:33:29 -04:00
katelyn martin 897c7e85bc
refactor(app/core): remove unused `gate` reëxport (#4084)
`linkerd_app_core::classify` reëxports symbols from
`linkerd_proxy_http::classify::gate`.

nothing makes use of this, and these symbols are already reëxported from
`linkerd_proxy_http::classify`. existing callsites in the outbound proxy
import this middleware directly, or through the reëxport in
`linkerd_proxy_http`.

this commit removes this `pub use` directive, since it does nothing.

Signed-off-by: katelyn martin <kate@buoyant.io>
2025-08-18 20:58:18 +00:00
Scott Fleener 036ca75c00
chore(tls): Install default rustls provider in main (#4083)
* chore(tls): Move rustls into dedicated crate

Signed-off-by: Scott Fleener <scott@buoyant.io>

* chore(tls): Remove extraneous provider installs from tests

Signed-off-by: Scott Fleener <scott@buoyant.io>

* chore(tls): Install default rustls provider in main

Signed-off-by: Scott Fleener <scott@buoyant.io>
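
A minimal sketch of what installing a process-wide default provider in `main` can look like via rustls's public API; the proxy's actual startup wiring is not shown here and may differ:

```rust
// Sketch only: rustls's public API with the aws-lc-rs backend enabled.
fn main() {
    // Install the default crypto provider for the whole process before any
    // TLS configuration is built. Installing a second time returns an error.
    rustls::crypto::aws_lc_rs::default_provider()
        .install_default()
        .expect("the default crypto provider should only be installed once");

    // ... proxy startup continues ...
}
```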

---------

Signed-off-by: Scott Fleener <scott@buoyant.io>
2025-08-18 19:55:09 +00:00
Scott Fleener 98e731d841
chore(tls): Move `rustls` configuration into dedicated crate (#4082)
* chore(tls): Move rustls into dedicated crate

Signed-off-by: Scott Fleener <scott@buoyant.io>

* chore(tls): Remove extraneous provider installs from tests

Signed-off-by: Scott Fleener <scott@buoyant.io>

---------

Signed-off-by: Scott Fleener <scott@buoyant.io>
2025-08-18 12:48:28 -07:00
Scott Fleener d5b12ea5e2 chore(tls): Hoist rustls directly into linkerd-meshtls
Signed-off-by: Scott Fleener <scott@buoyant.io>
2025-08-18 15:37:05 -04:00
Scott Fleener a64170bd61
chore(tls): Refactor `rustls` shims out of `linkerd-meshtls` (#4080)
* chore(tls): Refactor shims out of meshtls

Meshtls previously assumed that multiple TLS implementations could be used. Now that we've consolidated on rustls as the TLS implementation, we can remove these shims.

Signed-off-by: Scott Fleener <scott@buoyant.io>

* chore(tls): Refactor mode out of meshtls

Signed-off-by: Scott Fleener <scott@buoyant.io>

---------

Signed-off-by: Scott Fleener <scott@buoyant.io>
2025-08-18 12:27:09 -07:00
katelyn martin 973dfa6f4d
refactor(app/inbound): remove unused `proxy_metrics()` method (#4079)
this commit removes `linkerd_app_inbound::Inbound::proxy_metrics()`.

this accessor is not used anywhere.

Signed-off-by: katelyn martin <kate@buoyant.io>
2025-08-18 14:36:31 -04:00
Scott Fleener 17bff6144a
feat(tls): Explicitly include post-quantum key exchange algorithms (#4070)
* feat(tls): Explicitly include post-quantum key exchange algorithms

This explicitly sets the key exchange algorithms the proxy uses. It adds `X25519MLKEM768` as the most preferred algorithm in non-FIPS mode, and `SECP256R1MLKEM768` in FIPS mode.

Note that `X25519MLKEM768` is still probably appropriate for FIPS environments according to [NIST's special publication 800-56Cr2](https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-56Cr2.pdf) as it performs a FIPS-approved key-establishment first (`MLKEM768`), but we should evaluate this position more before committing to it.
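
A hedged sketch of expressing such a preference with rustls's provider API, assuming the aws-lc-rs backend exposes these groups; the proxy's actual configuration may differ:

```rust
use rustls::crypto::{aws_lc_rs, CryptoProvider};

// Sketch: a provider whose key-exchange preference lists the post-quantum
// hybrid group first, followed by the classical groups.
fn provider_with_pq_first() -> CryptoProvider {
    CryptoProvider {
        kx_groups: vec![
            aws_lc_rs::kx_group::X25519MLKEM768, // ML-KEM-768 + X25519 hybrid
            aws_lc_rs::kx_group::X25519,
            aws_lc_rs::kx_group::SECP256R1,
            aws_lc_rs::kx_group::SECP384R1,
        ],
        ..aws_lc_rs::default_provider()
    }
}
```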

Signed-off-by: Scott Fleener <scott@buoyant.io>
2025-08-18 14:09:34 -04:00
katelyn martin d385094caa
nit(app/inbound): fix `InboundMetrics` doc comment (#4078)
this commit changes this comment in two ways:

1. fix a copy-paste typo. this should say "inbound", not "outbound".
2. add note that this is a "legacy" structure.

the equivalent structure in the outbound proxy was labeled as such
in https://github.com/linkerd/linkerd2-proxy/pull/2887.

see:

```rust
 /// Holds LEGACY outbound proxy metrics.
 #[derive(Clone, Debug)]
 pub struct OutboundMetrics {
     pub(crate) http_errors: error::Http,
     pub(crate) tcp_errors: error::Tcp,

     // pub(crate) http_route_backends: RouteBackendMetrics,
     // pub(crate) grpc_route_backends: RouteBackendMetrics,
     /// Holds metrics that are common to both inbound and outbound proxies. These metrics are
     /// reported separately
     pub(crate) proxy: Proxy,

     pub(crate) prom: PromMetrics,
 }
```

\- <dce6b61191/linkerd/app/outbound/src/metrics.rs (L22-L35)>

`authz::HttpAuthzMetrics`, `error::HttpErrorMetrics`,
`authz::TcpAuthzMetrics`, and `error::TcpErrorMetrics` all make use of
the "legacy" metrics implementation defined in `linkerd_metrics`.

Signed-off-by: katelyn martin <kate@buoyant.io>
2025-08-18 14:01:37 -04:00
dependabot[bot] dce6b61191
build(deps): bump syn from 2.0.105 to 2.0.106 (#4077)
Bumps [syn](https://github.com/dtolnay/syn) from 2.0.105 to 2.0.106.
- [Release notes](https://github.com/dtolnay/syn/releases)
- [Commits](https://github.com/dtolnay/syn/compare/2.0.105...2.0.106)

---
updated-dependencies:
- dependency-name: syn
  dependency-version: 2.0.106
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-18 12:12:30 -04:00
dependabot[bot] 28ebc47a6b
build(deps): bump cc from 1.2.32 to 1.2.33 (#4076)
Bumps [cc](https://github.com/rust-lang/cc-rs) from 1.2.32 to 1.2.33.
- [Release notes](https://github.com/rust-lang/cc-rs/releases)
- [Changelog](https://github.com/rust-lang/cc-rs/blob/main/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/cc-rs/compare/cc-v1.2.32...cc-v1.2.33)

---
updated-dependencies:
- dependency-name: cc
  dependency-version: 1.2.33
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-18 11:38:09 -04:00
dependabot[bot] 4bae7e98f2
build(deps): bump proc-macro2 from 1.0.97 to 1.0.101 (#4075)
Bumps [proc-macro2](https://github.com/dtolnay/proc-macro2) from 1.0.97 to 1.0.101.
- [Release notes](https://github.com/dtolnay/proc-macro2/releases)
- [Commits](https://github.com/dtolnay/proc-macro2/compare/1.0.97...1.0.101)

---
updated-dependencies:
- dependency-name: proc-macro2
  dependency-version: 1.0.101
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-18 11:37:49 -04:00
dependabot[bot] b89c4902c6
build(deps): bump thiserror from 2.0.14 to 2.0.15 (#4074)
Bumps [thiserror](https://github.com/dtolnay/thiserror) from 2.0.14 to 2.0.15.
- [Release notes](https://github.com/dtolnay/thiserror/releases)
- [Commits](https://github.com/dtolnay/thiserror/compare/2.0.14...2.0.15)

---
updated-dependencies:
- dependency-name: thiserror
  dependency-version: 2.0.15
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-18 11:26:07 -04:00
dependabot[bot] 8a80f1ce95
build(deps): bump async-trait from 0.1.88 to 0.1.89 (#4073)
Bumps [async-trait](https://github.com/dtolnay/async-trait) from 0.1.88 to 0.1.89.
- [Release notes](https://github.com/dtolnay/async-trait/releases)
- [Commits](https://github.com/dtolnay/async-trait/compare/0.1.88...0.1.89)

---
updated-dependencies:
- dependency-name: async-trait
  dependency-version: 0.1.89
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-18 10:47:36 -04:00
dependabot[bot] edc35d6e18
build(deps): bump derive_arbitrary from 1.4.1 to 1.4.2 (#4072)
Bumps [derive_arbitrary](https://github.com/rust-fuzz/arbitrary) from 1.4.1 to 1.4.2.
- [Changelog](https://github.com/rust-fuzz/arbitrary/blob/main/CHANGELOG.md)
- [Commits](https://github.com/rust-fuzz/arbitrary/compare/v1.4.1...v1.4.2)

---
updated-dependencies:
- dependency-name: derive_arbitrary
  dependency-version: 1.4.2
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-18 10:44:51 -04:00
dependabot[bot] 99f322a9a0
build(deps): bump arbitrary from 1.4.1 to 1.4.2 (#4071)
Bumps [arbitrary](https://github.com/rust-fuzz/arbitrary) from 1.4.1 to 1.4.2.
- [Changelog](https://github.com/rust-fuzz/arbitrary/blob/main/CHANGELOG.md)
- [Commits](https://github.com/rust-fuzz/arbitrary/compare/v1.4.1...v1.4.2)

---
updated-dependencies:
- dependency-name: arbitrary
  dependency-version: 1.4.2
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-18 07:26:46 -07:00
Scott Fleener 627a5aad21
feat(tls): Remove boring as a TLS implementation (#4038)
* chore(tls): Remove ring as crypto backend

The broader ecosystem has mostly moved to aws-lc-rs as the primary rustls backend, and we should follow suit. This will also simplify the maintenance of the proxy's TLS implementation in the long term.

There will need to be some refactoring to clean up the rustls provider interfaces, but that will come in follow-ups.

Signed-off-by: Scott Fleener <scott@buoyant.io>

* feat(tls): Remove boring as a TLS implementation

BoringSSL, as we use it today, doesn't integrate well with the broader rustls ecosystem, so this removes it. This will also simplify the maintenance of the proxy's TLS implementation in the long term.

There will need to be some refactoring to clean up the rustls provider interfaces, but that will come in follow-ups.

Signed-off-by: Scott Fleener <scott@buoyant.io>

* chore(tls): Restore existing aws-lc feature names for compatibility

Signed-off-by: Scott Fleener <scott@buoyant.io>

* fix(tls): Use correct feature name for fips conditionals

Signed-off-by: Scott Fleener <scott@buoyant.io>

---------

Signed-off-by: Scott Fleener <scott@buoyant.io>
2025-08-14 23:27:58 -04:00
Scott Fleener 356f80b786
chore(tls): Improve `aws-lc` usage (#4069)
This adds a few small improvements to how we handle the `aws-lc` usage in the proxy:

- Pull provider customization into the `aws-lc` backend, reducing the surface that the module exposes
- Validate that the provider is actually FIPS-compatible when FIPS is enabled
- Use the same signature verification algorithms in the `rustls` provider as we do in the cert verifier. Previously, the provider also included RSA_PSS_2048_8192_SHA256, which is marked as legacy and which we have no strong reason to support.
- Add change-detector tests for the cipher suites, key exchange groups, and signature algorithms (a sketch of such a test follows below). These should ideally never change unless `rustls` changes, at which point we can re-evaluate which algorithms the proxy uses.
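
A minimal sketch of such a change-detector test over the provider's cipher suites; `linkerd_rustls::provider()` is a hypothetical accessor, and the expected list mirrors the `rustls_info` metric example later in this log:

```rust
// Sketch of a change-detector test. `linkerd_rustls::provider()` is a
// hypothetical accessor for the proxy's configured CryptoProvider; the
// expected suites mirror the ones reported by the rustls_info metric.
#[test]
fn cipher_suites_do_not_change_unexpectedly() {
    let provider = linkerd_rustls::provider();
    let suites: Vec<String> = provider
        .cipher_suites
        .iter()
        .map(|s| format!("{:?}", s.suite()))
        .collect();
    assert_eq!(
        suites,
        [
            "TLS13_AES_128_GCM_SHA256",
            "TLS13_AES_256_GCM_SHA384",
            "TLS13_CHACHA20_POLY1305_SHA256",
        ],
    );
}
```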

Signed-off-by: Scott Fleener <scott@buoyant.io>
2025-08-14 18:56:35 +00:00
dependabot[bot] 0b3bc61263
build(deps): bump hashbrown from 0.15.2 to 0.15.5 (#4068)
Bumps [hashbrown](https://github.com/rust-lang/hashbrown) from 0.15.2 to 0.15.5.
- [Release notes](https://github.com/rust-lang/hashbrown/releases)
- [Changelog](https://github.com/rust-lang/hashbrown/blob/master/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/hashbrown/commits/v0.15.5)

---
updated-dependencies:
- dependency-name: hashbrown
  dependency-version: 0.15.5
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-14 11:30:16 -04:00
Scott Fleener 2b0e723027
chore(deps): Update tonic deps to 0.13 (#4066)
* chore(deps): Update tonic deps to 0.13

Signed-off-by: Scott Fleener <scott@buoyant.io>

* chore(deps): Update linkerd2-proxy-api to 0.17.0

Signed-off-by: Scott Fleener <scott@buoyant.io>

---------

Signed-off-by: Scott Fleener <scott@buoyant.io>
2025-08-13 10:45:42 -07:00
Scott Fleener 2156c3d5e3
chore(tls): Remove `ring` as `rustls` crypto backend (#4029)
* chore(tls): Remove ring as crypto backend

The broader ecosystem has mostly moved to aws-lc-rs as the primary rustls backend, and we should follow suit. This will also simplify the maintenance of the proxy's TLS implementation in the long term.

There will need to be some refactoring to clean up the rustls provider interfaces, but that will come in follow-ups.

Signed-off-by: Scott Fleener <scott@buoyant.io>

* chore(tls): Restore existing aws-lc feature names for compatibility

Signed-off-by: Scott Fleener <scott@buoyant.io>

* fix(tls): Use correct feature name for fips conditionals

Signed-off-by: Scott Fleener <scott@buoyant.io>

---------

Signed-off-by: Scott Fleener <scott@buoyant.io>
2025-08-13 13:13:57 -04:00
dependabot[bot] c4cae21e11
build(deps): bump syn from 2.0.104 to 2.0.105 (#4067)
Bumps [syn](https://github.com/dtolnay/syn) from 2.0.104 to 2.0.105.
- [Release notes](https://github.com/dtolnay/syn/releases)
- [Commits](https://github.com/dtolnay/syn/compare/2.0.104...2.0.105)

---
updated-dependencies:
- dependency-name: syn
  dependency-version: 2.0.105
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-13 10:09:16 -07:00
Scott Fleener 89c88caf5c
chore(deps): Manually remove unused dependencies (#4065)
Signed-off-by: Scott Fleener <scott@buoyant.io>
2025-08-13 10:44:43 -04:00
dependabot[bot] bb612d3aac
build(deps): bump the symbolic group with 2 updates (#4063)
Bumps the symbolic group with 2 updates: [symbolic-common](https://github.com/getsentry/symbolic) and [symbolic-demangle](https://github.com/getsentry/symbolic).


Updates `symbolic-common` from 12.16.1 to 12.16.2
- [Release notes](https://github.com/getsentry/symbolic/releases)
- [Changelog](https://github.com/getsentry/symbolic/blob/master/CHANGELOG.md)
- [Commits](https://github.com/getsentry/symbolic/compare/12.16.1...12.16.2)

Updates `symbolic-demangle` from 12.16.1 to 12.16.2
- [Release notes](https://github.com/getsentry/symbolic/releases)
- [Changelog](https://github.com/getsentry/symbolic/blob/master/CHANGELOG.md)
- [Commits](https://github.com/getsentry/symbolic/compare/12.16.1...12.16.2)

---
updated-dependencies:
- dependency-name: symbolic-common
  dependency-version: 12.16.2
  dependency-type: indirect
  update-type: version-update:semver-patch
  dependency-group: symbolic
- dependency-name: symbolic-demangle
  dependency-version: 12.16.2
  dependency-type: indirect
  update-type: version-update:semver-patch
  dependency-group: symbolic
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-12 17:18:00 -07:00
dependabot[bot] 7030cc51ed
build(deps): bump anyhow from 1.0.98 to 1.0.99 (#4062)
Bumps [anyhow](https://github.com/dtolnay/anyhow) from 1.0.98 to 1.0.99.
- [Release notes](https://github.com/dtolnay/anyhow/releases)
- [Commits](https://github.com/dtolnay/anyhow/compare/1.0.98...1.0.99)

---
updated-dependencies:
- dependency-name: anyhow
  dependency-version: 1.0.99
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-12 18:39:04 +00:00
dependabot[bot] af520dfd12
build(deps): bump proc-macro2 from 1.0.96 to 1.0.97 (#4061)
Bumps [proc-macro2](https://github.com/dtolnay/proc-macro2) from 1.0.96 to 1.0.97.
- [Release notes](https://github.com/dtolnay/proc-macro2/releases)
- [Commits](https://github.com/dtolnay/proc-macro2/compare/1.0.96...1.0.97)

---
updated-dependencies:
- dependency-name: proc-macro2
  dependency-version: 1.0.97
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-12 12:33:58 -04:00
dependabot[bot] ccf91dfb3e
build(deps): bump actions/checkout from 4.3.0 to 5.0.0 (#4060)
Bumps [actions/checkout](https://github.com/actions/checkout) from 4.3.0 to 5.0.0.
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](08eba0b27e...08c6903cd8)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-version: 5.0.0
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-12 16:18:00 +00:00
dependabot[bot] 69cd164da1
build(deps): bump thiserror from 2.0.12 to 2.0.14 (#4059)
Bumps [thiserror](https://github.com/dtolnay/thiserror) from 2.0.12 to 2.0.14.
- [Release notes](https://github.com/dtolnay/thiserror/releases)
- [Commits](https://github.com/dtolnay/thiserror/compare/2.0.12...2.0.14)

---
updated-dependencies:
- dependency-name: thiserror
  dependency-version: 2.0.14
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-12 15:56:38 +00:00
dependabot[bot] feb5f87713
build(deps): bump libc from 0.2.174 to 0.2.175 (#4057)
Bumps [libc](https://github.com/rust-lang/libc) from 0.2.174 to 0.2.175.
- [Release notes](https://github.com/rust-lang/libc/releases)
- [Changelog](https://github.com/rust-lang/libc/blob/0.2.175/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/libc/compare/0.2.174...0.2.175)

---
updated-dependencies:
- dependency-name: libc
  dependency-version: 0.2.175
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-11 12:14:04 -04:00
dependabot[bot] fdd7f218a3
build(deps): bump actions/checkout from 4.2.2 to 4.3.0 (#4053)
Bumps [actions/checkout](https://github.com/actions/checkout) from 4.2.2 to 4.3.0.
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](11bd71901b...08eba0b27e)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-version: 4.3.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-11 11:06:10 -04:00
dependabot[bot] b4e2b7e24f
build(deps): bump glob from 0.3.2 to 0.3.3 (#4055)
Bumps [glob](https://github.com/rust-lang/glob) from 0.3.2 to 0.3.3.
- [Release notes](https://github.com/rust-lang/glob/releases)
- [Changelog](https://github.com/rust-lang/glob/blob/master/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/glob/compare/v0.3.2...v0.3.3)

---
updated-dependencies:
- dependency-name: glob
  dependency-version: 0.3.3
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-11 11:05:55 -04:00
dependabot[bot] 1b07f277d7
build(deps): bump rustversion from 1.0.21 to 1.0.22 (#4056)
Bumps [rustversion](https://github.com/dtolnay/rustversion) from 1.0.21 to 1.0.22.
- [Release notes](https://github.com/dtolnay/rustversion/releases)
- [Commits](https://github.com/dtolnay/rustversion/compare/1.0.21...1.0.22)

---
updated-dependencies:
- dependency-name: rustversion
  dependency-version: 1.0.22
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-11 11:05:34 -04:00
dependabot[bot] 25cf0c7f11
build(deps): bump proc-macro2 from 1.0.95 to 1.0.96 (#4054)
Bumps [proc-macro2](https://github.com/dtolnay/proc-macro2) from 1.0.95 to 1.0.96.
- [Release notes](https://github.com/dtolnay/proc-macro2/releases)
- [Commits](https://github.com/dtolnay/proc-macro2/compare/1.0.95...1.0.96)

---
updated-dependencies:
- dependency-name: proc-macro2
  dependency-version: 1.0.96
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-11 14:52:05 +00:00
dependabot[bot] d46e7c0c82
build(deps): bump cc from 1.2.31 to 1.2.32 (#4051)
Bumps [cc](https://github.com/rust-lang/cc-rs) from 1.2.31 to 1.2.32.
- [Release notes](https://github.com/rust-lang/cc-rs/releases)
- [Changelog](https://github.com/rust-lang/cc-rs/blob/main/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/cc-rs/compare/cc-v1.2.31...cc-v1.2.32)

---
updated-dependencies:
- dependency-name: cc
  dependency-version: 1.2.32
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-08 13:28:21 -04:00
dependabot[bot] 3305a890b0
build(deps): bump slab from 0.4.10 to 0.4.11 (#4052)
Bumps [slab](https://github.com/tokio-rs/slab) from 0.4.10 to 0.4.11.
- [Release notes](https://github.com/tokio-rs/slab/releases)
- [Changelog](https://github.com/tokio-rs/slab/blob/master/CHANGELOG.md)
- [Commits](https://github.com/tokio-rs/slab/compare/v0.4.10...v0.4.11)

---
updated-dependencies:
- dependency-name: slab
  dependency-version: 0.4.11
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-08 13:28:01 -04:00
katelyn martin d850fa6f73
refactor(metrics): introduce a `legacy` namespace (#4050)
`linkerd-metrics` contains a suite of facilities for defining,
registering, and serving Prometheus metrics. these predate the
[`prometheus-client`](https://crates.io/crates/prometheus-client/)
crate, which should now be used for our metrics.

`linkerd-metrics` defines a `prom` namespace, which reëxports symbols
from the `prometheus-client` library. as the documentation comment for
this submodule notes, this should be used for all new metrics.

6b323d8457/linkerd/metrics/src/lib.rs (L30-L60)

`linkerd-metrics` still provides its legacy types in the public surface
of this library today, which can make it difficult to differentiate
between our two metrics implementations.

this branch introduces a new `legacy` namespace, to help clarify the
distinction between these two Prometheus implementations, and to smooth
the road to further adoption of `prometheus-client` interfaces across
the proxy.
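
roughly, the resulting crate surface looks like this (a hedged sketch of the layout, with the item lists abbreviated):

```rust
// sketch of the linkerd-metrics crate surface after this branch; item lists
// are abbreviated and illustrative.

/// hand-rolled prometheus facilities that predate `prometheus-client`.
/// new metrics should not be built on these types.
pub mod legacy {
    // Counter, Gauge, Histogram, Metric, FmtMetric, FmtMetrics, FmtLabels,
    // LastUpdate, Store, SharedStore, Serve, NewMetrics, Factor, ...
}

/// reëxports of `prometheus-client`, which all new metrics should use.
pub mod prom {
    // pub use prometheus_client::*;
}
```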

---

* refactor(metrics): introduce empty `legacy` namespace

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor: move `Counter` into `legacy` namespace

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor: move `Gauge` into `legacy` namespace

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor: move `Histogram` into `legacy` namespace

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor: move `Metric` into `legacy` namespace

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor: move `FmtMetric` into `legacy` namespace

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor: move `FmtMetrics` into `legacy` namespace

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor: move `FmtLabels` into `legacy` namespace

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor: move `LastUpdate` into `legacy` namespace

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor: move `Store` into `legacy` namespace

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor: move `SharedStore` into `legacy` namespace

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor: move `Serve` into `legacy` namespace

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor: move `NewMetrics` into `legacy` namespace

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor: move `Factor` into `legacy` namespace

Signed-off-by: katelyn martin <kate@buoyant.io>

---------

Signed-off-by: katelyn martin <kate@buoyant.io>
2025-08-07 15:33:17 -04:00
dependabot[bot] 6b323d8457
build(deps): bump governor from 0.10.0 to 0.10.1 (#4049)
Bumps [governor](https://github.com/boinkor-net/governor) from 0.10.0 to 0.10.1.
- [Release notes](https://github.com/boinkor-net/governor/releases)
- [Changelog](https://github.com/boinkor-net/governor/blob/master/release.toml)
- [Commits](https://github.com/boinkor-net/governor/compare/v0.10.0...v0.10.1)

---
updated-dependencies:
- dependency-name: governor
  dependency-version: 0.10.1
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-07 10:37:18 -04:00
dependabot[bot] 7f58cd56ed
build(deps): bump tokio-metrics from 0.4.3 to 0.4.4 (#4048)
Bumps [tokio-metrics](https://github.com/tokio-rs/tokio-metrics) from 0.4.3 to 0.4.4.
- [Release notes](https://github.com/tokio-rs/tokio-metrics/releases)
- [Changelog](https://github.com/tokio-rs/tokio-metrics/blob/main/CHANGELOG.md)
- [Commits](https://github.com/tokio-rs/tokio-metrics/commits)

---
updated-dependencies:
- dependency-name: tokio-metrics
  dependency-version: 0.4.4
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-07 10:37:00 -04:00
Scott Fleener 43e3f630ec feat(tls): Include rustls crypto provider metrics
This includes a small set of metrics about the currently installed rustls crypto provider and the algorithms it is configured to use.

We don't have 100% assurance that a default crypto provider has been installed before registering the metric, but in local testing it never appeared to be a problem. When we refactor the rustls initialization we can add an extra guarantee that we've initialized it by this point.

Example metric:
```
# HELP rustls_info Proxy TLS info.
# TYPE rustls_info gauge
rustls_info{tls_suites="TLS13_AES_128_GCM_SHA256,TLS13_AES_256_GCM_SHA384,TLS13_CHACHA20_POLY1305_SHA256,",tls_kx_groups="X25519,secp256r1,secp384r1,X25519MLKEM768,",tls_rand="AwsLcRs",tls_key_provider="AwsLcRs",tls_fips="false"} 1
```
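
For reference, the installed provider can be read back through rustls's public API; a hedged sketch of that lookup (the proxy's actual metric registration is not shown):

```rust
use rustls::crypto::CryptoProvider;

// Sketch: read the process-wide default provider, if one has been installed.
// `None` is the "uninitialized" case described above.
fn provider_summary() -> Option<String> {
    let provider = CryptoProvider::get_default()?;
    let suites = provider
        .cipher_suites
        .iter()
        .map(|s| format!("{:?}", s.suite()))
        .collect::<Vec<_>>()
        .join(",");
    Some(format!("tls_suites={suites} tls_fips={}", provider.fips()))
}
```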

Signed-off-by: Scott Fleener <scott@buoyant.io>
2025-08-07 10:01:07 -04:00
Oliver Gould 7758436831
chore(ci): exercise all release platforms on workflow change (#4047)
We only build linux/amd64 during typical release CI runs. This means that the platform-specific builds are not exercised. This change updates the release workflow so that all platforms are built whenever the workflow itself is changed.
2025-08-06 14:29:05 +00:00
dependabot[bot] c0f921af33
build(deps): bump zerovec from 0.11.3 to 0.11.4 (#4046)
Bumps [zerovec](https://github.com/unicode-org/icu4x) from 0.11.3 to 0.11.4.
- [Release notes](https://github.com/unicode-org/icu4x/releases)
- [Changelog](https://github.com/unicode-org/icu4x/blob/main/CHANGELOG.md)
- [Commits](https://github.com/unicode-org/icu4x/compare/ind/zerovec@0.11.3...ind/zerovec@0.11.4)

---
updated-dependencies:
- dependency-name: zerovec
  dependency-version: 0.11.4
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-06 09:48:31 -04:00
Scott Fleener b558ce5320
chore(meshtls-rustls): use aws-lc as the default crypto backend (#4043)
The broader ecosystem has mostly moved to `aws-lc-rs` as the primary `rustls` backend, and we should follow suit. This will also simplify the maintenance of the proxy's TLS implementation in the long term.

This requires some extra configuration for successful cross-compilation; ideally we can remove that extra configuration once linkerd/dev v48 is available.

This doesn't remove `ring` as a crypto backend; that can come in a follow-up at https://github.com/linkerd/linkerd2-proxy/pull/4029
2025-08-05 13:22:26 -07:00
dependabot[bot] 894d3506df
build(deps): bump socket2 from 0.5.10 to 0.6.0 (#4003)
* build(deps): bump socket2 from 0.5.10 to 0.6.0

Bumps [socket2](https://github.com/rust-lang/socket2) from 0.5.10 to 0.6.0.
- [Release notes](https://github.com/rust-lang/socket2/releases)
- [Changelog](https://github.com/rust-lang/socket2/blob/master/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/socket2/commits)

---
updated-dependencies:
- dependency-name: socket2
  dependency-version: 0.6.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

* chore(proxy/transport): use `original_dst_v*` methods

these have been renamed.

Signed-off-by: katelyn martin <kate@buoyant.io>

---------

Signed-off-by: dependabot[bot] <support@github.com>
Signed-off-by: katelyn martin <kate@buoyant.io>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: katelyn martin <kate@buoyant.io>
2025-08-05 13:09:56 -07:00
dependabot[bot] 01e7ec0820
build(deps): bump zerovec from 0.11.2 to 0.11.3 (#4044)
Bumps [zerovec](https://github.com/unicode-org/icu4x) from 0.11.2 to 0.11.3.
- [Release notes](https://github.com/unicode-org/icu4x/releases)
- [Changelog](https://github.com/unicode-org/icu4x/blob/main/CHANGELOG.md)
- [Commits](https://github.com/unicode-org/icu4x/commits/ind/zerovec@0.11.3)

---
updated-dependencies:
- dependency-name: zerovec
  dependency-version: 0.11.3
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-05 13:00:54 -07:00
dependabot[bot] 4f563fab68
build(deps): bump tokio-metrics from 0.4.2 to 0.4.3 (#3995)
Bumps [tokio-metrics](https://github.com/tokio-rs/tokio-metrics) from 0.4.2 to 0.4.3.
- [Release notes](https://github.com/tokio-rs/tokio-metrics/releases)
- [Changelog](https://github.com/tokio-rs/tokio-metrics/blob/main/CHANGELOG.md)
- [Commits](https://github.com/tokio-rs/tokio-metrics/compare/v0.4.2...v0.4.3)

---
updated-dependencies:
- dependency-name: tokio-metrics
  dependency-version: 0.4.3
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-05 07:50:44 -07:00
dependabot[bot] 168c4bff7d
build(deps): bump tokio from 1.45.0 to 1.47.1 (#4040)
Bumps [tokio](https://github.com/tokio-rs/tokio) from 1.45.0 to 1.47.1.
- [Release notes](https://github.com/tokio-rs/tokio/releases)
- [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.45.0...tokio-1.47.1)

---
updated-dependencies:
- dependency-name: tokio
  dependency-version: 1.47.1
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-05 09:44:53 -04:00
dependabot[bot] 0df8cdbedb
build(deps): bump cc from 1.2.30 to 1.2.31 (#4042)
Bumps [cc](https://github.com/rust-lang/cc-rs) from 1.2.30 to 1.2.31.
- [Release notes](https://github.com/rust-lang/cc-rs/releases)
- [Changelog](https://github.com/rust-lang/cc-rs/blob/main/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/cc-rs/compare/cc-v1.2.30...cc-v1.2.31)

---
updated-dependencies:
- dependency-name: cc
  dependency-version: 1.2.31
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-04 14:59:55 -07:00
dependabot[bot] 1b837b7f91
build(deps): bump tokio-util from 0.7.15 to 0.7.16 (#4041)
Bumps [tokio-util](https://github.com/tokio-rs/tokio) from 0.7.15 to 0.7.16.
- [Release notes](https://github.com/tokio-rs/tokio/releases)
- [Commits](https://github.com/tokio-rs/tokio/compare/tokio-util-0.7.15...tokio-util-0.7.16)

---
updated-dependencies:
- dependency-name: tokio-util
  dependency-version: 0.7.16
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-04 14:59:00 -07:00
katelyn martin 40078c96ca
chore(deps): update from tokio 1.45 to 1.47 (#4032)
* build(deps): bump tokio from 1.45.0 to 1.47.0

Bumps [tokio](https://github.com/tokio-rs/tokio) from 1.45.0 to 1.47.0.
- [Release notes](https://github.com/tokio-rs/tokio/releases)
- [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.45.0...tokio-1.47.0)

---
updated-dependencies:
- dependency-name: tokio
  dependency-version: 1.47.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

---

chore(deny): ignore socket2@v0.5

there is now a v0.6 used by the latest tokio.

while we wait for this new version to propagate through the ecosystem,
allow for two socket2 dependencies.

Signed-off-by: katelyn martin <kate@buoyant.io>

* chore(app/integration): remove inbound_io_err test

> @cratelyn I think it would be appropriate to remove these tests, given
> that they can no longer behave properly. I don't think that this test
> case is particularly meaningful or load bearing, it's best just to
> unblock the dependency updates.

\- <https://github.com/BuoyantIO/enterprise-linkerd/issues/1645#issuecomment-3046905516>

Co-authored-by: Oliver Gould <ver@buoyant.io>
Signed-off-by: katelyn martin <kate@buoyant.io>

* chore(app/integration): remove inbound_multi test

this test exercises the same thing that the previous two tests do, as
the comment at the top of it points out.

this test is redundant, and we have removed the i/o error coverage that it
duplicated. let's remove it.

Signed-off-by: katelyn martin <kate@buoyant.io>

---------

Signed-off-by: katelyn martin <kate@buoyant.io>
Co-authored-by: Oliver Gould <ver@buoyant.io>
2025-08-04 09:04:02 -07:00
dependabot[bot] 9eaf1425a7
build(deps): bump signal-hook-registry from 1.4.5 to 1.4.6 (#4039)
Bumps [signal-hook-registry](https://github.com/vorner/signal-hook) from 1.4.5 to 1.4.6.
- [Changelog](https://github.com/vorner/signal-hook/blob/master/CHANGELOG.md)
- [Commits](https://github.com/vorner/signal-hook/compare/registry-v1.4.5...registry-v1.4.6)

---
updated-dependencies:
- dependency-name: signal-hook-registry
  dependency-version: 1.4.6
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-04 08:08:41 -07:00
dependabot[bot] 842452368c
build(deps): bump rustc-demangle from 0.1.25 to 0.1.26 (#4026)
Bumps [rustc-demangle](https://github.com/rust-lang/rustc-demangle) from 0.1.25 to 0.1.26.
- [Release notes](https://github.com/rust-lang/rustc-demangle/releases)
- [Changelog](https://github.com/rust-lang/rustc-demangle/blob/main/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/rustc-demangle/commits/rustc-demangle-v0.1.26)

---
updated-dependencies:
- dependency-name: rustc-demangle
  dependency-version: 0.1.26
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-04 11:07:17 -04:00
dependabot[bot] 09333dc2b2
build(deps): bump the symbolic group with 2 updates (#4035)
Bumps the symbolic group with 2 updates: [symbolic-common](https://github.com/getsentry/symbolic) and [symbolic-demangle](https://github.com/getsentry/symbolic).


Updates `symbolic-common` from 12.16.0 to 12.16.1
- [Release notes](https://github.com/getsentry/symbolic/releases)
- [Changelog](https://github.com/getsentry/symbolic/blob/master/CHANGELOG.md)
- [Commits](https://github.com/getsentry/symbolic/compare/12.16.0...12.16.1)

Updates `symbolic-demangle` from 12.16.0 to 12.16.1
- [Release notes](https://github.com/getsentry/symbolic/releases)
- [Changelog](https://github.com/getsentry/symbolic/blob/master/CHANGELOG.md)
- [Commits](https://github.com/getsentry/symbolic/compare/12.16.0...12.16.1)

---
updated-dependencies:
- dependency-name: symbolic-common
  dependency-version: 12.16.1
  dependency-type: indirect
  update-type: version-update:semver-patch
  dependency-group: symbolic
- dependency-name: symbolic-demangle
  dependency-version: 12.16.1
  dependency-type: indirect
  update-type: version-update:semver-patch
  dependency-group: symbolic
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Oliver Gould <ver@buoyant.io>
2025-08-01 18:28:55 +00:00
dependabot[bot] ddc847ccc4
build(deps): bump serde_json from 1.0.141 to 1.0.142 (#4036)
Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.141 to 1.0.142.
- [Release notes](https://github.com/serde-rs/json/releases)
- [Commits](https://github.com/serde-rs/json/compare/v1.0.141...v1.0.142)

---
updated-dependencies:
- dependency-name: serde_json
  dependency-version: 1.0.142
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-01 11:23:49 -07:00
Scott Fleener 8d56746c1f
feat!(ci): Remove arm/v7 support (#4037)
This architecture has become too significant a maintenance burden and isn't used often enough to justify the associated cost.

This removes arm/v7 from all the build infrastructure/dockerfiles/etc. Note that arm64 targets are still widely used and well supported.

Related: https://github.com/linkerd/linkerd2/pull/14308

Signed-off-by: Scott Fleener <scott@buoyant.io>
2025-08-01 13:26:12 -04:00
dependabot[bot] db0ed46978
build(deps): bump rustls from 0.23.29 to 0.23.31 in the rustls group (#4034)
Bumps the rustls group with 1 update: [rustls](https://github.com/rustls/rustls).


Updates `rustls` from 0.23.29 to 0.23.31
- [Release notes](https://github.com/rustls/rustls/releases)
- [Changelog](https://github.com/rustls/rustls/blob/main/CHANGELOG.md)
- [Commits](https://github.com/rustls/rustls/compare/v/0.23.29...v/0.23.31)

---
updated-dependencies:
- dependency-name: rustls
  dependency-version: 0.23.31
  dependency-type: indirect
  update-type: version-update:semver-patch
  dependency-group: rustls
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-31 05:53:48 -07:00
katelyn martin 1d94082d4b
chore(deps): downgrade deranged from 0.4.1 to 0.4.0 (#4031)
- https://crates.io/crates/deranged/versions
- https://crates.io/crates/deranged/0.4.1

this version has been yanked. this commit addresses this cargo deny
warning:

```
warning[yanked]: detected yanked crate (try `cargo update -p deranged`)
   ┌─ /home/katie/linkerd/linkerd2-proxy/Cargo.lock:41:1
   │
41 │ deranged 0.4.1 registry+https://github.com/rust-lang/crates.io-index
   │ ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ yanked version
   │
```

Signed-off-by: katelyn martin <kate@buoyant.io>
2025-07-29 18:14:11 +00:00
dependabot[bot] 744e29e0bd
build(deps): bump rangemap from 1.5.1 to 1.6.0 (#4028)
Bumps [rangemap](https://github.com/jeffparsons/rangemap) from 1.5.1 to 1.6.0.
- [Changelog](https://github.com/jeffparsons/rangemap/blob/main/CHANGELOG.md)
- [Commits](https://github.com/jeffparsons/rangemap/commits)

---
updated-dependencies:
- dependency-name: rangemap
  dependency-version: 1.6.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-29 10:59:35 -04:00
katelyn martin 83373d6b89
chore(deps): bump h2 from 0.4.8 to 0.4.11 (#4024)
- https://github.com/hyperium/h2/compare/v0.4.8...v0.4.11

Signed-off-by: katelyn martin <kate@buoyant.io>
2025-07-28 21:23:25 +00:00
Scott Fleener 1dcb7a7d1a
fix(rustls): Make `ring` and `aws-lc-rs` exclusive features (#4009)
Currently, disabling the `ring` feature does not actually disable the dependency across the tree. Doing so requires a few tightly coupled steps:

- Making `ring` and `aws-lc` exclusive features, raising a compile error if they are both enabled (sketched after this list).
- Removing a direct dependency on some `ring` types, and instead going through `rustls` for equivalent functionality.
- Removing a direct dependency on the `ring` crypto provider for integration tests, and instead using the provider from `linkerd-meshtls`.
- Installing the default crypto provider globally for the process and re-using it when requested, mostly to make the tests pass.
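
A minimal sketch of the mutual-exclusion guard described in the first step; the error message here is illustrative:

```rust
// Sketch: fail compilation if both TLS backends are enabled at once.
#[cfg(all(feature = "ring", feature = "aws-lc"))]
compile_error!("the `ring` and `aws-lc` features are mutually exclusive; enable exactly one");
```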

This was tested using a temporary `cargo deny` config that forbade `ring` when `aws-lc-rs` was used, and vice versa. Note that this doesn't completely remove `ring` from the dev dependencies, but that can be done as a follow-up.

Signed-off-by: Scott Fleener <scott@buoyant.io>
2025-07-28 15:28:47 -04:00
Scott Fleener 68a6b6d1e8
fix(tls): Update preferred cipher suite order (#4015)
This makes two changes to the preferred cipher suite order.
- Prefer AES algorithms over ChaCha20. AES is significantly faster when AES hardware is present; such hardware has shipped on all x86 CPUs since ~2010 and on ARM server CPUs for a similar amount of time. For these reasons it's reasonable to default to AES for modern deployments, and it's the same default that `aws-lc-rs` makes anyway.
- Remove ChaCha20 when FIPS is enabled. It's no longer a supported algorithm, so we shouldn't have it as an option.

Signed-off-by: Scott Fleener <scott@buoyant.io>
2025-07-28 15:05:05 -04:00
Oliver Gould cdbb55fd53
build(linkerd2-proxy): make release artifacts auditable (#4023)
Auditing tools like Syft cannot inspect the proxy's dependencies, which makes it difficult to determine the state of a binary. This change updates the release process to use cargo-auditable, which records the proxy's crate dependencies in its release binary.
2025-07-28 17:00:06 +00:00
Oliver Gould 5997453393
build(deps): update linkerd/dev to v47 (#4022) 2025-07-28 11:49:06 -05:00
Joe 10643b9525
updating metrics descriptions (#4020)
Signed-off-by: Joe F <joe@buoyant.io>
2025-07-25 20:08:56 +00:00
dependabot[bot] fd0ea24b87
build(deps): bump serde_json from 1.0.140 to 1.0.141 (#4021)
Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.140 to 1.0.141.
- [Release notes](https://github.com/serde-rs/json/releases)
- [Commits](https://github.com/serde-rs/json/compare/v1.0.140...v1.0.141)

---
updated-dependencies:
- dependency-name: serde_json
  dependency-version: 1.0.141
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-25 16:07:16 -04:00
dependabot[bot] 3a159be91a
build(deps): bump prettyplease from 0.2.35 to 0.2.36 (#4018)
Bumps [prettyplease](https://github.com/dtolnay/prettyplease) from 0.2.35 to 0.2.36.
- [Release notes](https://github.com/dtolnay/prettyplease/releases)
- [Commits](https://github.com/dtolnay/prettyplease/compare/0.2.35...0.2.36)

---
updated-dependencies:
- dependency-name: prettyplease
  dependency-version: 0.2.36
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-24 10:03:57 -04:00
dependabot[bot] b3bc6fe8cd
build(deps): bump hyper-util from 0.1.15 to 0.1.16 (#4019)
Bumps [hyper-util](https://github.com/hyperium/hyper-util) from 0.1.15 to 0.1.16.
- [Release notes](https://github.com/hyperium/hyper-util/releases)
- [Changelog](https://github.com/hyperium/hyper-util/blob/master/CHANGELOG.md)
- [Commits](https://github.com/hyperium/hyper-util/compare/v0.1.15...v0.1.16)

---
updated-dependencies:
- dependency-name: hyper-util
  dependency-version: 0.1.16
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-24 10:02:38 -04:00
dependabot[bot] 5a00b70d11
build(deps): bump aws-lc-rs from 1.13.2 to 1.13.3 (#4016)
Bumps [aws-lc-rs](https://github.com/aws/aws-lc-rs) from 1.13.2 to 1.13.3.
- [Release notes](https://github.com/aws/aws-lc-rs/releases)
- [Commits](https://github.com/aws/aws-lc-rs/compare/v1.13.2...v1.13.3)

---
updated-dependencies:
- dependency-name: aws-lc-rs
  dependency-version: 1.13.3
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-23 07:02:52 -07:00
dependabot[bot] b91dd3e7af
build(deps): bump rcgen from 0.14.2 to 0.14.3 (#4017)
Bumps [rcgen](https://github.com/rustls/rcgen) from 0.14.2 to 0.14.3.
- [Release notes](https://github.com/rustls/rcgen/releases)
- [Commits](https://github.com/rustls/rcgen/compare/v0.14.2...v0.14.3)

---
updated-dependencies:
- dependency-name: rcgen
  dependency-version: 0.14.3
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-23 07:02:25 -07:00
dependabot[bot] a699b1cf58
build(deps): bump cc from 1.2.29 to 1.2.30 (#4014)
Bumps [cc](https://github.com/rust-lang/cc-rs) from 1.2.29 to 1.2.30.
- [Release notes](https://github.com/rust-lang/cc-rs/releases)
- [Changelog](https://github.com/rust-lang/cc-rs/blob/main/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/cc-rs/compare/cc-v1.2.29...cc-v1.2.30)

---
updated-dependencies:
- dependency-name: cc
  dependency-version: 1.2.30
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-22 09:35:32 -04:00
dependabot[bot] 9f3c45874e
build(deps): bump rand from 0.9.1 to 0.9.2 (#4013)
Bumps [rand](https://github.com/rust-random/rand) from 0.9.1 to 0.9.2.
- [Release notes](https://github.com/rust-random/rand/releases)
- [Changelog](https://github.com/rust-random/rand/blob/master/CHANGELOG.md)
- [Commits](https://github.com/rust-random/rand/compare/rand_core-0.9.1...rand_core-0.9.2)

---
updated-dependencies:
- dependency-name: rand
  dependency-version: 0.9.2
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-22 09:35:19 -04:00
Oliver Gould 1de179e178
chore(deps): update jemallocator 0.5 to tikv-jemallocator 0.6 (#4012)
tikv-jemallocator supersedes jemallocator. To enable jemalloc profiling, this change updates the dependency and adds a `jemalloc-profiling` feature so that profiling can be enabled at build time.
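
A hedged sketch of the usual `tikv-jemallocator` wiring behind a feature flag; the `jemalloc` feature name here is illustrative:

```rust
// Sketch: opt into jemalloc as the global allocator behind a feature flag.
// Profiling itself is enabled at build time via tikv-jemallocator's
// `profiling` feature in Cargo.toml.
#[cfg(feature = "jemalloc")]
#[global_allocator]
static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
```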
2025-07-18 17:42:41 -04:00
dependabot[bot] f9d7e08242
build(deps): bump the rustls group with 2 updates (#4010)
---
updated-dependencies:
- dependency-name: rustls-webpki
  dependency-version: 0.103.4
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: rustls
- dependency-name: rustls
  dependency-version: 0.23.29
  dependency-type: indirect
  update-type: version-update:semver-patch
  dependency-group: rustls
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-18 07:48:31 -07:00
dependabot[bot] bd454b4be8
build(deps): bump the symbolic group with 2 updates (#4011)
---
updated-dependencies:
- dependency-name: symbolic-common
  dependency-version: 12.16.0
  dependency-type: indirect
  update-type: version-update:semver-minor
  dependency-group: symbolic
- dependency-name: symbolic-demangle
  dependency-version: 12.16.0
  dependency-type: indirect
  update-type: version-update:semver-minor
  dependency-group: symbolic
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-18 07:48:06 -07:00
Oliver Gould b1f8fa5419
chore(ci): enable overriding the runner in workflows (#4008)
We use the ubuntu-24.04 runner by default, but in forks this may not be appropriate. This change updates the runners to support overriding via the LINKERD2_PROXY_RUNNER variable.
2025-07-17 17:22:42 -07:00
dependabot[bot] e04947610a
build(deps): bump aws-lc-rs from 1.13.1 to 1.13.2 (#4006)
---
updated-dependencies:
- dependency-name: aws-lc-rs
  dependency-version: 1.13.2
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-17 11:05:10 -07:00
153 changed files with 1281 additions and 3254 deletions

View File

@@ -3,7 +3,7 @@
 "build": {
 "dockerfile": "Dockerfile",
 "args": {
-"DEV_VERSION": "v46",
+"DEV_VERSION": "v47",
 "http_proxy": "${localEnv:http_proxy}",
 "https_proxy": "${localEnv:https_proxy}"
 }

View File

@@ -22,13 +22,13 @@ permissions:
 jobs:
 build:
-runs-on: ubuntu-24.04
-container: ghcr.io/linkerd/dev:v46-rust
+runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
+container: ghcr.io/linkerd/dev:v47-rust
 timeout-minutes: 20
 continue-on-error: true
 steps:
 - run: rustup toolchain install --profile=minimal beta
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
 - run: git config --global --add safe.directory "$PWD" # actions/runner#2033
 - run: just toolchain=beta fetch
 - run: just toolchain=beta build

View File

@ -21,9 +21,9 @@ env:
jobs:
meta:
timeout-minutes: 5
runs-on: ubuntu-24.04
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
- id: changed
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c
with:
@ -40,15 +40,15 @@ jobs:
codecov:
needs: meta
if: (github.event_name == 'push' && github.ref == 'refs/heads/main') || needs.meta.outputs.any_changed == 'true'
runs-on: ubuntu-24.04
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
timeout-minutes: 30
container:
image: docker://ghcr.io/linkerd/dev:v46-rust
image: docker://ghcr.io/linkerd/dev:v47-rust
options: --security-opt seccomp=unconfined # 🤷
env:
CXX: "/usr/bin/clang++-19"
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
- uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0
- run: cargo tarpaulin --locked --workspace --exclude=linkerd2-proxy --exclude=linkerd-transport-header --exclude=opencensus-proto --exclude=spire-proto --no-run
- run: cargo tarpaulin --locked --workspace --exclude=linkerd2-proxy --exclude=linkerd-transport-header --exclude=opencensus-proto --exclude=spire-proto --skip-clean --ignore-tests --no-fail-fast --out=Xml

View File

@ -26,11 +26,11 @@ permissions:
jobs:
list-changed:
timeout-minutes: 3
runs-on: ubuntu-24.04
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
container: docker://rust:1.88.0
steps:
- run: apt update && apt install -y jo
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
- run: git config --global --add safe.directory "$PWD" # actions/runner#2033
- uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c
id: changed-files
@ -47,7 +47,7 @@ jobs:
build:
needs: [list-changed]
timeout-minutes: 40
runs-on: ubuntu-24.04
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
container: docker://rust:1.88.0
strategy:
matrix:
@ -55,7 +55,7 @@ jobs:
steps:
- run: rustup toolchain add nightly
- run: cargo install cargo-fuzz
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
- run: git config --global --add safe.directory "$PWD" # actions/runner#2033
- working-directory: ${{matrix.dir}}
run: cargo +nightly fetch

View File

@ -12,9 +12,9 @@ on:
jobs:
markdownlint:
timeout-minutes: 5
runs-on: ubuntu-24.04
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
- uses: DavidAnson/markdownlint-cli2-action@992badcdf24e3b8eb7e87ff9287fe931bcb00c6e
with:
globs: "**/*.md"

View File

@ -22,13 +22,13 @@ permissions:
jobs:
build:
runs-on: ubuntu-24.04
container: ghcr.io/linkerd/dev:v46-rust
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
container: ghcr.io/linkerd/dev:v47-rust
timeout-minutes: 20
continue-on-error: true
steps:
- run: rustup toolchain install --profile=minimal nightly
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
- run: git config --global --add safe.directory "$PWD" # actions/runner#2033
- run: just toolchain=nightly fetch
- run: just toolchain=nightly profile=release build

View File

@ -14,9 +14,9 @@ concurrency:
jobs:
meta:
timeout-minutes: 5
runs-on: ubuntu-24.04
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
- id: build
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c
with:
@ -57,7 +57,7 @@ jobs:
info:
timeout-minutes: 3
needs: meta
runs-on: ubuntu-24.04
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
steps:
- name: Info
run: |
@ -74,24 +74,24 @@ jobs:
actions:
needs: meta
if: needs.meta.outputs.actions_changed == 'true'
runs-on: ubuntu-24.04
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
steps:
- uses: linkerd/dev/actions/setup-tools@v46
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: linkerd/dev/actions/setup-tools@v47
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
- run: just action-lint
- run: just action-dev-check
rust:
needs: meta
if: needs.meta.outputs.cargo_changed == 'true' || needs.meta.outputs.rust_changed == 'true'
runs-on: ubuntu-24.04
container: ghcr.io/linkerd/dev:v46-rust
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
container: ghcr.io/linkerd/dev:v47-rust
permissions:
contents: read
timeout-minutes: 20
steps:
- run: git config --global --add safe.directory "$PWD" # actions/runner#2033
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
- uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0
- run: just fetch
- run: cargo deny --all-features check bans licenses sources
@ -107,14 +107,14 @@ jobs:
needs: meta
if: needs.meta.outputs.cargo_changed == 'true'
timeout-minutes: 20
runs-on: ubuntu-24.04
container: ghcr.io/linkerd/dev:v46-rust
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
container: ghcr.io/linkerd/dev:v47-rust
strategy:
matrix:
crate: ${{ fromJson(needs.meta.outputs.cargo_crates) }}
steps:
- run: git config --global --add safe.directory "$PWD" # actions/runner#2033
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
- uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0
- run: just fetch
- run: just check-crate ${{ matrix.crate }}
@ -123,11 +123,11 @@ jobs:
needs: meta
if: needs.meta.outputs.cargo_changed == 'true' || needs.meta.outputs.rust_changed == 'true'
timeout-minutes: 20
runs-on: ubuntu-24.04
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
env:
WAIT_TIMEOUT: 2m
steps:
- uses: linkerd/dev/actions/setup-tools@v46
- uses: linkerd/dev/actions/setup-tools@v47
- name: scurl https://run.linkerd.io/install-edge | sh
run: |
scurl https://run.linkerd.io/install-edge | sh
@ -136,7 +136,7 @@ jobs:
tag=$(linkerd version --client --short)
echo "linkerd $tag"
echo "LINKERD_TAG=$tag" >> "$GITHUB_ENV"
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
- run: just docker
- run: just k3d-create
- run: just k3d-load-linkerd
@ -149,7 +149,7 @@ jobs:
timeout-minutes: 3
needs: [meta, actions, rust, rust-crates, linkerd-install]
if: always()
runs-on: ubuntu-24.04
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
permissions:
contents: write
@ -168,7 +168,7 @@ jobs:
if: contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled')
run: exit 1
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
if: needs.meta.outputs.is_dependabot == 'true' && needs.meta.outputs.any_changed == 'true'
- name: "Merge dependabot changes"
if: needs.meta.outputs.is_dependabot == 'true' && needs.meta.outputs.any_changed == 'true'

View File

@ -13,7 +13,7 @@ concurrency:
jobs:
last-release:
if: github.repository == 'linkerd/linkerd2-proxy' # Don't run this in forks.
runs-on: ubuntu-24.04
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
timeout-minutes: 5
env:
GH_REPO: ${{ github.repository }}
@ -41,10 +41,10 @@ jobs:
last-commit:
needs: last-release
runs-on: ubuntu-24.04
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
timeout-minutes: 5
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
- name: Check if the most recent commit is after the last release
id: recency
env:
@ -62,7 +62,7 @@ jobs:
trigger-release:
needs: [last-release, last-commit]
if: needs.last-release.outputs.recent == 'false' && needs.last-commit.outputs.after-release == 'true'
runs-on: ubuntu-24.04
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
timeout-minutes: 5
permissions:
actions: write

View File

@ -46,6 +46,7 @@ on:
default: true
env:
CARGO: "cargo auditable"
CARGO_INCREMENTAL: 0
CARGO_NET_RETRY: 10
RUSTFLAGS: "-D warnings -A deprecated --cfg tokio_unstable"
@ -58,9 +59,25 @@ concurrency:
jobs:
meta:
timeout-minutes: 5
runs-on: ubuntu-24.04
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
steps:
- id: meta
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
if: github.event_name == 'pull_request'
- id: workflow
if: github.event_name == 'pull_request'
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c
with:
files: |
.github/workflows/release.yml
- id: build
if: github.event_name == 'pull_request'
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c
with:
files: |
justfile
Cargo.toml
- id: version
env:
VERSION: ${{ inputs.version }}
shell: bash
@ -68,47 +85,45 @@ jobs:
set -euo pipefail
shopt -s extglob
if [[ "$GITHUB_EVENT_NAME" == pull_request ]]; then
echo version="0.0.0-test.${GITHUB_SHA:0:7}"
echo archs='["amd64"]'
echo oses='["linux"]'
echo version="0.0.0-test.${GITHUB_SHA:0:7}" >> "$GITHUB_OUTPUT"
exit 0
fi >> "$GITHUB_OUTPUT"
fi
if ! [[ "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+(-[0-9A-Za-z-]+)?(\+[0-9A-Za-z-]+)?$ ]]; then
echo "Invalid version: $VERSION" >&2
exit 1
fi
( echo version="${VERSION#v}"
echo archs='["amd64", "arm64", "arm"]'
echo version="${VERSION#v}" >> "$GITHUB_OUTPUT"
- id: platform
shell: bash
env:
WORKFLOW_CHANGED: ${{ steps.workflow.outputs.any_changed }}
run: |
if [[ "$GITHUB_EVENT_NAME" == pull_request && "$WORKFLOW_CHANGED" != 'true' ]]; then
( echo archs='["amd64"]'
echo oses='["linux"]' ) >> "$GITHUB_OUTPUT"
exit 0
fi
( echo archs='["amd64", "arm64"]'
echo oses='["linux", "windows"]'
) >> "$GITHUB_OUTPUT"
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
if: github.event_name == 'pull_request'
- id: changed
if: github.event_name == 'pull_request'
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c
with:
files: |
.github/workflows/release.yml
justfile
Cargo.toml
outputs:
archs: ${{ steps.meta.outputs.archs }}
oses: ${{ steps.meta.outputs.oses }}
version: ${{ steps.meta.outputs.version }}
package: ${{ github.event_name == 'workflow_dispatch' || steps.changed.outputs.any_changed == 'true' }}
archs: ${{ steps.platform.outputs.archs }}
oses: ${{ steps.platform.outputs.oses }}
version: ${{ steps.version.outputs.version }}
package: ${{ github.event_name == 'workflow_dispatch' || steps.build.outputs.any_changed == 'true' || steps.workflow.outputs.any_changed == 'true' }}
profile: ${{ inputs.profile || 'release' }}
publish: ${{ inputs.publish }}
ref: ${{ inputs.ref || github.sha }}
tag: "${{ inputs.tag-prefix || 'release/' }}v${{ steps.meta.outputs.version }}"
tag: "${{ inputs.tag-prefix || 'release/' }}v${{ steps.version.outputs.version }}"
prerelease: ${{ inputs.prerelease }}
draft: ${{ inputs.draft }}
latest: ${{ inputs.latest }}
info:
needs: meta
runs-on: ubuntu-24.04
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
timeout-minutes: 3
steps:
- name: Inputs
@ -134,15 +149,13 @@ jobs:
exclude:
- os: windows
arch: arm64
- os: windows
arch: arm
# If we're not actually building on a release tag, don't short-circuit on
# errors. This helps us know whether a failure is platform-specific.
continue-on-error: ${{ needs.meta.outputs.publish != 'true' }}
runs-on: ubuntu-24.04
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
timeout-minutes: 40
container: docker://ghcr.io/linkerd/dev:v46-rust-musl
container: docker://ghcr.io/linkerd/dev:v47-rust-musl
env:
LINKERD2_PROXY_VENDOR: ${{ github.repository_owner }}
LINKERD2_PROXY_VERSION: ${{ needs.meta.outputs.version }}
@ -150,15 +163,19 @@ jobs:
# TODO: add to dev image
- name: Install MinGW
if: matrix.os == 'windows'
run: apt-get update && apt-get install mingw-w64 -y
run: apt-get update && apt-get install -y mingw-w64
- name: Install cross compilation toolchain
if: matrix.arch == 'arm64'
run: apt-get update && apt-get install -y binutils-aarch64-linux-gnu
- name: Configure git
run: git config --global --add safe.directory "$PWD" # actions/runner#2033
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
with:
ref: ${{ needs.meta.outputs.ref }}
- uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0
with:
key: ${{ matrix.arch }}
key: ${{ matrix.os }}-${{ matrix.arch }}
- run: just fetch
- run: just arch=${{ matrix.arch }} libc=${{ matrix.libc }} os=${{ matrix.os }} rustup
- run: just arch=${{ matrix.arch }} libc=${{ matrix.libc }} os=${{ matrix.os }} profile=${{ needs.meta.outputs.profile }} build
@ -170,7 +187,7 @@ jobs:
publish:
needs: [meta, package]
runs-on: ubuntu-24.04
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
timeout-minutes: 5
permissions:
actions: write
@ -187,7 +204,7 @@ jobs:
git config --global user.name "$GITHUB_USERNAME"
git config --global user.email "$GITHUB_USERNAME"@users.noreply.github.com
# Tag the release.
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
with:
token: ${{ secrets.LINKERD2_PROXY_GITHUB_TOKEN || github.token }}
ref: ${{ needs.meta.outputs.ref }}
@ -225,7 +242,7 @@ jobs:
needs: publish
if: always()
timeout-minutes: 3
runs-on: ubuntu-24.04
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
steps:
- name: Results
run: |

View File

@ -13,8 +13,8 @@ on:
jobs:
sh-lint:
timeout-minutes: 5
runs-on: ubuntu-24.04
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
steps:
- uses: linkerd/dev/actions/setup-tools@v46
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: linkerd/dev/actions/setup-tools@v47
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
- run: just sh-lint

View File

@ -13,10 +13,10 @@ permissions:
jobs:
devcontainer:
runs-on: ubuntu-24.04
container: ghcr.io/linkerd/dev:v46-rust
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
container: ghcr.io/linkerd/dev:v47-rust
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
- run: git config --global --add safe.directory "$PWD" # actions/runner#2033
- run: |
VERSION_REGEX='channel = "([0-9]+\.[0-9]+\.[0-9]+)"'
@ -35,10 +35,10 @@ jobs:
workflows:
runs-on: ubuntu-24.04
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
steps:
- uses: linkerd/dev/actions/setup-tools@v46
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: linkerd/dev/actions/setup-tools@v47
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
- shell: bash
run: |
VERSION_REGEX='channel = "([0-9]+\.[0-9]+\.[0-9]+)"'

File diff suppressed because it is too large

View File

@ -42,8 +42,6 @@ members = [
"linkerd/idle-cache",
"linkerd/io",
"linkerd/meshtls",
"linkerd/meshtls/boring",
"linkerd/meshtls/rustls",
"linkerd/meshtls/verifier",
"linkerd/metrics",
"linkerd/mock/http-body",
@ -71,6 +69,7 @@ members = [
"linkerd/reconnect",
"linkerd/retry",
"linkerd/router",
"linkerd/rustls",
"linkerd/service-profiles",
"linkerd/signal",
"linkerd/stack",
@ -115,11 +114,10 @@ prost = { version = "0.13" }
prost-build = { version = "0.13", default-features = false }
prost-types = { version = "0.13" }
tokio-rustls = { version = "0.26", default-features = false, features = [
"ring",
"logging",
] }
tonic = { version = "0.12", default-features = false }
tonic-build = { version = "0.12", default-features = false }
tonic = { version = "0.13", default-features = false }
tonic-build = { version = "0.13", default-features = false }
tower = { version = "0.5", default-features = false }
tower-service = { version = "0.3" }
tower-test = { version = "0.4" }
@ -136,4 +134,4 @@ default-features = false
features = ["tokio", "tracing"]
[workspace.dependencies.linkerd2-proxy-api]
version = "0.16.0"
version = "0.17.0"

View File

@ -3,7 +3,7 @@
# This is intended for **DEVELOPMENT ONLY**, i.e. so that proxy developers can
# easily test the proxy in the context of the larger `linkerd2` project.
ARG RUST_IMAGE=ghcr.io/linkerd/dev:v46-rust
ARG RUST_IMAGE=ghcr.io/linkerd/dev:v47-rust
# Use an arbitrary ~recent edge release image to get the proxy
# identity-initializing and linkerd-await wrappers.
@ -14,11 +14,16 @@ FROM $LINKERD2_IMAGE as linkerd2
FROM --platform=$BUILDPLATFORM $RUST_IMAGE as fetch
ARG PROXY_FEATURES=""
ARG TARGETARCH="amd64"
RUN apt-get update && \
apt-get install -y time && \
if [[ "$PROXY_FEATURES" =~ .*meshtls-boring.* ]] ; then \
apt-get install -y golang ; \
fi && \
case "$TARGETARCH" in \
amd64) true ;; \
arm64) apt-get install --no-install-recommends -y binutils-aarch64-linux-gnu ;; \
esac && \
rm -rf /var/lib/apt/lists/*
ENV CARGO_NET_RETRY=10
@ -33,7 +38,6 @@ RUN --mount=type=cache,id=cargo,target=/usr/local/cargo/registry \
FROM fetch as build
ENV CARGO_INCREMENTAL=0
ENV RUSTFLAGS="-D warnings -A deprecated --cfg tokio_unstable"
ARG TARGETARCH="amd64"
ARG PROFILE="release"
ARG LINKERD2_PROXY_VERSION=""
ARG LINKERD2_PROXY_VENDOR=""

View File

@ -2,7 +2,6 @@
targets = [
{ triple = "x86_64-unknown-linux-gnu" },
{ triple = "aarch64-unknown-linux-gnu" },
{ triple = "armv7-unknown-linux-gnu" },
]
[advisories]
@ -24,11 +23,6 @@ allow = [
private = { ignore = true }
confidence-threshold = 0.8
exceptions = [
{ allow = [
"ISC",
"MIT",
"OpenSSL",
], name = "ring", version = "*" },
{ allow = [
"ISC",
"OpenSSL",
@ -39,14 +33,6 @@ exceptions = [
], name = "aws-lc-fips-sys", version = "*" },
]
[[licenses.clarify]]
name = "ring"
version = "*"
expression = "MIT AND ISC AND OpenSSL"
license-files = [
{ path = "LICENSE", hash = 0xbd0eed23 },
]
[bans]
multiple-versions = "deny"
# Wildcard dependencies are used for all workspace-local crates.
@ -56,6 +42,8 @@ deny = [
{ name = "rustls", wrappers = ["tokio-rustls"] },
# rustls-webpki should be used instead.
{ name = "webpki" },
# aws-lc-rs should be used instead.
{ name = "ring" }
]
skip = [
# `linkerd-trace-context`, `rustls-pemfile` and `tonic` depend on `base64`
@ -76,6 +64,8 @@ skip-tree = [
{ name = "pprof" },
# aws-lc-rs uses a slightly outdated version of bindgen
{ name = "bindgen", version = "0.69.5" },
# socket v0.6 is still propagating through the ecosystem
{ name = "socket2", version = "0.5" },
]
[sources]

View File

@ -18,6 +18,10 @@ features := ""
export LINKERD2_PROXY_VERSION := env_var_or_default("LINKERD2_PROXY_VERSION", "0.0.0-dev" + `git rev-parse --short HEAD`)
export LINKERD2_PROXY_VENDOR := env_var_or_default("LINKERD2_PROXY_VENDOR", `whoami` + "@" + `hostname`)
# TODO: these variables will be included in dev v48
export AWS_LC_SYS_CFLAGS_aarch64_unknown_linux_gnu := env_var_or_default("AWS_LC_SYS_CFLAGS_aarch64_unknown_linux_gnu", "-fuse-ld=/usr/aarch64-linux-gnu/bin/ld")
export AWS_LC_SYS_CFLAGS_aarch64_unknown_linux_musl := env_var_or_default("AWS_LC_SYS_CFLAGS_aarch64_unknown_linux_musl", "-fuse-ld=/usr/aarch64-linux-gnu/bin/ld")
# The version name to use for packages.
package_version := "v" + LINKERD2_PROXY_VERSION
@ -26,7 +30,7 @@ docker-repo := "localhost/linkerd/proxy"
docker-tag := `git rev-parse --abbrev-ref HEAD | sed 's|/|.|g'` + "." + `git rev-parse --short HEAD`
docker-image := docker-repo + ":" + docker-tag
# The architecture name to use for packages. Either 'amd64', 'arm64', or 'arm'.
# The architecture name to use for packages. Either 'amd64' or 'arm64'.
arch := "amd64"
# The OS name to use for packages. Either 'linux' or 'windows'.
os := "linux"
@ -39,8 +43,6 @@ _target := if os + '-' + arch == "linux-amd64" {
"x86_64-unknown-linux-" + libc
} else if os + '-' + arch == "linux-arm64" {
"aarch64-unknown-linux-" + libc
} else if os + '-' + arch == "linux-arm" {
"armv7-unknown-linux-" + libc + "eabihf"
} else if os + '-' + arch == "windows-amd64" {
"x86_64-pc-windows-" + libc
} else {
@ -139,7 +141,7 @@ _strip:
_package_bin := _package_dir / "bin" / "linkerd2-proxy"
# XXX {aarch64,arm}-musl builds do not enable PIE, so we use target-specific
# XXX aarch64-musl builds do not enable PIE, so we use target-specific
# files to document those differences.
_expected_checksec := '.checksec' / arch + '-' + libc + '.json'

View File

@ -13,7 +13,7 @@
use futures::future::{self, TryFutureExt};
use http::StatusCode;
use linkerd_app_core::{
metrics::{self as metrics, FmtMetrics},
metrics::{self as metrics, legacy::FmtMetrics},
proxy::http::{Body, BoxBody, ClientHandle, Request, Response},
trace, Error, Result,
};
@ -32,7 +32,7 @@ pub use self::readiness::{Latch, Readiness};
#[derive(Clone)]
pub struct Admin<M> {
metrics: metrics::Serve<M>,
metrics: metrics::legacy::Serve<M>,
tracing: trace::Handle,
ready: Readiness,
shutdown_tx: mpsc::UnboundedSender<()>,
@ -52,7 +52,7 @@ impl<M> Admin<M> {
tracing: trace::Handle,
) -> Self {
Self {
metrics: metrics::Serve::new(metrics),
metrics: metrics::legacy::Serve::new(metrics),
ready,
shutdown_tx,
enable_shutdown,

View File

@ -2,7 +2,7 @@ use linkerd_app_core::{
classify,
config::ServerConfig,
drain, errors, identity,
metrics::{self, FmtMetrics},
metrics::{self, legacy::FmtMetrics},
proxy::http,
serve,
svc::{self, ExtractParam, InsertParam, Param},

View File

@ -13,31 +13,23 @@ independently of the inbound and outbound proxy logic.
"""
[dependencies]
bytes = { workspace = true }
drain = { workspace = true, features = ["retain"] }
http = { workspace = true }
http-body = { workspace = true }
http-body-util = { workspace = true }
hyper = { workspace = true, features = ["http1", "http2"] }
hyper-util = { workspace = true }
futures = { version = "0.3", default-features = false }
ipnet = "2.11"
prometheus-client = { workspace = true }
regex = "1"
serde_json = "1"
thiserror = "2"
tokio = { version = "1", features = ["macros", "sync", "parking_lot"] }
tokio-stream = { version = "0.1", features = ["time"] }
tonic = { workspace = true, default-features = false, features = ["prost"] }
tracing = { workspace = true }
parking_lot = "0.12"
pin-project = "1"
linkerd-addr = { path = "../../addr" }
linkerd-conditional = { path = "../../conditional" }
linkerd-dns = { path = "../../dns" }
linkerd-duplex = { path = "../../duplex" }
linkerd-errno = { path = "../../errno" }
linkerd-error = { path = "../../error" }
linkerd-error-respond = { path = "../../error-respond" }
linkerd-exp-backoff = { path = "../../exp-backoff" }
@ -64,6 +56,7 @@ linkerd-proxy-tcp = { path = "../../proxy/tcp" }
linkerd-proxy-transport = { path = "../../proxy/transport" }
linkerd-reconnect = { path = "../../reconnect" }
linkerd-router = { path = "../../router" }
linkerd-rustls = { path = "../../rustls" }
linkerd-service-profiles = { path = "../../service-profiles" }
linkerd-stack = { path = "../../stack" }
linkerd-stack-metrics = { path = "../../stack/metrics" }
@ -83,5 +76,6 @@ features = ["make", "spawn-ready", "timeout", "util", "limit"]
semver = "1"
[dev-dependencies]
bytes = { workspace = true }
http-body-util = { workspace = true }
linkerd-mock-http-body = { path = "../../mock/http-body" }
quickcheck = { version = "1", default-features = false }

View File

@ -1,5 +1,4 @@
use crate::profiles;
pub use classify::gate;
use linkerd_error::Error;
use linkerd_proxy_client_policy as client_policy;
use linkerd_proxy_http::{classify, HasH2Reason, ResponseTimeoutError};

View File

@ -101,7 +101,7 @@ impl Config {
identity: identity::NewClient,
) -> svc::ArcNewService<
(),
svc::BoxCloneSyncService<http::Request<tonic::body::BoxBody>, http::Response<RspBody>>,
svc::BoxCloneSyncService<http::Request<tonic::body::Body>, http::Response<RspBody>>,
> {
let addr = self.addr;
tracing::trace!(%addr, "Building");

View File

@ -25,6 +25,7 @@ pub mod metrics;
pub mod proxy;
pub mod serve;
pub mod svc;
pub mod tls_info;
pub mod transport;
pub use self::build_info::{BuildInfo, BUILD_INFO};

View File

@ -166,7 +166,7 @@ where
// === impl Metrics ===
impl Metrics {
pub fn new(retain_idle: Duration) -> (Self, impl FmtMetrics + Clone + Send + 'static) {
pub fn new(retain_idle: Duration) -> (Self, impl legacy::FmtMetrics + Clone + Send + 'static) {
let (control, control_report) = {
let m = http_metrics::Requests::<ControlLabels, Class>::default();
let r = m.clone().into_report(retain_idle).with_prefix("control");
@ -223,6 +223,7 @@ impl Metrics {
opentelemetry,
};
use legacy::FmtMetrics as _;
let report = endpoint_report
.and_report(profile_route_report)
.and_report(retry_report)
@ -248,7 +249,7 @@ impl svc::Param<ControlLabels> for control::ControlAddr {
}
}
impl FmtLabels for ControlLabels {
impl legacy::FmtLabels for ControlLabels {
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let Self { addr, server_id } = self;
@ -281,7 +282,7 @@ impl ProfileRouteLabels {
}
}
impl FmtLabels for ProfileRouteLabels {
impl legacy::FmtLabels for ProfileRouteLabels {
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let Self {
direction,
@ -314,7 +315,7 @@ impl From<OutboundEndpointLabels> for EndpointLabels {
}
}
impl FmtLabels for EndpointLabels {
impl legacy::FmtLabels for EndpointLabels {
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Inbound(i) => (Direction::In, i).fmt_labels(f),
@ -323,7 +324,7 @@ impl FmtLabels for EndpointLabels {
}
}
impl FmtLabels for InboundEndpointLabels {
impl legacy::FmtLabels for InboundEndpointLabels {
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let Self {
tls,
@ -343,7 +344,7 @@ impl FmtLabels for InboundEndpointLabels {
}
}
impl FmtLabels for ServerLabel {
impl legacy::FmtLabels for ServerLabel {
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let Self(meta, port) = self;
write!(
@ -374,7 +375,7 @@ impl prom::EncodeLabelSetMut for ServerLabel {
}
}
impl FmtLabels for ServerAuthzLabels {
impl legacy::FmtLabels for ServerAuthzLabels {
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let Self { server, authz } = self;
@ -389,7 +390,7 @@ impl FmtLabels for ServerAuthzLabels {
}
}
impl FmtLabels for RouteLabels {
impl legacy::FmtLabels for RouteLabels {
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let Self { server, route } = self;
@ -404,7 +405,7 @@ impl FmtLabels for RouteLabels {
}
}
impl FmtLabels for RouteAuthzLabels {
impl legacy::FmtLabels for RouteAuthzLabels {
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let Self { route, authz } = self;
@ -425,7 +426,7 @@ impl svc::Param<OutboundZoneLocality> for OutboundEndpointLabels {
}
}
impl FmtLabels for OutboundEndpointLabels {
impl legacy::FmtLabels for OutboundEndpointLabels {
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let Self {
server_id,
@ -462,20 +463,20 @@ impl fmt::Display for Direction {
}
}
impl FmtLabels for Direction {
impl legacy::FmtLabels for Direction {
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "direction=\"{self}\"")
}
}
impl FmtLabels for Authority<'_> {
impl legacy::FmtLabels for Authority<'_> {
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let Self(authority) = self;
write!(f, "authority=\"{authority}\"")
}
}
impl FmtLabels for Class {
impl legacy::FmtLabels for Class {
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let class = |ok: bool| if ok { "success" } else { "failure" };
@ -523,7 +524,7 @@ impl StackLabels {
}
}
impl FmtLabels for StackLabels {
impl legacy::FmtLabels for StackLabels {
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let Self {
direction,

View File

@ -0,0 +1,70 @@
use linkerd_metrics::prom;
use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue, LabelValueEncoder};
use std::{
fmt::{Error, Write},
sync::{Arc, OnceLock},
};
static TLS_INFO: OnceLock<Arc<TlsInfo>> = OnceLock::new();
#[derive(Clone, Debug, Default, Hash, PartialEq, Eq, EncodeLabelSet)]
pub struct TlsInfo {
tls_suites: MetricValueList,
tls_kx_groups: MetricValueList,
tls_rand: String,
tls_key_provider: String,
tls_fips: bool,
}
#[derive(Clone, Debug, Default, Hash, PartialEq, Eq)]
struct MetricValueList {
values: Vec<&'static str>,
}
impl FromIterator<&'static str> for MetricValueList {
fn from_iter<T: IntoIterator<Item = &'static str>>(iter: T) -> Self {
MetricValueList {
values: iter.into_iter().collect(),
}
}
}
impl EncodeLabelValue for MetricValueList {
fn encode(&self, encoder: &mut LabelValueEncoder<'_>) -> Result<(), Error> {
for value in &self.values {
value.encode(encoder)?;
encoder.write_char(',')?;
}
Ok(())
}
}
pub fn metric() -> prom::Family<TlsInfo, prom::ConstGauge> {
let fam = prom::Family::<TlsInfo, prom::ConstGauge>::new_with_constructor(|| {
prom::ConstGauge::new(1)
});
let tls_info = TLS_INFO.get_or_init(|| {
let provider = linkerd_rustls::get_default_provider();
let tls_suites = provider
.cipher_suites
.iter()
.flat_map(|cipher_suite| cipher_suite.suite().as_str())
.collect::<MetricValueList>();
let tls_kx_groups = provider
.kx_groups
.iter()
.flat_map(|suite| suite.name().as_str())
.collect::<MetricValueList>();
Arc::new(TlsInfo {
tls_suites,
tls_kx_groups,
tls_rand: format!("{:?}", provider.secure_random),
tls_key_provider: format!("{:?}", provider.key_provider),
tls_fips: provider.fips(),
})
});
let _ = fam.get_or_create(tls_info);
fam
}

View File

@ -1,7 +1,7 @@
use crate::metrics::ServerLabel as PolicyServerLabel;
pub use crate::metrics::{Direction, OutboundEndpointLabels};
use linkerd_conditional::Conditional;
use linkerd_metrics::FmtLabels;
use linkerd_metrics::legacy::FmtLabels;
use linkerd_tls as tls;
use std::{fmt, net::SocketAddr};

View File

@ -13,8 +13,7 @@ Configures and runs the inbound proxy
test-util = [
"linkerd-app-test",
"linkerd-idle-cache/test-util",
"linkerd-meshtls/rustls",
"linkerd-meshtls-rustls/test-util",
"linkerd-meshtls/test-util",
]
[dependencies]
@ -25,8 +24,7 @@ linkerd-app-core = { path = "../core" }
linkerd-app-test = { path = "../test", optional = true }
linkerd-http-access-log = { path = "../../http/access-log" }
linkerd-idle-cache = { path = "../../idle-cache" }
linkerd-meshtls = { path = "../../meshtls", optional = true }
linkerd-meshtls-rustls = { path = "../../meshtls/rustls", optional = true }
linkerd-meshtls = { path = "../../meshtls", optional = true, default-features = false }
linkerd-proxy-client-policy = { path = "../../proxy/client-policy" }
linkerd-tonic-stream = { path = "../../tonic-stream" }
linkerd-tonic-watch = { path = "../../tonic-watch" }
@ -49,7 +47,7 @@ hyper = { workspace = true, features = ["http1", "http2"] }
linkerd-app-test = { path = "../test" }
arbitrary = { version = "1", features = ["derive"] }
libfuzzer-sys = { version = "0.4", features = ["arbitrary-derive"] }
linkerd-meshtls-rustls = { path = "../../meshtls/rustls", features = [
linkerd-meshtls = { path = "../../meshtls", features = [
"test-util",
] }
@ -62,8 +60,7 @@ linkerd-http-metrics = { path = "../../http/metrics", features = ["test-util"] }
linkerd-http-box = { path = "../../http/box" }
linkerd-idle-cache = { path = "../../idle-cache", features = ["test-util"] }
linkerd-io = { path = "../../io", features = ["tokio-test"] }
linkerd-meshtls = { path = "../../meshtls", features = ["rustls"] }
linkerd-meshtls-rustls = { path = "../../meshtls/rustls", features = [
linkerd-meshtls = { path = "../../meshtls", features = [
"test-util",
] }
linkerd-proxy-server-policy = { path = "../../proxy/server-policy", features = [

View File

@ -18,8 +18,7 @@ linkerd-app-core = { path = "../../core" }
linkerd-app-inbound = { path = ".." }
linkerd-app-test = { path = "../../test" }
linkerd-idle-cache = { path = "../../../idle-cache", features = ["test-util"] }
linkerd-meshtls = { path = "../../../meshtls", features = ["rustls"] }
linkerd-meshtls-rustls = { path = "../../../meshtls/rustls", features = [
linkerd-meshtls = { path = "../../../meshtls", features = [
"test-util",
] }
linkerd-tracing = { path = "../../../tracing", features = ["ansi"] }

View File

@ -117,7 +117,7 @@ impl<N> Inbound<N> {
let identity = rt
.identity
.server()
.with_alpn(vec![transport_header::PROTOCOL.into()])
.spawn_with_alpn(vec![transport_header::PROTOCOL.into()])
.expect("TLS credential store must be held");
inner

View File

@ -113,10 +113,6 @@ impl<S> Inbound<S> {
&self.runtime.identity
}
pub fn proxy_metrics(&self) -> &metrics::Proxy {
&self.runtime.metrics.proxy
}
/// A helper for gateways to instrument policy checks.
pub fn authorize_http<N>(
&self,

View File

@ -13,7 +13,7 @@ pub(crate) mod error;
pub use linkerd_app_core::metrics::*;
/// Holds outbound proxy metrics.
/// Holds LEGACY inbound proxy metrics.
#[derive(Clone, Debug)]
pub struct InboundMetrics {
pub http_authz: authz::HttpAuthzMetrics,
@ -50,7 +50,7 @@ impl InboundMetrics {
}
}
impl FmtMetrics for InboundMetrics {
impl legacy::FmtMetrics for InboundMetrics {
fn fmt_metrics(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.http_authz.fmt_metrics(f)?;
self.http_errors.fmt_metrics(f)?;

View File

@ -1,8 +1,9 @@
use crate::policy::{AllowPolicy, HttpRoutePermit, Meta, ServerPermit};
use linkerd_app_core::{
metrics::{
metrics, Counter, FmtLabels, FmtMetrics, RouteAuthzLabels, RouteLabels, ServerAuthzLabels,
ServerLabel, TargetAddr, TlsAccept,
legacy::{Counter, FmtLabels, FmtMetrics},
metrics, RouteAuthzLabels, RouteLabels, ServerAuthzLabels, ServerLabel, TargetAddr,
TlsAccept,
},
tls,
transport::OrigDstAddr,

View File

@ -8,7 +8,7 @@ use crate::{
};
use linkerd_app_core::{
errors::{FailFastError, LoadShedError},
metrics::FmtLabels,
metrics::legacy::FmtLabels,
tls,
};
use std::fmt;

View File

@ -1,6 +1,9 @@
use super::ErrorKind;
use linkerd_app_core::{
metrics::{metrics, Counter, FmtMetrics, ServerLabel},
metrics::{
legacy::{Counter, FmtMetrics},
metrics, ServerLabel,
},
svc::{self, stack::NewMonitor},
transport::{labels::TargetAddr, OrigDstAddr},
Error,

View File

@ -1,6 +1,9 @@
use super::ErrorKind;
use linkerd_app_core::{
metrics::{metrics, Counter, FmtMetrics},
metrics::{
legacy::{Counter, FmtMetrics},
metrics,
},
svc::{self, stack::NewMonitor},
transport::{labels::TargetAddr, OrigDstAddr},
Error,

View File

@ -33,7 +33,7 @@ static INVALID_POLICY: once_cell::sync::OnceCell<ServerPolicy> = once_cell::sync
impl<S> Api<S>
where
S: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error> + Clone,
S: tonic::client::GrpcService<tonic::body::Body, Error = Error> + Clone,
S::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error> + Send + 'static,
{
pub(super) fn new(
@ -57,7 +57,7 @@ where
impl<S> Service<u16> for Api<S>
where
S: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
S: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
S: Clone + Send + Sync + 'static,
S::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error> + Send + 'static,
S::Future: Send + 'static,

View File

@ -40,7 +40,7 @@ impl Config {
limits: ReceiveLimits,
) -> impl GetPolicy + Clone + Send + Sync + 'static
where
C: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
C: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
C: Clone + Unpin + Send + Sync + 'static,
C::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error>,
C::ResponseBody: Send + 'static,

View File

@ -74,7 +74,7 @@ impl<S> Store<S> {
opaque_ports: RangeInclusiveSet<u16>,
) -> Self
where
S: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
S: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
S: Clone + Send + Sync + 'static,
S::Future: Send,
S::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error> + Send + 'static,
@ -138,7 +138,7 @@ impl<S> Store<S> {
impl<S> GetPolicy for Store<S>
where
S: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
S: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
S: Clone + Send + Sync + 'static,
S::Future: Send,
S::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error> + Send + 'static,

View File

@ -263,7 +263,7 @@ fn orig_dst_addr() -> OrigDstAddr {
OrigDstAddr(([192, 0, 2, 2], 1000).into())
}
impl tonic::client::GrpcService<tonic::body::BoxBody> for MockSvc {
impl tonic::client::GrpcService<tonic::body::Body> for MockSvc {
type ResponseBody = linkerd_app_core::control::RspBody;
type Error = Error;
type Future = futures::future::Pending<Result<http::Response<Self::ResponseBody>, Self::Error>>;
@ -275,7 +275,7 @@ impl tonic::client::GrpcService<tonic::body::BoxBody> for MockSvc {
unreachable!()
}
fn call(&mut self, _req: http::Request<tonic::body::BoxBody>) -> Self::Future {
fn call(&mut self, _req: http::Request<tonic::body::Body>) -> Self::Future {
unreachable!()
}
}

View File

@ -27,7 +27,7 @@ impl Inbound<()> {
limits: ReceiveLimits,
) -> impl policy::GetPolicy + Clone + Send + Sync + 'static
where
C: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
C: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
C: Clone + Unpin + Send + Sync + 'static,
C::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error>,
C::ResponseBody: Send + 'static,

View File

@ -3,9 +3,7 @@ pub use futures::prelude::*;
use linkerd_app_core::{
config,
dns::Suffix,
drain, exp_backoff,
identity::rustls,
metrics,
drain, exp_backoff, identity, metrics,
proxy::{
http::{h1, h2},
tap,
@ -98,7 +96,7 @@ pub fn runtime() -> (ProxyRuntime, drain::Signal) {
let (tap, _) = tap::new();
let (metrics, _) = metrics::Metrics::new(std::time::Duration::from_secs(10));
let runtime = ProxyRuntime {
identity: rustls::creds::default_for_test().1.into(),
identity: identity::creds::default_for_test().1,
metrics: metrics.proxy,
tap,
span_sink: None,

View File

@ -28,17 +28,19 @@ ipnet = "2"
linkerd-app = { path = "..", features = ["allow-loopback"] }
linkerd-app-core = { path = "../core" }
linkerd-app-test = { path = "../test" }
linkerd-meshtls = { path = "../../meshtls", features = ["test-util"] }
linkerd-metrics = { path = "../../metrics", features = ["test_util"] }
linkerd-rustls = { path = "../../rustls" }
linkerd-tracing = { path = "../../tracing" }
maplit = "1"
parking_lot = "0.12"
regex = "1"
rustls-pemfile = "2.2"
socket2 = "0.5"
socket2 = "0.6"
tokio = { version = "1", features = ["io-util", "net", "rt", "macros"] }
tokio-rustls = { workspace = true }
tokio-stream = { version = "0.1", features = ["sync"] }
tonic = { workspace = true, features = ["transport"], default-features = false }
tonic = { workspace = true, features = ["transport", "router"], default-features = false }
tower = { workspace = true, default-features = false }
tracing = { workspace = true }
@ -72,8 +74,5 @@ flate2 = { version = "1", default-features = false, features = [
] }
# Log streaming isn't enabled by default globally, but we want to test it.
linkerd-app-admin = { path = "../admin", features = ["log-streaming"] }
# No code from this crate is actually used; only necessary to enable the Rustls
# implementation.
linkerd-meshtls = { path = "../../meshtls", features = ["rustls"] }
linkerd-tracing = { path = "../../tracing", features = ["ansi"] }
serde_json = "1"

View File

@ -8,6 +8,7 @@ use std::{
};
use linkerd2_proxy_api::identity as pb;
use linkerd_rustls::get_default_provider;
use tokio_rustls::rustls::{self, server::WebPkiClientVerifier};
use tonic as grpc;
@ -34,10 +35,6 @@ type Certify = Box<
> + Send,
>;
static TLS_VERSIONS: &[&rustls::SupportedProtocolVersion] = &[&rustls::version::TLS13];
static TLS_SUPPORTED_CIPHERSUITES: &[rustls::SupportedCipherSuite] =
&[rustls::crypto::ring::cipher_suite::TLS13_CHACHA20_POLY1305_SHA256];
struct Certificates {
pub leaf: Vec<u8>,
pub intermediates: Vec<Vec<u8>>,
@ -110,12 +107,10 @@ impl Identity {
assert_ne!(added, 0, "trust anchors must include at least one cert");
assert_eq!(skipped, 0, "no certs in pemfile should be invalid");
let mut provider = rustls::crypto::ring::default_provider();
provider.cipher_suites = TLS_SUPPORTED_CIPHERSUITES.to_vec();
let provider = Arc::new(provider);
let provider = get_default_provider();
let client_config = rustls::ClientConfig::builder_with_provider(provider.clone())
.with_protocol_versions(TLS_VERSIONS)
.with_safe_default_protocol_versions()
.expect("client config must be valid")
.with_root_certificates(roots.clone())
.with_no_client_auth();
@ -127,7 +122,7 @@ impl Identity {
.expect("server verifier must be valid");
let server_config = rustls::ServerConfig::builder_with_provider(provider)
.with_protocol_versions(TLS_VERSIONS)
.with_safe_default_protocol_versions()
.expect("server config must be valid")
.with_client_cert_verifier(client_cert_verifier)
.with_single_cert(certs.chain(), key)

View File

@ -302,7 +302,7 @@ impl Controller {
}
pub async fn run(self) -> controller::Listening {
let svc = grpc::transport::Server::builder()
let routes = grpc::service::Routes::default()
.add_service(
inbound_server_policies_server::InboundServerPoliciesServer::new(Server(Arc::new(
self.inbound,
@ -310,9 +310,9 @@ impl Controller {
)
.add_service(outbound_policies_server::OutboundPoliciesServer::new(
Server(Arc::new(self.outbound)),
))
.into_service();
controller::run(RoutesSvc(svc), "support policy controller", None).await
));
controller::run(RoutesSvc(routes), "support policy controller", None).await
}
}
@ -525,7 +525,9 @@ impl Service<Request<hyper::body::Incoming>> for RoutesSvc {
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
let Self(routes) = self;
routes.poll_ready(cx)
<grpc::service::Routes as Service<Request<UnsyncBoxBody<Bytes, grpc::Status>>>>::poll_ready(
routes, cx,
)
}
fn call(&mut self, req: Request<hyper::body::Incoming>) -> Self::Future {

View File

@ -124,26 +124,6 @@ async fn inbound_timeout() {
.await;
}
/// Tests that the detect metric is labeled and incremented on I/O error.
#[tokio::test]
async fn inbound_io_err() {
let _trace = trace_init();
let (proxy, metrics) = Test::default().run().await;
let client = crate::tcp::client(proxy.inbound);
let tcp_client = client.connect().await;
tcp_client.write(TcpFixture::HELLO_MSG).await;
drop(tcp_client);
metric(&proxy)
.label("error", "i/o")
.value(1u64)
.assert_in(&metrics)
.await;
}
/// Tests that the detect metric is not incremented when TLS is successfully
/// detected.
#[tokio::test]
@ -189,44 +169,6 @@ async fn inbound_success() {
metric.assert_in(&metrics).await;
}
/// Tests both of the above cases together.
#[tokio::test]
async fn inbound_multi() {
let _trace = trace_init();
let (proxy, metrics) = Test::default().run().await;
let client = crate::tcp::client(proxy.inbound);
let metric = metric(&proxy);
let timeout_metric = metric.clone().label("error", "tls detection timeout");
let io_metric = metric.label("error", "i/o");
let tcp_client = client.connect().await;
tokio::time::sleep(TIMEOUT + Duration::from_millis(15)) // just in case
.await;
timeout_metric.clone().value(1u64).assert_in(&metrics).await;
drop(tcp_client);
let tcp_client = client.connect().await;
tcp_client.write(TcpFixture::HELLO_MSG).await;
drop(tcp_client);
io_metric.clone().value(1u64).assert_in(&metrics).await;
timeout_metric.clone().value(1u64).assert_in(&metrics).await;
let tcp_client = client.connect().await;
tokio::time::sleep(TIMEOUT + Duration::from_millis(15)) // just in case
.await;
io_metric.clone().value(1u64).assert_in(&metrics).await;
timeout_metric.clone().value(2u64).assert_in(&metrics).await;
drop(tcp_client);
}
/// Tests that TLS detect failure metrics are collected for the direct stack.
#[tokio::test]
async fn inbound_direct_multi() {

View File

@ -13,7 +13,7 @@ Configures and runs the outbound proxy
default = []
allow-loopback = []
test-subscriber = []
test-util = ["linkerd-app-test", "linkerd-meshtls-rustls/test-util", "dep:http-body"]
test-util = ["linkerd-app-test", "linkerd-meshtls/test-util", "dep:http-body"]
prometheus-client-rust-242 = [] # TODO
@ -42,7 +42,7 @@ linkerd-http-prom = { path = "../../http/prom" }
linkerd-http-retry = { path = "../../http/retry" }
linkerd-http-route = { path = "../../http/route" }
linkerd-identity = { path = "../../identity" }
linkerd-meshtls-rustls = { path = "../../meshtls/rustls", optional = true }
linkerd-meshtls = { path = "../../meshtls", optional = true, default-features = false }
linkerd-opaq-route = { path = "../../opaq-route" }
linkerd-proxy-client-policy = { path = "../../proxy/client-policy", features = [
"proto",
@ -67,8 +67,7 @@ linkerd-app-test = { path = "../test", features = ["client-policy"] }
linkerd-http-box = { path = "../../http/box" }
linkerd-http-prom = { path = "../../http/prom", features = ["test-util"] }
linkerd-io = { path = "../../io", features = ["tokio-test"] }
linkerd-meshtls = { path = "../../meshtls", features = ["rustls"] }
linkerd-meshtls-rustls = { path = "../../meshtls/rustls", features = [
linkerd-meshtls = { path = "../../meshtls", features = [
"test-util",
] }
linkerd-mock-http-body = { path = "../../mock/http-body" }

View File

@ -146,7 +146,7 @@ impl Outbound<()> {
export_hostname_labels: bool,
) -> impl policy::GetPolicy
where
C: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
C: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
C: Clone + Unpin + Send + Sync + 'static,
C::ResponseBody: proxy::http::Body<Data = tonic::codegen::Bytes, Error = Error>,
C::ResponseBody: Send + 'static,

View File

@ -130,7 +130,7 @@ impl OutboundMetrics {
}
}
impl FmtMetrics for OutboundMetrics {
impl legacy::FmtMetrics for OutboundMetrics {
fn fmt_metrics(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.http_errors.fmt_metrics(f)?;
self.tcp_errors.fmt_metrics(f)?;
@ -243,7 +243,7 @@ impl EncodeLabelSet for RouteRef {
// === impl ConcreteLabels ===
impl FmtLabels for ConcreteLabels {
impl legacy::FmtLabels for ConcreteLabels {
fn fmt_labels(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let ConcreteLabels(parent, backend) = self;

View File

@ -5,7 +5,7 @@ pub(crate) use self::{http::Http, tcp::Tcp};
use crate::http::IdentityRequired;
use linkerd_app_core::{
errors::{FailFastError, LoadShedError},
metrics::FmtLabels,
metrics::legacy::FmtLabels,
proxy::http::ResponseTimeoutError,
};
use std::fmt;

View File

@ -1,6 +1,9 @@
use super::ErrorKind;
use linkerd_app_core::{
metrics::{metrics, Counter, FmtMetrics},
metrics::{
legacy::{Counter, FmtMetrics},
metrics,
},
svc, Error,
};
use parking_lot::RwLock;

View File

@ -1,6 +1,9 @@
use super::ErrorKind;
use linkerd_app_core::{
metrics::{metrics, Counter, FmtMetrics},
metrics::{
legacy::{Counter, FmtMetrics},
metrics,
},
svc,
transport::{labels::TargetAddr, OrigDstAddr},
Error,

View File

@ -33,7 +33,7 @@ static INVALID_POLICY: once_cell::sync::OnceCell<ClientPolicy> = once_cell::sync
impl<S> Api<S>
where
S: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error> + Clone,
S: tonic::client::GrpcService<tonic::body::Body, Error = Error> + Clone,
S::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error> + Send + 'static,
{
pub(crate) fn new(
@ -59,7 +59,7 @@ where
impl<S> Service<Addr> for Api<S>
where
S: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
S: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
S: Clone + Send + Sync + 'static,
S::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error> + Send + 'static,
S::Future: Send + 'static,

View File

@ -60,7 +60,7 @@ pub(crate) fn runtime() -> (ProxyRuntime, drain::Signal) {
let (tap, _) = tap::new();
let (metrics, _) = metrics::Metrics::new(std::time::Duration::from_secs(10));
let runtime = ProxyRuntime {
identity: linkerd_meshtls_rustls::creds::default_for_test().1.into(),
identity: linkerd_meshtls::creds::default_for_test().1,
metrics: metrics.proxy,
tap,
span_sink: None,

View File

@ -43,7 +43,7 @@ impl Config {
) -> Result<
Dst<
impl svc::Service<
http::Request<tonic::body::BoxBody>,
http::Request<tonic::body::Body>,
Response = http::Response<control::RspBody>,
Error = Error,
Future = impl Send,

View File

@ -4,7 +4,8 @@ pub use linkerd_app_core::identity::{client, Id};
use linkerd_app_core::{
control, dns,
identity::{
client::linkerd::Certify, creds, CertMetrics, Credentials, DerX509, Mode, WithCertMetrics,
client::linkerd::Certify, creds, watch as watch_identity, CertMetrics, Credentials,
DerX509, WithCertMetrics,
},
metrics::{prom, ControlHttp as ClientMetrics},
Result,
@ -137,8 +138,7 @@ fn watch(
watch::Receiver<bool>,
)> {
let (tx, ready) = watch::channel(false);
let (store, receiver) =
Mode::default().watch(tls.id, tls.server_name, &tls.trust_anchors_pem)?;
let (store, receiver) = watch_identity(tls.id, tls.server_name, &tls.trust_anchors_pem)?;
let cred = WithCertMetrics::new(metrics, NotifyReady { store, tx });
Ok((cred, receiver, ready))
}

View File

@ -19,9 +19,10 @@ use linkerd_app_core::{
config::ServerConfig,
control::{ControlAddr, Metrics as ControlMetrics},
dns, drain,
metrics::{prom, FmtMetrics},
metrics::{legacy::FmtMetrics, prom},
serve,
svc::Param,
tls_info,
transport::{addrs::*, listen::Bind},
Error, ProxyRuntime,
};
@ -251,9 +252,6 @@ impl Config {
export_hostname_labels,
);
let dst_addr = dst.addr.clone();
// registry.sub_registry_with_prefix("gateway"),
let gateway = gateway::Gateway::new(gateway, inbound.clone(), outbound.clone()).stack(
dst.resolve.clone(),
dst.profiles.clone(),
@ -304,6 +302,7 @@ impl Config {
error!(%error, "Failed to register process metrics");
}
registry.register("proxy_build_info", "Proxy build info", BUILD_INFO.metric());
registry.register("rustls_info", "Proxy TLS info", tls_info::metric());
let admin = {
let identity = identity.receiver().server();
@ -330,7 +329,7 @@ impl Config {
Ok(App {
admin,
dst: dst_addr,
dst: dst.addr,
drain: drain_tx,
identity,
inbound_addr,

View File

@ -46,7 +46,7 @@ impl Config {
) -> Result<
Policy<
impl svc::Service<
http::Request<tonic::body::BoxBody>,
http::Request<tonic::body::Body>,
Response = http::Response<control::RspBody>,
Error = Error,
Future = impl Send,

View File

@ -6,7 +6,7 @@ use linkerd_opencensus::{self as opencensus, metrics, proto};
use std::{collections::HashMap, time::SystemTime};
use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream;
use tonic::{body::BoxBody, client::GrpcService};
use tonic::{body::Body as TonicBody, client::GrpcService};
use tracing::Instrument;
pub(super) fn create_collector<S>(
@ -18,7 +18,7 @@ pub(super) fn create_collector<S>(
legacy_metrics: metrics::Registry,
) -> EnabledCollector
where
S: GrpcService<BoxBody> + Clone + Send + 'static,
S: GrpcService<TonicBody> + Clone + Send + 'static,
S::Error: Into<Error>,
S::Future: Send,
S::ResponseBody: Body<Data = tonic::codegen::Bytes> + Send + 'static,

View File

@ -15,7 +15,7 @@ use std::{
};
use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream;
use tonic::{body::BoxBody, client::GrpcService};
use tonic::{body::Body as TonicBody, client::GrpcService};
use tracing::Instrument;
pub(super) struct OtelCollectorAttributes {
@ -31,7 +31,7 @@ pub(super) fn create_collector<S>(
legacy_metrics: metrics::Registry,
) -> EnabledCollector
where
S: GrpcService<BoxBody> + Clone + Send + 'static,
S: GrpcService<TonicBody> + Clone + Send + 'static,
S::Error: Into<Error>,
S::Future: Send,
S::ResponseBody: Body<Data = tonic::codegen::Bytes> + Send + 'static,

View File

@ -11,7 +11,6 @@ ahash = "0.8"
linkerd-stack = { path = "../stack" }
parking_lot = "0.12"
rand = { version = "0.9", features = ["small_rng"] }
tokio = { version = "1", features = ["macros"] }
tracing = { workspace = true }
[dev-dependencies]

View File

@ -7,14 +7,15 @@ edition = { workspace = true }
publish = { workspace = true }
[dependencies]
futures = { version = "0.3", default-features = false }
hickory-resolver = "0.25.2"
linkerd-dns-name = { path = "./name" }
linkerd-error = { path = "../error" }
prometheus-client = { workspace = true }
thiserror = "2"
tokio = { version = "1", features = ["rt", "sync", "time"] }
tracing = { workspace = true }
[dev-dependencies]
linkerd-error = { path = "../error" }
[lints.rust]
unexpected_cfgs = { level = "warn", check-cfg = ['cfg(fuzzing)'] }

View File

@ -15,10 +15,10 @@ tokio = { version = "1", default-features = false }
tracing = { workspace = true }
linkerd-error = { path = "../../error" }
linkerd-http-box = { path = "../../http/box" }
linkerd-stack = { path = "../../stack" }
[dev-dependencies]
tokio-test = "0.4"
tower-test = { workspace = true }
linkerd-http-box = { path = "../../http/box" }
linkerd-tracing = { path = "../../tracing", features = ["ansi"] }

View File

@ -2,7 +2,7 @@ use super::{ClassifyEos, ClassifyResponse};
use futures::{prelude::*, ready};
use http_body::Frame;
use linkerd_error::Error;
use linkerd_stack::{layer, ExtractParam, NewService, Service};
use linkerd_stack::Service;
use pin_project::{pin_project, pinned_drop};
use std::{
fmt::Debug,
@ -12,18 +12,6 @@ use std::{
};
use tokio::sync::mpsc;
/// Constructs new [`BroadcastClassification`] services.
///
/// `X` is an [`ExtractParam`] implementation that extracts a [`Tx`] from each
/// target. The [`Tx`] is used to broadcast the classification of each response
/// from the constructed [`BroadcastClassification`] service.
#[derive(Debug)]
pub struct NewBroadcastClassification<C, X, N> {
inner: N,
extract: X,
_marker: PhantomData<fn() -> C>,
}
/// A HTTP `Service` that applies a [`ClassifyResponse`] to each response, and
/// broadcasts the classification over a [`mpsc`] channel.
#[derive(Debug)]
@ -33,14 +21,6 @@ pub struct BroadcastClassification<C: ClassifyResponse, S> {
_marker: PhantomData<fn() -> C>,
}
/// A handle to a [`mpsc`] channel over which response classifications are
/// broadcasted.
///
/// This is extracted from a target value by [`NewBroadcastClassification`] when
/// constructing a [`BroadcastClassification`] service.
#[derive(Clone, Debug)]
pub struct Tx<C>(pub mpsc::Sender<C>);
#[pin_project]
pub struct ResponseFuture<C: ClassifyResponse, B, F> {
#[pin]
@ -62,59 +42,6 @@ struct State<C, T> {
tx: mpsc::Sender<T>,
}
// === impl NewBroadcastClassification ===
impl<C, X: Clone, N> NewBroadcastClassification<C, X, N> {
pub fn new(extract: X, inner: N) -> Self {
Self {
inner,
extract,
_marker: PhantomData,
}
}
/// Returns a [`layer::Layer`] that constructs `NewBroadcastClassification`
/// [`NewService`]s, using the provided [`ExtractParam`] implementation to
/// extract a classification [`Tx`] from the target.
pub fn layer_via(extract: X) -> impl layer::Layer<N, Service = Self> + Clone {
layer::mk(move |inner| Self::new(extract.clone(), inner))
}
}
impl<C, N> NewBroadcastClassification<C, (), N> {
/// Returns a [`layer::Layer`] that constructs `NewBroadcastClassification`
/// [`NewService`]s when the target type implements
/// [`linkerd_stack::Param`]`<`[`Tx`]`>`.
pub fn layer() -> impl layer::Layer<N, Service = Self> + Clone {
Self::layer_via(())
}
}
impl<T, C, X, N> NewService<T> for NewBroadcastClassification<C, X, N>
where
C: ClassifyResponse,
X: ExtractParam<Tx<C::Class>, T>,
N: NewService<T>,
{
type Service = BroadcastClassification<C, N::Service>;
fn new_service(&self, target: T) -> Self::Service {
let Tx(tx) = self.extract.extract_param(&target);
let inner = self.inner.new_service(target);
BroadcastClassification::new(tx, inner)
}
}
impl<C, X: Clone, N: Clone> Clone for NewBroadcastClassification<C, X, N> {
fn clone(&self) -> Self {
Self {
inner: self.inner.clone(),
extract: self.extract.clone(),
_marker: PhantomData,
}
}
}
// === impl BroadcastClassification ===
impl<C: ClassifyResponse, S> BroadcastClassification<C, S> {

View File

@ -4,12 +4,12 @@
use linkerd_error::Error;
pub use self::{
channel::{BroadcastClassification, NewBroadcastClassification, Tx},
channel::BroadcastClassification,
gate::{NewClassifyGate, NewClassifyGateSet},
insert::{InsertClassifyResponse, NewInsertClassifyResponse},
};
pub mod channel;
mod channel;
pub mod gate;
mod insert;

View File

@ -10,11 +10,9 @@ publish = { workspace = true }
test-util = []
[dependencies]
bytes = { workspace = true }
futures = { version = "0.3", default-features = false }
http = { workspace = true }
http-body = { workspace = true }
hyper = { workspace = true, features = ["http1", "http2"] }
parking_lot = "0.12"
pin-project = "1"
tokio = { version = "1", features = ["time"] }

View File

@ -2,7 +2,7 @@
#![forbid(unsafe_code)]
pub use self::{requests::Requests, retries::Retries};
use linkerd_metrics::SharedStore;
use linkerd_metrics::legacy::SharedStore;
use parking_lot::Mutex;
use std::{fmt, hash::Hash, time::Duration};

View File

@ -4,7 +4,10 @@ mod service;
pub use self::service::{NewHttpMetrics, ResponseBody};
use super::Report;
use linkerd_http_classify::ClassifyResponse;
use linkerd_metrics::{latency, Counter, FmtMetrics, Histogram, LastUpdate, NewMetrics};
use linkerd_metrics::{
latency,
legacy::{Counter, FmtMetrics, Histogram, LastUpdate, NewMetrics},
};
use linkerd_stack::{self as svc, layer};
use std::{collections::HashMap, fmt::Debug, hash::Hash};
use tokio::time::{Duration, Instant};
@ -146,7 +149,7 @@ impl ClassMetrics {
mod tests {
#[test]
fn expiry() {
use linkerd_metrics::FmtLabels;
use linkerd_metrics::legacy::FmtLabels;
use std::fmt;
use tokio::time::{Duration, Instant};

View File

@ -1,7 +1,8 @@
use super::{ClassMetrics, Metrics, StatusMetrics};
use crate::{Prefixed, Report};
use linkerd_metrics::{
latency, Counter, FmtLabels, FmtMetric, FmtMetrics, Histogram, Metric, Store,
latency,
legacy::{Counter, FmtLabels, FmtMetric, FmtMetrics, Histogram, Metric, Store},
};
use parking_lot::Mutex;
use std::{fmt, hash::Hash};

View File

@ -3,7 +3,7 @@ use futures::{ready, TryFuture};
use http_body::{Body, Frame};
use linkerd_error::Error;
use linkerd_http_classify::{ClassifyEos, ClassifyResponse};
use linkerd_metrics::NewMetrics;
use linkerd_metrics::legacy::NewMetrics;
use linkerd_stack::Proxy;
use parking_lot::Mutex;
use pin_project::{pin_project, pinned_drop};

View File

@ -1,5 +1,5 @@
use super::{Prefixed, Registry, Report};
use linkerd_metrics::{Counter, FmtLabels, FmtMetric, FmtMetrics, LastUpdate, Metric};
use linkerd_metrics::legacy::{Counter, FmtLabels, FmtMetric, FmtMetrics, LastUpdate, Metric};
use parking_lot::Mutex;
use std::{fmt, hash::Hash, sync::Arc};
use tokio::time::{Duration, Instant};

View File

@ -17,7 +17,6 @@ bytes = { workspace = true }
futures = { version = "0.3", default-features = false }
http = { workspace = true }
http-body = { workspace = true }
parking_lot = "0.12"
pin-project = "1"
prometheus-client = { workspace = true }
thiserror = "2"

View File

@ -14,7 +14,6 @@ http-body-util = { workspace = true }
http = { workspace = true }
parking_lot = "0.12"
pin-project = "1"
tokio = { version = "1", features = ["macros", "rt"] }
tower = { workspace = true, features = ["retry"] }
tracing = { workspace = true }
thiserror = "2"
@ -26,7 +25,6 @@ linkerd-metrics = { path = "../../metrics" }
linkerd-stack = { path = "../../stack" }
[dev-dependencies]
hyper = { workspace = true }
linkerd-tracing = { path = "../../tracing", features = ["ansi"] }
linkerd-mock-http-body = { path = "../../mock/http-body" }
tokio = { version = "1", features = ["macros", "rt"] }

View File

@ -21,6 +21,3 @@ url = "2"
workspace = true
features = ["http-route", "grpc-route"]
optional = true
[dev-dependencies]
maplit = "1"

View File

@ -10,7 +10,6 @@ Facilities for HTTP/1 upgrades.
"""
[dependencies]
bytes = { workspace = true }
drain = { workspace = true }
futures = { version = "0.3", default-features = false }
http = { workspace = true }

View File

@ -25,14 +25,14 @@ impl CertMetrics {
let expiry_ts = prom::Gauge::default();
registry.register_with_unit(
"expiration_timestamp",
"Time when the this proxy's current mTLS identity certificate will expire (in seconds since the UNIX epoch)",
"Time when this proxy's current mTLS identity certificate will expire (in seconds since the UNIX epoch)",
prom::Unit::Seconds, expiry_ts.clone()
);
let refresh_ts = prom::Gauge::default();
registry.register_with_unit(
"refresh_timestamp",
"Time when the this proxy's current mTLS identity certificate were last updated",
"Time when this proxy's current mTLS identity certificate was last updated",
prom::Unit::Seconds,
refresh_ts.clone(),
);

View File

@ -10,8 +10,6 @@ publish = { workspace = true }
test-util = []
[dependencies]
futures = { version = "0.3", default-features = false }
linkerd-error = { path = "../error" }
linkerd-stack = { path = "../stack" }
parking_lot = "0.12"
tokio = { version = "1", default-features = false, features = [
@ -28,4 +26,3 @@ tokio = { version = "1", default-features = false, features = [
"test-util",
"time",
] }
linkerd-tracing = { path = "../tracing", features = ["ansi"] }

View File

@ -7,34 +7,34 @@ edition = "2018"
publish = { workspace = true }
[features]
rustls = ["linkerd-meshtls-rustls", "__has_any_tls_impls"]
rustls-aws-lc = ["rustls", "linkerd-meshtls-rustls/aws-lc"]
rustls-aws-lc-fips = ["rustls-aws-lc", "linkerd-meshtls-rustls/aws-lc-fips"]
rustls-ring = ["rustls", "linkerd-meshtls-rustls/ring"]
boring = ["linkerd-meshtls-boring", "__has_any_tls_impls"]
boring-fips = ["boring", "linkerd-meshtls-boring/fips"]
# Enabled if *any* TLS impl is enabled.
__has_any_tls_impls = []
rustls-aws-lc-fips = ["tokio-rustls/fips"]
test-util = ["linkerd-tls-test-util"]
[dependencies]
futures = { version = "0.3", default-features = false }
pin-project = "1"
rustls-pemfile = "2.2"
rustls-webpki = { version = "0.103.4", default-features = false, features = ["std", "aws-lc-rs"] }
thiserror = "2"
tokio = { version = "1", features = ["macros", "rt", "sync"] }
tokio-rustls = { workspace = true, features = ["aws-lc-rs"] }
tracing = { workspace = true }
linkerd-dns-name = { path = "../dns/name" }
linkerd-error = { path = "../error" }
linkerd-identity = { path = "../identity" }
linkerd-io = { path = "../io" }
linkerd-meshtls-boring = { path = "boring", optional = true }
linkerd-meshtls-rustls = { path = "rustls", optional = true }
linkerd-meshtls-verifier = { path = "verifier" }
linkerd-rustls = { path = "../rustls" }
linkerd-stack = { path = "../stack" }
linkerd-tls = { path = "../tls" }
linkerd-tls-test-util = { path = "../tls/test-util", optional = true }
[dev-dependencies]
tokio = { version = "1", features = ["macros", "net", "rt-multi-thread"] }
tracing = { workspace = true }
rcgen = "0.14.2"
rcgen = { version = "0.14.3", default-features = false, features = ["crypto", "pem", "aws_lc_rs"] }
linkerd-conditional = { path = "../conditional" }
linkerd-proxy-transport = { path = "../proxy/transport" }
linkerd-tls-test-util = { path = "../tls/test-util" }
linkerd-tracing = { path = "../tracing", features = ["ansi"] }
linkerd-tracing = { path = "../tracing", features = ["ansi"] }

View File

@ -1,30 +0,0 @@
[package]
name = "linkerd-meshtls-boring"
version = { workspace = true }
authors = { workspace = true }
license = { workspace = true }
edition = "2018"
publish = { workspace = true }
[dependencies]
boring = "4"
futures = { version = "0.3", default-features = false }
hex = "0.4" # used for debug logging
tokio = { version = "1", features = ["macros", "sync"] }
tokio-boring = "4"
tracing = { workspace = true }
linkerd-dns-name = { path = "../../dns/name" }
linkerd-error = { path = "../../error" }
linkerd-identity = { path = "../../identity" }
linkerd-io = { path = "../../io" }
linkerd-meshtls-verifier = { path = "../verifier" }
linkerd-stack = { path = "../../stack" }
linkerd-tls = { path = "../../tls" }
[features]
fips = ["boring/fips"]
[dev-dependencies]
linkerd-tls-test-util = { path = "../../tls/test-util" }
linkerd-meshtls = { path = "../../meshtls" }

View File

@ -1,185 +0,0 @@
use crate::creds::CredsRx;
use linkerd_identity as id;
use linkerd_io as io;
use linkerd_meshtls_verifier as verifier;
use linkerd_stack::{NewService, Service};
use linkerd_tls::{client::AlpnProtocols, ClientTls, NegotiatedProtocolRef, ServerName};
use std::{future::Future, pin::Pin, sync::Arc, task::Context};
use tracing::{debug, trace};
#[derive(Clone)]
pub struct NewClient(CredsRx);
#[derive(Clone)]
pub struct Connect {
rx: CredsRx,
alpn: Option<Arc<[Vec<u8>]>>,
id: id::Id,
server: ServerName,
}
pub type ConnectFuture<I> = Pin<Box<dyn Future<Output = io::Result<ClientIo<I>>> + Send>>;
#[derive(Debug)]
pub struct ClientIo<I>(tokio_boring::SslStream<I>);
// === impl NewClient ===
impl NewClient {
pub(crate) fn new(rx: CredsRx) -> Self {
Self(rx)
}
}
impl NewService<ClientTls> for NewClient {
type Service = Connect;
fn new_service(&self, target: ClientTls) -> Self::Service {
Connect::new(target, self.0.clone())
}
}
impl std::fmt::Debug for NewClient {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("NewClient").finish()
}
}
// === impl Connect ===
impl Connect {
pub(crate) fn new(client_tls: ClientTls, rx: CredsRx) -> Self {
Self {
rx,
alpn: client_tls.alpn.map(|AlpnProtocols(ps)| ps.into()),
server: client_tls.server_name,
id: client_tls.server_id.into(),
}
}
}
impl<I> Service<I> for Connect
where
I: io::AsyncRead + io::AsyncWrite + Send + Unpin + 'static,
{
type Response = ClientIo<I>;
type Error = io::Error;
type Future = ConnectFuture<I>;
fn poll_ready(&mut self, _cx: &mut Context<'_>) -> io::Poll<()> {
io::Poll::Ready(Ok(()))
}
fn call(&mut self, io: I) -> Self::Future {
let server_name = self.server.clone();
let server_id = self.id.clone();
let connector = self
.rx
.borrow()
.connector(self.alpn.as_deref().unwrap_or(&[]));
Box::pin(async move {
let config = connector
.map_err(io::Error::other)?
.configure()
.map_err(io::Error::other)?;
// Establish a TLS connection to the server using the provided
// `server_name` as an SNI value to the server.
//
// Hostname verification is DISABLED, as we do not require that the
// peer's certificate actually matches the `server_name`. Instead,
// the `server_id` is used to perform the appropriate form of
// verification after the session is established.
let io = tokio_boring::connect(config.verify_hostname(false), server_name.as_str(), io)
.await
.map_err(|e| match e.as_io_error() {
// TODO(ver) boring should let us take ownership of the error directly.
Some(ioe) => io::Error::new(ioe.kind(), ioe.to_string()),
// XXX(ver) to use the boring error directly here we have to
// constrain the socket on Sync + std::fmt::Debug, which is
// a pain.
None => io::Error::other("unexpected TLS handshake error"),
})?;
// Servers must present a peer certificate. We extract the x509 cert
// and verify it manually against the `server_id`.
let cert = io
.ssl()
.peer_certificate()
.ok_or_else(|| io::Error::other("could not extract peer cert"))?;
let cert_der = id::DerX509(cert.to_der()?);
verifier::verify_id(&cert_der, &server_id)?;
debug!(
tls = io.ssl().version_str(),
client.cert = ?io.ssl().certificate().and_then(super::fingerprint),
peer.cert = ?io.ssl().peer_certificate().as_deref().and_then(super::fingerprint),
alpn = ?io.ssl().selected_alpn_protocol(),
"Initiated TLS connection"
);
trace!(peer.id = %server_id, peer.name = %server_name);
Ok(ClientIo(io))
})
}
}
// === impl ClientIo ===
impl<I: io::AsyncRead + io::AsyncWrite + Unpin> io::AsyncRead for ClientIo<I> {
#[inline]
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut io::ReadBuf<'_>,
) -> io::Poll<()> {
Pin::new(&mut self.0).poll_read(cx, buf)
}
}
impl<I: io::AsyncRead + io::AsyncWrite + Unpin> io::AsyncWrite for ClientIo<I> {
#[inline]
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
Pin::new(&mut self.0).poll_flush(cx)
}
#[inline]
fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
Pin::new(&mut self.0).poll_shutdown(cx)
}
#[inline]
fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> io::Poll<usize> {
Pin::new(&mut self.0).poll_write(cx, buf)
}
#[inline]
fn poll_write_vectored(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
bufs: &[io::IoSlice<'_>],
) -> io::Poll<usize> {
Pin::new(&mut self.0).poll_write_vectored(cx, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
self.0.is_write_vectored()
}
}
impl<I> ClientIo<I> {
#[inline]
pub fn negotiated_protocol(&self) -> Option<NegotiatedProtocolRef<'_>> {
self.0
.ssl()
.selected_alpn_protocol()
.map(NegotiatedProtocolRef)
}
}
impl<I: io::PeerAddr> io::PeerAddr for ClientIo<I> {
#[inline]
fn peer_addr(&self) -> io::Result<std::net::SocketAddr> {
self.0.get_ref().peer_addr()
}
}

View File

@ -1,209 +0,0 @@
mod receiver;
mod store;
pub use self::{receiver::Receiver, store::Store};
use boring::{
pkey::{PKey, Private},
ssl,
x509::{store::X509StoreBuilder, X509},
};
use linkerd_dns_name as dns;
use linkerd_error::Result;
use linkerd_identity as id;
use std::sync::Arc;
use tokio::sync::watch;
pub fn watch(
local_id: id::Id,
server_name: dns::Name,
roots_pem: &str,
) -> Result<(Store, Receiver)> {
let creds = {
let roots = X509::stack_from_pem(roots_pem.as_bytes())?;
Arc::new(BaseCreds { roots })
};
let (tx, rx) = watch::channel(Creds::from(creds.clone()));
let rx = Receiver::new(local_id.clone(), server_name, rx);
let store = Store::new(creds, local_id, tx);
Ok((store, rx))
}
pub(crate) struct Creds {
base: Arc<BaseCreds>,
certs: Option<Certs>,
}
struct BaseCreds {
roots: Vec<X509>,
}
struct Certs {
leaf: X509,
intermediates: Vec<X509>,
key: PKey<Private>,
}
pub(crate) type CredsRx = watch::Receiver<Creds>;
type CredsTx = watch::Sender<Creds>;
// === impl Creds ===
impl From<Arc<BaseCreds>> for Creds {
fn from(base: Arc<BaseCreds>) -> Self {
Self { base, certs: None }
}
}
impl Creds {
// TODO(ver) Specify certificate types, signing algorithms, cipher suites..
pub(crate) fn acceptor(&self, alpn_protocols: &[Vec<u8>]) -> Result<ssl::SslAcceptor> {
// mozilla_intermediate_v5 is the only variant that enables TLSv1.3, so we use that.
let mut conn = ssl::SslAcceptor::mozilla_intermediate_v5(ssl::SslMethod::tls_server())?;
// Force use of TLSv1.3.
conn.set_options(ssl::SslOptions::NO_TLSV1_2);
conn.clear_options(ssl::SslOptions::NO_TLSV1_3);
let roots = self.root_store()?;
tracing::debug!(
roots = ?self
.base
.roots
.iter()
.filter_map(|c| super::fingerprint(c))
.collect::<Vec<_>>(),
"Configuring acceptor roots",
);
conn.set_cert_store(roots);
// Ensure that client certificates are validated when present.
conn.set_verify(ssl::SslVerifyMode::PEER);
if let Some(certs) = &self.certs {
tracing::debug!(
cert = ?super::fingerprint(&certs.leaf),
"Configuring acceptor certificate",
);
conn.set_private_key(&certs.key)?;
conn.set_certificate(&certs.leaf)?;
conn.check_private_key()?;
for c in &certs.intermediates {
conn.add_extra_chain_cert(c.to_owned())?;
}
}
if !alpn_protocols.is_empty() {
let p = serialize_alpn(alpn_protocols)?;
conn.set_alpn_protos(&p)?;
}
Ok(conn.build())
}
// TODO(ver) Specify certificate types, signing algorithms, cipher suites..
pub(crate) fn connector(&self, alpn_protocols: &[Vec<u8>]) -> Result<ssl::SslConnector> {
// XXX(ver) This function reads from the environment and/or the filesystem. This likely is
// at best wasteful and at worst unsafe (if another thread were to mutate these environment
// variables simultaneously, for instance). Unfortunately, the boring APIs don't really give
// us an alternative AFAICT.
let mut conn = ssl::SslConnector::builder(ssl::SslMethod::tls_client())?;
// Explicitly enable use of TLSv1.3
conn.set_options(ssl::SslOptions::NO_TLSV1 | ssl::SslOptions::NO_TLSV1_1);
// XXX(ver) if we disable use of TLSv1.2, connections just hang.
//conn.set_options(ssl::SslOptions::NO_TLSV1_2);
conn.clear_options(ssl::SslOptions::NO_TLSV1_3);
tracing::debug!(
roots = ?self
.base
.roots
.iter()
.filter_map(|c| super::fingerprint(c))
.collect::<Vec<_>>(),
"Configuring connector roots",
);
let roots = self.root_store()?;
conn.set_cert_store(roots);
if let Some(certs) = &self.certs {
tracing::debug!(
cert = ?super::fingerprint(&certs.leaf),
intermediates = %certs.intermediates.len(),
"Configuring connector certificate",
);
conn.set_private_key(&certs.key)?;
conn.set_certificate(&certs.leaf)?;
conn.check_private_key()?;
for c in &certs.intermediates {
conn.add_extra_chain_cert(c.to_owned())?;
}
}
if !alpn_protocols.is_empty() {
let p = serialize_alpn(alpn_protocols)?;
conn.set_alpn_protos(&p)?;
}
Ok(conn.build())
}
fn root_store(&self) -> Result<boring::x509::store::X509Store> {
let mut store = X509StoreBuilder::new()?;
for c in &self.base.roots {
store.add_cert(c.to_owned())?;
}
Ok(store.build())
}
}
/// Encodes a list of ALPN protocols into a slice of bytes.
///
/// `boring` requires that the list of protocols be encoded in the wire format.
fn serialize_alpn(protocols: &[Vec<u8>]) -> Result<Vec<u8>> {
// Allocate a buffer to hold the encoded protocols.
let mut bytes = {
// One additional byte for each protocol's length prefix.
let cap = protocols.len() + protocols.iter().map(Vec::len).sum::<usize>();
Vec::with_capacity(cap)
};
// Encode each protocol as a length-prefixed string.
for p in protocols {
if p.is_empty() {
continue;
}
if p.len() > 255 {
return Err("ALPN protocols must be less than 256 bytes".into());
}
bytes.push(p.len() as u8);
bytes.extend(p);
}
Ok(bytes)
}
#[cfg(test)]
#[test]
fn test_serialize_alpn() {
assert_eq!(serialize_alpn(&[b"h2".to_vec()]).unwrap(), b"\x02h2");
assert_eq!(
serialize_alpn(&[b"h2".to_vec(), b"http/1.1".to_vec()]).unwrap(),
b"\x02h2\x08http/1.1"
);
assert_eq!(
serialize_alpn(&[b"h2".to_vec(), b"http/1.1".to_vec()]).unwrap(),
b"\x02h2\x08http/1.1"
);
assert_eq!(
serialize_alpn(&[b"h2".to_vec(), vec![], b"http/1.1".to_vec()]).unwrap(),
b"\x02h2\x08http/1.1"
);
assert!(serialize_alpn(&[(0..255).collect()]).is_ok());
assert!(serialize_alpn(&[(0..=255).collect()]).is_err());
}

View File

@ -1,45 +0,0 @@
use super::CredsRx;
use crate::{NewClient, Server};
use linkerd_dns_name as dns;
use linkerd_identity as id;
#[derive(Clone)]
pub struct Receiver {
id: id::Id,
name: dns::Name,
rx: CredsRx,
}
impl Receiver {
pub(crate) fn new(id: id::Id, name: dns::Name, rx: CredsRx) -> Self {
Self { id, name, rx }
}
/// Returns the local identity.
pub fn local_id(&self) -> &id::Id {
&self.id
}
/// Returns the mTLS Server Name.
pub fn server_name(&self) -> &dns::Name {
&self.name
}
/// Returns a `NewClient` that can be used to establish TLS on client connections.
pub fn new_client(&self) -> NewClient {
NewClient::new(self.rx.clone())
}
/// Returns a `Server` that can be used to terminate TLS on server connections.
pub fn server(&self) -> Server {
Server::new(self.name.clone(), self.rx.clone())
}
}
impl std::fmt::Debug for Receiver {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Receiver")
.field("name", &self.name)
.finish()
}
}

View File

@ -1,72 +0,0 @@
use super::{BaseCreds, Certs, Creds, CredsTx};
use boring::pkey::PKey;
use boring::x509::{X509StoreContext, X509};
use linkerd_error::Result;
use linkerd_identity as id;
use linkerd_meshtls_verifier as verifier;
use std::sync::Arc;
pub struct Store {
creds: Arc<BaseCreds>,
id: id::Id,
tx: CredsTx,
}
// === impl Store ===
impl Store {
pub(super) fn new(creds: Arc<BaseCreds>, id: id::Id, tx: CredsTx) -> Self {
Self { creds, id, tx }
}
}
impl id::Credentials for Store {
/// Publishes TLS client and server configurations using
fn set_certificate(
&mut self,
id::DerX509(leaf_der): id::DerX509,
intermediates: Vec<id::DerX509>,
key_pkcs8: Vec<u8>,
_expiry: std::time::SystemTime,
) -> Result<()> {
let leaf = X509::from_der(&leaf_der)?;
verifier::verify_id(&leaf_der, &self.id)?;
let intermediates = intermediates
.into_iter()
.map(|id::DerX509(der)| X509::from_der(&der).map_err(Into::into))
.collect::<Result<Vec<_>>>()?;
let key = PKey::private_key_from_pkcs8(&key_pkcs8)?;
let creds = Creds {
base: self.creds.clone(),
certs: Some(Certs {
leaf,
intermediates,
key,
}),
};
let mut context = X509StoreContext::new()?;
let roots = creds.root_store()?;
let mut chain = boring::stack::Stack::new()?;
for i in &creds.certs.as_ref().unwrap().intermediates {
chain.push(i.to_owned())?;
}
let init = {
let leaf = &creds.certs.as_ref().unwrap().leaf;
context.init(&roots, leaf, &chain, |c| c.verify_cert())?
};
if !init {
return Err("certificate could not be validated against the trust chain".into());
}
// If receivers are dropped, we don't return an error (as this would likely cause the
// updater to retry more aggressively). It's fine to silently ignore these errors.
let _ = self.tx.send(creds);
Ok(())
}
}
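The "silently ignore" note above leans on `tokio::sync::watch` semantics: `send` only fails once every receiver has been dropped, and that failure is safe to discard. A tiny standalone check of that behavior:

```rust
use tokio::sync::watch;

fn main() {
    let (tx, rx) = watch::channel(0u32);
    // With a live receiver, updates are delivered normally.
    assert!(tx.send(1).is_ok());

    drop(rx);
    // Every receiver is gone: `send` now reports an error, which a caller like
    // the store above can safely discard instead of treating as fatal.
    assert!(tx.send(2).is_err());
    let _ = tx.send(3);
}
```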

View File

@ -1,40 +0,0 @@
#![deny(rust_2018_idioms, clippy::disallowed_methods, clippy::disallowed_types)]
#![forbid(unsafe_code)]
//! This crate provides an implementation of _meshtls_ backed by `boringssl` (as
//! provided by <https://github.com/cloudflare/boring>).
//!
//! There are several caveats with the current implementation:
//!
//! In its current form, this crate is compatible with the `meshtls-rustls`
//! implementation, which requires ECDSA-P256-SHA256 keys & signature
//! algorithms. This crate doesn't actually constrain the algorithms beyond
//! Mozilla's 'intermediate' (v5) [defaults][defaults]. But the goal of
//! supporting `boring` is to provide a FIPS 140-2 compliant mode. There's a
//! [PR][fips-pr] that implements this, but code changes will likely be required
//! to enable this once it's merged/released.
//!
//! A new SSL context is created for each connection. This is probably
//! unnecessary, but it's simpler for now. We can revisit this if needed.
//!
//! This module is not enabled by default. See the `linkerd-meshtls` and
//! `linkerd2-proxy` crates for more information.
//!
//! [defaults]: https://wiki.mozilla.org/Security/Server_Side_TLS
//! [fips-pr]: https://github.com/cloudflare/boring/pull/52
mod client;
pub mod creds;
mod server;
#[cfg(test)]
mod tests;
pub use self::{
client::{ClientIo, Connect, ConnectFuture, NewClient},
server::{Server, ServerIo, TerminateFuture},
};
fn fingerprint(c: &boring::x509::X509Ref) -> Option<String> {
let digest = c.digest(boring::hash::MessageDigest::sha256()).ok()?;
Some(hex::encode(digest)[0..8].to_string())
}

View File

@ -1,180 +0,0 @@
use crate::creds::CredsRx;
use linkerd_dns_name as dns;
use linkerd_io as io;
use linkerd_meshtls_verifier as verifier;
use linkerd_stack::{Param, Service};
use linkerd_tls::{ClientId, NegotiatedProtocol, ServerName, ServerTls};
use std::{future::Future, pin::Pin, sync::Arc, task::Context};
use tracing::debug;
#[derive(Clone)]
pub struct Server {
name: dns::Name,
rx: CredsRx,
alpn: Option<Arc<[Vec<u8>]>>,
}
pub type TerminateFuture<I> =
Pin<Box<dyn Future<Output = io::Result<(ServerTls, ServerIo<I>)>> + Send>>;
#[derive(Debug)]
pub struct ServerIo<I>(tokio_boring::SslStream<I>);
// === impl Server ===
impl Server {
pub(crate) fn new(name: dns::Name, rx: CredsRx) -> Self {
Self {
name,
rx,
alpn: None,
}
}
pub fn with_alpn(mut self, alpn_protocols: Vec<Vec<u8>>) -> Self {
self.alpn = if alpn_protocols.is_empty() {
None
} else {
Some(alpn_protocols.into())
};
self
}
}
impl Param<ServerName> for Server {
fn param(&self) -> ServerName {
ServerName(self.name.clone())
}
}
impl<I> Service<I> for Server
where
I: io::AsyncRead + io::AsyncWrite + Send + Unpin + 'static,
{
type Response = (ServerTls, ServerIo<I>);
type Error = std::io::Error;
type Future = TerminateFuture<I>;
#[inline]
fn poll_ready(&mut self, _cx: &mut Context<'_>) -> io::Poll<()> {
io::Poll::Ready(Ok(()))
}
fn call(&mut self, io: I) -> Self::Future {
// TODO(ver) we should avoid creating a new context for each connection.
let acceptor = self
.rx
.borrow()
.acceptor(self.alpn.as_deref().unwrap_or(&[]));
Box::pin(async move {
let acc = acceptor.map_err(io::Error::other)?;
let io = tokio_boring::accept(&acc, io)
.await
.map(ServerIo)
.map_err(|e| match e.as_io_error() {
Some(ioe) => io::Error::new(ioe.kind(), ioe.to_string()),
// XXX(ver) to use the boring error directly here we have to constrain the
// socket on Sync + std::fmt::Debug, which is a pain.
None => io::Error::other("unexpected TLS handshake error"),
})?;
let client_id = io.client_identity();
let negotiated_protocol = io.negotiated_protocol();
debug!(
tls = io.0.ssl().version_str(),
srv.cert = ?io.0.ssl().certificate().and_then(super::fingerprint),
peer.cert = ?io.0.ssl().peer_certificate().as_deref().and_then(super::fingerprint),
client.id = ?client_id,
alpn = ?negotiated_protocol,
"Accepted TLS connection"
);
let tls = ServerTls::Established {
client_id,
negotiated_protocol,
};
Ok((tls, io))
})
}
}
// === impl ServerIo ===
impl<I> ServerIo<I> {
#[inline]
fn negotiated_protocol(&self) -> Option<NegotiatedProtocol> {
self.0
.ssl()
.selected_alpn_protocol()
.map(|p| NegotiatedProtocol(p.to_vec()))
}
fn client_identity(&self) -> Option<ClientId> {
match self.0.ssl().peer_certificate() {
Some(cert) => {
let der = cert
.to_der()
.map_err(
|error| tracing::warn!(%error, "Failed to encode client end cert to der"),
)
.ok()?;
verifier::client_identity(&der).map(ClientId)
}
None => {
debug!("Connection missing peer certificate");
None
}
}
}
}
impl<I: io::AsyncRead + io::AsyncWrite + Unpin> io::AsyncRead for ServerIo<I> {
#[inline]
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut io::ReadBuf<'_>,
) -> io::Poll<()> {
Pin::new(&mut self.0).poll_read(cx, buf)
}
}
impl<I: io::AsyncRead + io::AsyncWrite + Unpin> io::AsyncWrite for ServerIo<I> {
#[inline]
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
Pin::new(&mut self.0).poll_flush(cx)
}
#[inline]
fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
Pin::new(&mut self.0).poll_shutdown(cx)
}
#[inline]
fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> io::Poll<usize> {
Pin::new(&mut self.0).poll_write(cx, buf)
}
#[inline]
fn poll_write_vectored(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
bufs: &[io::IoSlice<'_>],
) -> io::Poll<usize> {
Pin::new(&mut self.0).poll_write_vectored(cx, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
self.0.is_write_vectored()
}
}
impl<I: io::PeerAddr> io::PeerAddr for ServerIo<I> {
#[inline]
fn peer_addr(&self) -> io::Result<std::net::SocketAddr> {
self.0.get_ref().peer_addr()
}
}

View File

@ -1,36 +0,0 @@
[package]
name = "linkerd-meshtls-rustls"
version = { workspace = true }
authors = { workspace = true }
license = { workspace = true }
edition = "2018"
publish = { workspace = true }
[features]
default = ["ring"]
ring = ["tokio-rustls/ring", "rustls-webpki/ring"]
aws-lc = ["tokio-rustls/aws-lc-rs", "rustls-webpki/aws-lc-rs"]
aws-lc-fips = ["aws-lc", "tokio-rustls/fips"]
test-util = ["linkerd-tls-test-util"]
[dependencies]
futures = { version = "0.3", default-features = false }
ring = { version = "0.17", features = ["std"] }
rustls-pemfile = "2.2"
rustls-webpki = { version = "0.103.3", default-features = false, features = ["std"] }
thiserror = "2"
tokio = { version = "1", features = ["macros", "rt", "sync"] }
tokio-rustls = { workspace = true }
tracing = { workspace = true }
linkerd-dns-name = { path = "../../dns/name" }
linkerd-error = { path = "../../error" }
linkerd-io = { path = "../../io" }
linkerd-identity = { path = "../../identity" }
linkerd-stack = { path = "../../stack" }
linkerd-tls = { path = "../../tls" }
linkerd-tls-test-util = { path = "../../tls/test-util", optional = true }
linkerd-meshtls-verifier = { path = "../verifier" }
[dev-dependencies]
linkerd-tls-test-util = { path = "../../tls/test-util" }

View File

@ -1,11 +0,0 @@
#[cfg(feature = "aws-lc")]
mod aws_lc;
#[cfg(feature = "ring")]
mod ring;
#[cfg(feature = "aws-lc")]
pub use aws_lc::{default_provider, SUPPORTED_SIG_ALGS, TLS_SUPPORTED_CIPHERSUITES};
#[cfg(all(not(feature = "aws-lc"), feature = "ring"))]
pub use ring::{default_provider, SUPPORTED_SIG_ALGS, TLS_SUPPORTED_CIPHERSUITES};
#[cfg(all(not(feature = "aws-lc"), not(feature = "ring")))]
compile_error!("No rustls backend enabled. Enabled one of the \"ring\" or \"aws-lc\" features");

View File

@ -1,74 +0,0 @@
pub use aws_lc_rs::default_provider;
use tokio_rustls::rustls::{
self,
crypto::{aws_lc_rs, WebPkiSupportedAlgorithms},
};
#[cfg(not(feature = "aws-lc-fips"))]
pub static TLS_SUPPORTED_CIPHERSUITES: &[rustls::SupportedCipherSuite] = &[
aws_lc_rs::cipher_suite::TLS13_CHACHA20_POLY1305_SHA256,
aws_lc_rs::cipher_suite::TLS13_AES_128_GCM_SHA256,
aws_lc_rs::cipher_suite::TLS13_AES_256_GCM_SHA384,
];
// Prefer aes-256-gcm if fips is enabled, with chacha20-poly1305 as a fallback
#[cfg(feature = "aws-lc-fips")]
pub static TLS_SUPPORTED_CIPHERSUITES: &[rustls::SupportedCipherSuite] = &[
aws_lc_rs::cipher_suite::TLS13_AES_256_GCM_SHA384,
aws_lc_rs::cipher_suite::TLS13_AES_128_GCM_SHA256,
aws_lc_rs::cipher_suite::TLS13_CHACHA20_POLY1305_SHA256,
];
pub static SUPPORTED_SIG_ALGS: &WebPkiSupportedAlgorithms = &WebPkiSupportedAlgorithms {
all: &[
webpki::aws_lc_rs::ECDSA_P256_SHA256,
webpki::aws_lc_rs::ECDSA_P256_SHA384,
webpki::aws_lc_rs::ECDSA_P384_SHA256,
webpki::aws_lc_rs::ECDSA_P384_SHA384,
webpki::aws_lc_rs::ECDSA_P521_SHA256,
webpki::aws_lc_rs::ECDSA_P521_SHA384,
webpki::aws_lc_rs::ECDSA_P521_SHA512,
webpki::aws_lc_rs::ED25519,
webpki::aws_lc_rs::RSA_PKCS1_2048_8192_SHA256,
webpki::aws_lc_rs::RSA_PKCS1_2048_8192_SHA384,
webpki::aws_lc_rs::RSA_PKCS1_2048_8192_SHA512,
webpki::aws_lc_rs::RSA_PKCS1_3072_8192_SHA384,
],
mapping: &[
// Note: for TLS1.2 the curve is not fixed by SignatureScheme. For TLS1.3 it is.
(
rustls::SignatureScheme::ECDSA_NISTP384_SHA384,
&[
webpki::aws_lc_rs::ECDSA_P384_SHA384,
webpki::aws_lc_rs::ECDSA_P256_SHA384,
webpki::aws_lc_rs::ECDSA_P521_SHA384,
],
),
(
rustls::SignatureScheme::ECDSA_NISTP256_SHA256,
&[
webpki::aws_lc_rs::ECDSA_P256_SHA256,
webpki::aws_lc_rs::ECDSA_P384_SHA256,
webpki::aws_lc_rs::ECDSA_P521_SHA256,
],
),
(
rustls::SignatureScheme::ECDSA_NISTP521_SHA512,
&[webpki::aws_lc_rs::ECDSA_P521_SHA512],
),
(
rustls::SignatureScheme::ED25519,
&[webpki::aws_lc_rs::ED25519],
),
(
rustls::SignatureScheme::RSA_PKCS1_SHA512,
&[webpki::aws_lc_rs::RSA_PKCS1_2048_8192_SHA512],
),
(
rustls::SignatureScheme::RSA_PKCS1_SHA384,
&[webpki::aws_lc_rs::RSA_PKCS1_2048_8192_SHA384],
),
(
rustls::SignatureScheme::RSA_PKCS1_SHA256,
&[webpki::aws_lc_rs::RSA_PKCS1_2048_8192_SHA256],
),
],
};

View File

@ -1,55 +0,0 @@
pub use ring::default_provider;
use tokio_rustls::rustls::{
self,
crypto::{ring, WebPkiSupportedAlgorithms},
};
pub static TLS_SUPPORTED_CIPHERSUITES: &[rustls::SupportedCipherSuite] = &[
ring::cipher_suite::TLS13_CHACHA20_POLY1305_SHA256,
ring::cipher_suite::TLS13_AES_128_GCM_SHA256,
ring::cipher_suite::TLS13_AES_256_GCM_SHA384,
];
// A subset of the algorithms supported by rustls+ring, imported from
// https://github.com/rustls/rustls/blob/v/0.23.21/rustls/src/crypto/ring/mod.rs#L107
pub static SUPPORTED_SIG_ALGS: &WebPkiSupportedAlgorithms = &WebPkiSupportedAlgorithms {
all: &[
webpki::ring::ECDSA_P256_SHA256,
webpki::ring::ECDSA_P256_SHA384,
webpki::ring::ECDSA_P384_SHA256,
webpki::ring::ECDSA_P384_SHA384,
webpki::ring::ED25519,
webpki::ring::RSA_PKCS1_2048_8192_SHA256,
webpki::ring::RSA_PKCS1_2048_8192_SHA384,
webpki::ring::RSA_PKCS1_2048_8192_SHA512,
webpki::ring::RSA_PKCS1_3072_8192_SHA384,
],
mapping: &[
(
rustls::SignatureScheme::ECDSA_NISTP384_SHA384,
&[
webpki::ring::ECDSA_P384_SHA384,
webpki::ring::ECDSA_P256_SHA384,
],
),
(
rustls::SignatureScheme::ECDSA_NISTP256_SHA256,
&[
webpki::ring::ECDSA_P256_SHA256,
webpki::ring::ECDSA_P384_SHA256,
],
),
(rustls::SignatureScheme::ED25519, &[webpki::ring::ED25519]),
(
rustls::SignatureScheme::RSA_PKCS1_SHA512,
&[webpki::ring::RSA_PKCS1_2048_8192_SHA512],
),
(
rustls::SignatureScheme::RSA_PKCS1_SHA384,
&[webpki::ring::RSA_PKCS1_2048_8192_SHA384],
),
(
rustls::SignatureScheme::RSA_PKCS1_SHA256,
&[webpki::ring::RSA_PKCS1_2048_8192_SHA256],
),
],
};

View File

@ -1,184 +0,0 @@
use futures::prelude::*;
use linkerd_identity as id;
use linkerd_io as io;
use linkerd_meshtls_verifier as verifier;
use linkerd_stack::{NewService, Service};
use linkerd_tls::{client::AlpnProtocols, ClientTls, NegotiatedProtocolRef};
use std::{convert::TryFrom, pin::Pin, sync::Arc, task::Context};
use tokio::sync::watch;
use tokio_rustls::rustls::{self, pki_types::CertificateDer, ClientConfig};
/// A `NewService` that produces `Connect` services from a dynamic TLS configuration.
#[derive(Clone)]
pub struct NewClient {
config: watch::Receiver<Arc<ClientConfig>>,
}
/// A `Service` that initiates client-side TLS connections.
#[derive(Clone)]
pub struct Connect {
server_id: id::Id,
server_name: rustls::pki_types::ServerName<'static>,
config: Arc<ClientConfig>,
}
pub type ConnectFuture<I> = Pin<Box<dyn Future<Output = io::Result<ClientIo<I>>> + Send>>;
#[derive(Debug)]
pub struct ClientIo<I>(tokio_rustls::client::TlsStream<I>);
// === impl NewClient ===
impl NewClient {
pub(crate) fn new(config: watch::Receiver<Arc<ClientConfig>>) -> Self {
Self { config }
}
}
impl NewService<ClientTls> for NewClient {
type Service = Connect;
fn new_service(&self, target: ClientTls) -> Self::Service {
Connect::new(target, (*self.config.borrow()).clone())
}
}
impl std::fmt::Debug for NewClient {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("NewClient").finish()
}
}
// === impl Connect ===
impl Connect {
pub(crate) fn new(client_tls: ClientTls, config: Arc<ClientConfig>) -> Self {
// If ALPN protocols are configured by the endpoint, we have to clone the entire
// configuration and set the protocols. If there are no ALPN options, clone the Arc'd base
// configuration without extra allocation.
//
// TODO it would be better to avoid cloning the whole TLS config per-connection, but the
// Rustls API doesn't give us a lot of options.
let config = match client_tls.alpn {
None => config,
Some(AlpnProtocols(protocols)) => {
let mut c = (*config).clone();
c.alpn_protocols = protocols;
Arc::new(c)
}
};
let server_name =
rustls::pki_types::ServerName::try_from(client_tls.server_name.to_string())
.expect("identity must be a valid DNS name");
Self {
server_id: client_tls.server_id.into(),
server_name,
config,
}
}
}
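The ALPN handling above is a clone-on-write pattern over an `Arc`'d configuration. A minimal standalone sketch (the `Config` struct here is an assumption for the example, not rustls' `ClientConfig`):

```rust
use std::sync::Arc;

// Toy configuration standing in for rustls' `ClientConfig`.
#[derive(Clone, Debug)]
struct Config {
    alpn_protocols: Vec<Vec<u8>>,
}

fn with_alpn(base: Arc<Config>, alpn: Option<Vec<Vec<u8>>>) -> Arc<Config> {
    match alpn {
        // No override: keep sharing the same allocation.
        None => base,
        // Override: clone the inner value once and publish a new `Arc`.
        Some(protocols) => {
            let mut c = (*base).clone();
            c.alpn_protocols = protocols;
            Arc::new(c)
        }
    }
}

fn main() {
    let base = Arc::new(Config { alpn_protocols: vec![] });

    let shared = with_alpn(base.clone(), None);
    assert!(Arc::ptr_eq(&base, &shared));

    let overridden = with_alpn(base.clone(), Some(vec![b"h2".to_vec()]));
    assert!(!Arc::ptr_eq(&base, &overridden));
    assert_eq!(overridden.alpn_protocols, vec![b"h2".to_vec()]);
}
```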
fn extract_cert(c: &rustls::ClientConnection) -> io::Result<&CertificateDer<'_>> {
match c.peer_certificates().and_then(|certs| certs.first()) {
Some(leaf_cert) => io::Result::Ok(leaf_cert),
None => Err(io::Error::other("missing tls end cert")),
}
}
impl<I> Service<I> for Connect
where
I: io::AsyncRead + io::AsyncWrite + Send + Unpin + 'static,
{
type Response = ClientIo<I>;
type Error = io::Error;
type Future = ConnectFuture<I>;
fn poll_ready(&mut self, _cx: &mut Context<'_>) -> io::Poll<()> {
io::Poll::Ready(Ok(()))
}
fn call(&mut self, io: I) -> Self::Future {
let server_id = self.server_id.clone();
Box::pin(
// Connect to the server, sending the `server_name` SNI in the
// client handshake. The provided config should use the
// `AnySanVerifier` to ignore the server certificate's DNS SANs.
// Instead, we extract the server's leaf certificate after the
// handshake and verify that it matches the provided `server_id`.
tokio_rustls::TlsConnector::from(self.config.clone())
// XXX(eliza): it's a bummer that the server name has to be cloned here...
.connect(self.server_name.clone(), io)
.map(move |s| {
let s = s?;
let (_, conn) = s.get_ref();
let end_cert = extract_cert(conn)?;
verifier::verify_id(end_cert, &server_id)?;
Ok(ClientIo(s))
}),
)
}
}
// === impl ClientIo ===
impl<I: io::AsyncRead + io::AsyncWrite + Unpin> io::AsyncRead for ClientIo<I> {
#[inline]
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut io::ReadBuf<'_>,
) -> io::Poll<()> {
Pin::new(&mut self.0).poll_read(cx, buf)
}
}
impl<I: io::AsyncRead + io::AsyncWrite + Unpin> io::AsyncWrite for ClientIo<I> {
#[inline]
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
Pin::new(&mut self.0).poll_flush(cx)
}
#[inline]
fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
Pin::new(&mut self.0).poll_shutdown(cx)
}
#[inline]
fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> io::Poll<usize> {
Pin::new(&mut self.0).poll_write(cx, buf)
}
#[inline]
fn poll_write_vectored(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
bufs: &[io::IoSlice<'_>],
) -> io::Poll<usize> {
Pin::new(&mut self.0).poll_write_vectored(cx, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
self.0.is_write_vectored()
}
}
impl<I> ClientIo<I> {
#[inline]
pub fn negotiated_protocol(&self) -> Option<NegotiatedProtocolRef<'_>> {
self.0
.get_ref()
.1
.alpn_protocol()
.map(NegotiatedProtocolRef)
}
}
impl<I: io::PeerAddr> io::PeerAddr for ClientIo<I> {
#[inline]
fn peer_addr(&self) -> io::Result<std::net::SocketAddr> {
self.0.get_ref().0.peer_addr()
}
}

View File

@ -1,131 +0,0 @@
mod receiver;
mod store;
pub(crate) mod verify;
use crate::backend;
pub use self::{receiver::Receiver, store::Store};
use linkerd_dns_name as dns;
use linkerd_error::Result;
use linkerd_identity as id;
use ring::error::KeyRejected;
use std::sync::Arc;
use thiserror::Error;
use tokio::sync::watch;
use tokio_rustls::rustls::{self, crypto::CryptoProvider};
use tracing::warn;
#[derive(Debug, Error)]
#[error("{0}")]
pub struct InvalidKey(#[source] KeyRejected);
#[derive(Debug, Error)]
#[error("invalid trust roots")]
pub struct InvalidTrustRoots(());
pub fn watch(
local_id: id::Id,
server_name: dns::Name,
roots_pem: &str,
) -> Result<(Store, Receiver)> {
let mut roots = rustls::RootCertStore::empty();
let certs = match rustls_pemfile::certs(&mut std::io::Cursor::new(roots_pem))
.collect::<Result<Vec<_>, _>>()
{
Err(error) => {
warn!(%error, "invalid trust anchors file");
return Err(error.into());
}
Ok(certs) if certs.is_empty() => {
warn!("no valid certs in trust anchors file");
return Err("no trust roots in PEM file".into());
}
Ok(certs) => certs,
};
let (added, skipped) = roots.add_parsable_certificates(certs);
if skipped != 0 {
warn!("Skipped {} invalid trust anchors", skipped);
}
if added == 0 {
return Err("no trust roots loaded".into());
}
// XXX: Rustls's built-in verifiers don't let us tweak things as fully as we'd like (e.g.
// controlling the set of trusted signature algorithms), but they provide good enough
// defaults for now.
// TODO: lock down the verification further.
let server_cert_verifier = Arc::new(verify::AnySanVerifier::new(roots.clone()));
let (client_tx, client_rx) = {
// Since we don't have a certificate yet, build a client configuration
// that doesn't attempt client authentication. Once we get a
// certificate, the `Store` will publish a new configuration with a
// client certificate resolver.
let mut c =
store::client_config_builder(server_cert_verifier.clone()).with_no_client_auth();
// Disable session resumption for the time-being until resumption is
// more tested.
c.resumption = rustls::client::Resumption::disabled();
watch::channel(Arc::new(c))
};
let (server_tx, server_rx) = {
// Since we don't have a certificate yet, use an empty cert resolver so
// that handshaking always fails. Once we get a certificate, the `Store`
// will publish a new configuration with a server certificate resolver.
let empty_resolver = Arc::new(rustls::server::ResolvesServerCertUsingSni::new());
watch::channel(store::server_config(roots.clone(), empty_resolver))
};
let rx = Receiver::new(local_id.clone(), server_name.clone(), client_rx, server_rx);
let store = Store::new(
roots,
server_cert_verifier,
local_id,
server_name,
client_tx,
server_tx,
);
Ok((store, rx))
}
fn default_provider() -> CryptoProvider {
let mut provider = backend::default_provider();
provider.cipher_suites = params::TLS_SUPPORTED_CIPHERSUITES.to_vec();
provider
}
#[cfg(feature = "test-util")]
pub fn for_test(ent: &linkerd_tls_test_util::Entity) -> (Store, Receiver) {
watch(
ent.name.parse().expect("id must be valid"),
ent.name.parse().expect("name must be valid"),
std::str::from_utf8(ent.trust_anchors).expect("roots must be PEM"),
)
.expect("credentials must be valid")
}
#[cfg(feature = "test-util")]
pub fn default_for_test() -> (Store, Receiver) {
for_test(&linkerd_tls_test_util::FOO_NS1)
}
mod params {
use crate::backend;
use tokio_rustls::rustls::{self, crypto::WebPkiSupportedAlgorithms};
// These must be kept in sync:
pub static SIGNATURE_ALG_RING_SIGNING: &ring::signature::EcdsaSigningAlgorithm =
&ring::signature::ECDSA_P256_SHA256_ASN1_SIGNING;
pub const SIGNATURE_ALG_RUSTLS_SCHEME: rustls::SignatureScheme =
rustls::SignatureScheme::ECDSA_NISTP256_SHA256;
pub const SIGNATURE_ALG_RUSTLS_ALGORITHM: rustls::SignatureAlgorithm =
rustls::SignatureAlgorithm::ECDSA;
pub static SUPPORTED_SIG_ALGS: &WebPkiSupportedAlgorithms = backend::SUPPORTED_SIG_ALGS;
pub static TLS_VERSIONS: &[&rustls::SupportedProtocolVersion] = &[&rustls::version::TLS13];
pub static TLS_SUPPORTED_CIPHERSUITES: &[rustls::SupportedCipherSuite] =
backend::TLS_SUPPORTED_CIPHERSUITES;
}

View File

@ -1,14 +0,0 @@
#![deny(rust_2018_idioms, clippy::disallowed_methods, clippy::disallowed_types)]
#![forbid(unsafe_code)]
mod backend;
mod client;
pub mod creds;
mod server;
#[cfg(test)]
mod tests;
pub use self::{
client::{ClientIo, Connect, ConnectFuture, NewClient},
server::{Server, ServerIo, TerminateFuture},
};

View File

@ -1,197 +0,0 @@
use futures::prelude::*;
use linkerd_dns_name as dns;
use linkerd_io as io;
use linkerd_meshtls_verifier as verifier;
use linkerd_stack::{Param, Service};
use linkerd_tls::{ClientId, NegotiatedProtocol, NegotiatedProtocolRef, ServerName, ServerTls};
use std::{pin::Pin, sync::Arc, task::Context};
use thiserror::Error;
use tokio::sync::watch;
use tokio_rustls::rustls::{pki_types::CertificateDer, ServerConfig};
use tracing::debug;
/// A Service that terminates TLS connections using a dynamically updated server configuration.
#[derive(Clone)]
pub struct Server {
name: dns::Name,
rx: watch::Receiver<Arc<ServerConfig>>,
}
pub type TerminateFuture<I> = futures::future::MapOk<
tokio_rustls::Accept<I>,
fn(tokio_rustls::server::TlsStream<I>) -> (ServerTls, ServerIo<I>),
>;
#[derive(Debug)]
pub struct ServerIo<I>(tokio_rustls::server::TlsStream<I>);
#[derive(Debug, Error)]
#[error("credential store lost")]
pub struct LostStore(());
impl Server {
pub(crate) fn new(name: dns::Name, rx: watch::Receiver<Arc<ServerConfig>>) -> Self {
Self { name, rx }
}
#[cfg(test)]
pub(crate) fn config(&self) -> Arc<ServerConfig> {
(*self.rx.borrow()).clone()
}
/// Spawns a background task that watches for TLS configuration updates and creates an augmented
/// configuration with the provided ALPN protocols. The returned server uses this ALPN-aware
/// configuration.
pub fn spawn_with_alpn(self, alpn_protocols: Vec<Vec<u8>>) -> Result<Self, LostStore> {
if alpn_protocols.is_empty() {
return Ok(self);
}
let mut orig_rx = self.rx;
let mut c = (**orig_rx.borrow_and_update()).clone();
c.alpn_protocols.clone_from(&alpn_protocols);
let (tx, rx) = watch::channel(c.into());
// Spawn a background task that watches the optional server configuration and publishes it
// as a reliable channel, including any ALPN overrides.
//
// The background task completes when the original sender is closed or when all receivers
// are dropped.
tokio::spawn(async move {
loop {
tokio::select! {
_ = tx.closed() => {
debug!("ALPN TLS config receivers dropped");
return;
}
res = orig_rx.changed() => {
if res.is_err() {
debug!("TLS config sender closed");
return;
}
}
}
let mut c = (*orig_rx.borrow().clone()).clone();
c.alpn_protocols.clone_from(&alpn_protocols);
let _ = tx.send(c.into());
}
});
Ok(Self::new(self.name, rx))
}
}
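The republishing loop in `spawn_with_alpn` can be reduced to a small `tokio::sync::watch` mirror task. A minimal sketch under simplified assumptions (a `String` payload and an "-alpn" suffix stand in for the real `ServerConfig` and ALPN override):

```rust
use tokio::sync::watch;

#[tokio::main]
async fn main() {
    // Upstream configuration source and a derived channel carrying the
    // "augmented" value.
    let (orig_tx, mut orig_rx) = watch::channel(String::from("base"));
    let initial = format!("{}-alpn", &*orig_rx.borrow_and_update());
    let (tx, mut rx) = watch::channel(initial);

    tokio::spawn(async move {
        loop {
            tokio::select! {
                // Stop when every downstream receiver is gone...
                _ = tx.closed() => return,
                // ...or when the upstream sender is dropped.
                res = orig_rx.changed() => {
                    if res.is_err() {
                        return;
                    }
                }
            }
            // Re-apply the override to the latest upstream value and publish it.
            let augmented = format!("{}-alpn", &*orig_rx.borrow());
            let _ = tx.send(augmented);
        }
    });

    orig_tx.send(String::from("updated")).expect("receiver is alive");
    rx.changed().await.expect("background task republishes");
    assert_eq!(&*rx.borrow(), "updated-alpn");
}
```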
impl Param<ServerName> for Server {
fn param(&self) -> ServerName {
ServerName(self.name.clone())
}
}
impl<I> Service<I> for Server
where
I: io::AsyncRead + io::AsyncWrite + Send + Unpin,
{
type Response = (ServerTls, ServerIo<I>);
type Error = std::io::Error;
type Future = TerminateFuture<I>;
#[inline]
fn poll_ready(&mut self, _cx: &mut Context<'_>) -> io::Poll<()> {
io::Poll::Ready(Ok(()))
}
#[inline]
fn call(&mut self, io: I) -> Self::Future {
tokio_rustls::TlsAcceptor::from((*self.rx.borrow()).clone())
.accept(io)
.map_ok(|io| {
// Determine the peer's identity, if it exists.
let client_id = client_identity(&io);
let negotiated_protocol = io
.get_ref()
.1
.alpn_protocol()
.map(|b| NegotiatedProtocol(b.into()));
debug!(client.id = ?client_id, alpn = ?negotiated_protocol, "Accepted TLS connection");
let tls = ServerTls::Established {
client_id,
negotiated_protocol,
};
(tls, ServerIo(io))
})
}
}
fn client_identity<I>(tls: &tokio_rustls::server::TlsStream<I>) -> Option<ClientId> {
let (_io, session) = tls.get_ref();
let certs = session.peer_certificates()?;
let c = certs.first().map(CertificateDer::as_ref)?;
verifier::client_identity(c).map(ClientId)
}
// === impl ServerIo ===
impl<I: io::AsyncRead + io::AsyncWrite + Unpin> io::AsyncRead for ServerIo<I> {
#[inline]
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut io::ReadBuf<'_>,
) -> io::Poll<()> {
Pin::new(&mut self.0).poll_read(cx, buf)
}
}
impl<I: io::AsyncRead + io::AsyncWrite + Unpin> io::AsyncWrite for ServerIo<I> {
#[inline]
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
Pin::new(&mut self.0).poll_flush(cx)
}
#[inline]
fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
Pin::new(&mut self.0).poll_shutdown(cx)
}
#[inline]
fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> io::Poll<usize> {
Pin::new(&mut self.0).poll_write(cx, buf)
}
#[inline]
fn poll_write_vectored(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
bufs: &[io::IoSlice<'_>],
) -> io::Poll<usize> {
Pin::new(&mut self.0).poll_write_vectored(cx, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
self.0.is_write_vectored()
}
}
impl<I> ServerIo<I> {
#[inline]
pub fn negotiated_protocol(&self) -> Option<NegotiatedProtocolRef<'_>> {
self.0
.get_ref()
.1
.alpn_protocol()
.map(NegotiatedProtocolRef)
}
}
impl<I: io::PeerAddr> io::PeerAddr for ServerIo<I> {
#[inline]
fn peer_addr(&self) -> io::Result<std::net::SocketAddr> {
self.0.get_ref().0.peer_addr()
}
}

View File

@ -1,50 +0,0 @@
use linkerd_identity::{Credentials, DerX509};
use linkerd_tls_test_util::*;
use std::time::{Duration, SystemTime};
fn load(ent: &Entity) -> crate::creds::Store {
let roots_pem = std::str::from_utf8(ent.trust_anchors).expect("valid PEM");
let (store, _) = crate::creds::watch(
ent.name.parse().unwrap(),
ent.name.parse().unwrap(),
roots_pem,
)
.expect("credentials must be readable");
store
}
#[test]
fn can_construct_client_and_server_config_from_valid_settings() {
assert!(load(&FOO_NS1)
.set_certificate(
DerX509(FOO_NS1.crt.to_vec()),
vec![],
FOO_NS1.key.to_vec(),
SystemTime::now() + Duration::from_secs(1000)
)
.is_ok());
}
#[test]
fn recognize_ca_did_not_issue_cert() {
assert!(load(&FOO_NS1_CA2)
.set_certificate(
DerX509(FOO_NS1.crt.to_vec()),
vec![],
FOO_NS1.key.to_vec(),
SystemTime::now() + Duration::from_secs(1000)
)
.is_err());
}
#[test]
fn recognize_cert_is_not_valid_for_identity() {
assert!(load(&BAR_NS1)
.set_certificate(
DerX509(FOO_NS1.crt.to_vec()),
vec![],
FOO_NS1.key.to_vec(),
SystemTime::now() + Duration::from_secs(1000)
)
.is_err());
}

View File

@ -1,92 +1,93 @@
use futures::prelude::*;
use linkerd_identity as id;
use linkerd_io as io;
use linkerd_meshtls_verifier as verifier;
use linkerd_stack::{NewService, Service};
use linkerd_tls::{ClientTls, NegotiatedProtocol};
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
#[cfg(feature = "boring")]
use crate::boring;
#[cfg(feature = "rustls")]
use crate::rustls;
#[cfg(not(feature = "__has_any_tls_impls"))]
use std::marker::PhantomData;
#[derive(Clone, Debug)]
pub enum NewClient {
#[cfg(feature = "boring")]
Boring(boring::NewClient),
#[cfg(feature = "rustls")]
Rustls(rustls::NewClient),
#[cfg(not(feature = "__has_any_tls_impls"))]
NoTls,
}
use linkerd_tls::{client::AlpnProtocols, ClientTls, NegotiatedProtocol, NegotiatedProtocolRef};
use std::{convert::TryFrom, pin::Pin, sync::Arc, task::Context};
use tokio::sync::watch;
use tokio_rustls::rustls::{self, pki_types::CertificateDer, ClientConfig};
/// A `NewService` that produces `Connect` services from a dynamic TLS configuration.
#[derive(Clone)]
pub enum Connect {
#[cfg(feature = "boring")]
Boring(boring::Connect),
#[cfg(feature = "rustls")]
Rustls(rustls::Connect),
#[cfg(not(feature = "__has_any_tls_impls"))]
NoTls,
pub struct NewClient {
config: watch::Receiver<Arc<ClientConfig>>,
}
#[pin_project::pin_project(project = ConnectFutureProj)]
pub enum ConnectFuture<I> {
#[cfg(feature = "boring")]
Boring(#[pin] boring::ConnectFuture<I>),
#[cfg(feature = "rustls")]
Rustls(#[pin] rustls::ConnectFuture<I>),
#[cfg(not(feature = "__has_any_tls_impls"))]
NoTls(PhantomData<fn(I)>),
/// A `Service` that initiates client-side TLS connections.
#[derive(Clone)]
pub struct Connect {
server_id: id::Id,
server_name: rustls::pki_types::ServerName<'static>,
config: Arc<ClientConfig>,
}
#[pin_project::pin_project(project = ClientIoProj)]
pub type ConnectFuture<I> =
Pin<Box<dyn Future<Output = io::Result<(ClientIo<I>, Option<NegotiatedProtocol>)>> + Send>>;
#[derive(Debug)]
pub enum ClientIo<I> {
#[cfg(feature = "boring")]
Boring(#[pin] boring::ClientIo<I>),
#[cfg(feature = "rustls")]
Rustls(#[pin] rustls::ClientIo<I>),
#[cfg(not(feature = "__has_any_tls_impls"))]
NoTls(PhantomData<fn(I)>),
}
pub struct ClientIo<I>(tokio_rustls::client::TlsStream<I>);
// === impl NewClient ===
impl NewClient {
pub(crate) fn new(config: watch::Receiver<Arc<ClientConfig>>) -> Self {
Self { config }
}
}
impl NewService<ClientTls> for NewClient {
type Service = Connect;
#[inline]
fn new_service(&self, target: ClientTls) -> Self::Service {
match self {
#[cfg(feature = "boring")]
Self::Boring(new_client) => Connect::Boring(new_client.new_service(target)),
Connect::new(target, (*self.config.borrow()).clone())
}
}
#[cfg(feature = "rustls")]
Self::Rustls(new_client) => Connect::Rustls(new_client.new_service(target)),
#[cfg(not(feature = "__has_any_tls_impls"))]
_ => crate::no_tls!(target),
}
impl std::fmt::Debug for NewClient {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("NewClient").finish()
}
}
// === impl Connect ===
impl Connect {
pub(crate) fn new(client_tls: ClientTls, config: Arc<ClientConfig>) -> Self {
// If ALPN protocols are configured by the endpoint, we have to clone the entire
// configuration and set the protocols. If there are no ALPN options, clone the Arc'd base
// configuration without extra allocation.
//
// TODO it would be better to avoid cloning the whole TLS config per-connection, but the
// Rustls API doesn't give us a lot of options.
let config = match client_tls.alpn {
None => config,
Some(AlpnProtocols(protocols)) => {
let mut c = (*config).clone();
c.alpn_protocols = protocols;
Arc::new(c)
}
};
let server_name =
rustls::pki_types::ServerName::try_from(client_tls.server_name.to_string())
.expect("identity must be a valid DNS name");
Self {
server_id: client_tls.server_id.into(),
server_name,
config,
}
}
}
fn extract_cert(c: &rustls::ClientConnection) -> io::Result<&CertificateDer<'_>> {
match c.peer_certificates().and_then(|certs| certs.first()) {
Some(leaf_cert) => io::Result::Ok(leaf_cert),
None => Err(io::Error::other("missing tls end cert")),
}
}
impl<I> Service<I> for Connect
where
I: io::AsyncRead + io::AsyncWrite + Send + Unpin + 'static,
@ -95,67 +96,31 @@ where
type Error = io::Error;
type Future = ConnectFuture<I>;
#[inline]
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
match self {
#[cfg(feature = "boring")]
Self::Boring(connect) => <boring::Connect as Service<I>>::poll_ready(connect, cx),
#[cfg(feature = "rustls")]
Self::Rustls(connect) => <rustls::Connect as Service<I>>::poll_ready(connect, cx),
#[cfg(not(feature = "__has_any_tls_impls"))]
_ => crate::no_tls!(cx),
}
fn poll_ready(&mut self, _cx: &mut Context<'_>) -> io::Poll<()> {
io::Poll::Ready(Ok(()))
}
#[inline]
fn call(&mut self, io: I) -> Self::Future {
match self {
#[cfg(feature = "boring")]
Self::Boring(connect) => ConnectFuture::Boring(connect.call(io)),
#[cfg(feature = "rustls")]
Self::Rustls(connect) => ConnectFuture::Rustls(connect.call(io)),
#[cfg(not(feature = "__has_any_tls_impls"))]
_ => crate::no_tls!(io),
}
}
}
// === impl ConnectFuture ===
impl<I> Future for ConnectFuture<I>
where
I: io::AsyncRead + io::AsyncWrite + Unpin,
{
type Output = io::Result<(ClientIo<I>, Option<NegotiatedProtocol>)>;
#[inline]
fn poll(self: std::pin::Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
match self.project() {
#[cfg(feature = "boring")]
ConnectFutureProj::Boring(f) => {
let res = futures::ready!(f.poll(cx));
Poll::Ready(res.map(|io| {
let server_id = self.server_id.clone();
Box::pin(
// Connect to the server, sending the `server_name` SNI in the
// client handshake. The provided config should use the
// `AnySanVerifier` to ignore the server certificate's DNS SANs.
// Instead, we extract the server's leaf certificate after the
// handshake and verify that it matches the provided `server_id`.
tokio_rustls::TlsConnector::from(self.config.clone())
// XXX(eliza): it's a bummer that the server name has to be cloned here...
.connect(self.server_name.clone(), io)
.map(move |s| {
let s = s?;
let (_, conn) = s.get_ref();
let end_cert = extract_cert(conn)?;
verifier::verify_id(end_cert, &server_id)?;
let io = ClientIo(s);
let np = io.negotiated_protocol().map(|np| np.to_owned());
(ClientIo::Boring(io), np)
}))
}
#[cfg(feature = "rustls")]
ConnectFutureProj::Rustls(f) => {
let res = futures::ready!(f.poll(cx));
Poll::Ready(res.map(|io| {
let np = io.negotiated_protocol().map(|np| np.to_owned());
(ClientIo::Rustls(io), np)
}))
}
#[cfg(not(feature = "__has_any_tls_impls"))]
_ => crate::no_tls!(cx),
}
Ok((io, np))
}),
)
}
}
@ -164,104 +129,59 @@ where
impl<I: io::AsyncRead + io::AsyncWrite + Unpin> io::AsyncRead for ClientIo<I> {
#[inline]
fn poll_read(
self: Pin<&mut Self>,
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut io::ReadBuf<'_>,
) -> io::Poll<()> {
match self.project() {
#[cfg(feature = "boring")]
ClientIoProj::Boring(io) => io.poll_read(cx, buf),
#[cfg(feature = "rustls")]
ClientIoProj::Rustls(io) => io.poll_read(cx, buf),
#[cfg(not(feature = "__has_any_tls_impls"))]
_ => crate::no_tls!(cx, buf),
}
Pin::new(&mut self.0).poll_read(cx, buf)
}
}
impl<I: io::AsyncRead + io::AsyncWrite + Unpin> io::AsyncWrite for ClientIo<I> {
#[inline]
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
match self.project() {
#[cfg(feature = "boring")]
ClientIoProj::Boring(io) => io.poll_flush(cx),
#[cfg(feature = "rustls")]
ClientIoProj::Rustls(io) => io.poll_flush(cx),
#[cfg(not(feature = "__has_any_tls_impls"))]
_ => crate::no_tls!(cx),
}
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
Pin::new(&mut self.0).poll_flush(cx)
}
#[inline]
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
match self.project() {
#[cfg(feature = "boring")]
ClientIoProj::Boring(io) => io.poll_shutdown(cx),
#[cfg(feature = "rustls")]
ClientIoProj::Rustls(io) => io.poll_shutdown(cx),
#[cfg(not(feature = "__has_any_tls_impls"))]
_ => crate::no_tls!(cx),
}
fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
Pin::new(&mut self.0).poll_shutdown(cx)
}
#[inline]
fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> io::Poll<usize> {
match self.project() {
#[cfg(feature = "boring")]
ClientIoProj::Boring(io) => io.poll_write(cx, buf),
#[cfg(feature = "rustls")]
ClientIoProj::Rustls(io) => io.poll_write(cx, buf),
#[cfg(not(feature = "__has_any_tls_impls"))]
_ => crate::no_tls!(cx, buf),
}
fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> io::Poll<usize> {
Pin::new(&mut self.0).poll_write(cx, buf)
}
#[inline]
fn poll_write_vectored(
self: Pin<&mut Self>,
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
bufs: &[io::IoSlice<'_>],
) -> Poll<Result<usize, std::io::Error>> {
match self.project() {
#[cfg(feature = "boring")]
ClientIoProj::Boring(io) => io.poll_write_vectored(cx, bufs),
#[cfg(feature = "rustls")]
ClientIoProj::Rustls(io) => io.poll_write_vectored(cx, bufs),
#[cfg(not(feature = "__has_any_tls_impls"))]
_ => crate::no_tls!(cx, bufs),
}
) -> io::Poll<usize> {
Pin::new(&mut self.0).poll_write_vectored(cx, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
match self {
#[cfg(feature = "boring")]
Self::Boring(io) => io.is_write_vectored(),
self.0.is_write_vectored()
}
}
#[cfg(feature = "rustls")]
Self::Rustls(io) => io.is_write_vectored(),
#[cfg(not(feature = "__has_any_tls_impls"))]
_ => crate::no_tls!(),
}
impl<I> ClientIo<I> {
#[inline]
pub fn negotiated_protocol(&self) -> Option<NegotiatedProtocolRef<'_>> {
self.0
.get_ref()
.1
.alpn_protocol()
.map(NegotiatedProtocolRef)
}
}
impl<I: io::PeerAddr> io::PeerAddr for ClientIo<I> {
#[inline]
fn peer_addr(&self) -> io::Result<std::net::SocketAddr> {
match self {
#[cfg(feature = "boring")]
Self::Boring(io) => io.peer_addr(),
#[cfg(feature = "rustls")]
Self::Rustls(io) => io.peer_addr(),
#[cfg(not(feature = "__has_any_tls_impls"))]
_ => crate::no_tls!(),
}
self.0.get_ref().0.peer_addr()
}
}

View File

@ -1,122 +1,101 @@
use std::time::SystemTime;
mod receiver;
mod store;
pub(crate) mod verify;
use crate::{NewClient, Server};
pub use self::{receiver::Receiver, store::Store};
use linkerd_dns_name as dns;
use linkerd_error::Result;
use linkerd_identity::{Credentials, DerX509, Id};
use linkerd_identity as id;
use std::sync::Arc;
use thiserror::Error;
use tokio::sync::watch;
use tokio_rustls::rustls::{self};
use tracing::warn;
#[cfg(feature = "boring")]
pub use crate::boring;
#[derive(Debug, Error)]
#[error("invalid trust roots")]
pub struct InvalidTrustRoots(());
#[cfg(feature = "rustls")]
pub use crate::rustls;
pub enum Store {
#[cfg(feature = "boring")]
Boring(boring::creds::Store),
#[cfg(feature = "rustls")]
Rustls(rustls::creds::Store),
#[cfg(not(feature = "__has_any_tls_impls"))]
NoTls,
}
#[derive(Clone, Debug)]
pub enum Receiver {
#[cfg(feature = "boring")]
Boring(boring::creds::Receiver),
#[cfg(feature = "rustls")]
Rustls(rustls::creds::Receiver),
#[cfg(not(feature = "__has_any_tls_impls"))]
NoTls,
}
// === impl Store ===
impl Credentials for Store {
fn set_certificate(
&mut self,
leaf: DerX509,
chain: Vec<DerX509>,
key: Vec<u8>,
exp: SystemTime,
) -> Result<()> {
match self {
#[cfg(feature = "boring")]
Self::Boring(store) => store.set_certificate(leaf, chain, key, exp),
#[cfg(feature = "rustls")]
Self::Rustls(store) => store.set_certificate(leaf, chain, key, exp),
#[cfg(not(feature = "__has_any_tls_impls"))]
_ => crate::no_tls!(leaf, chain, key, exp),
pub fn watch(
local_id: id::Id,
server_name: dns::Name,
roots_pem: &str,
) -> Result<(Store, Receiver)> {
let mut roots = rustls::RootCertStore::empty();
let certs = match rustls_pemfile::certs(&mut std::io::Cursor::new(roots_pem))
.collect::<Result<Vec<_>, _>>()
{
Err(error) => {
warn!(%error, "invalid trust anchors file");
return Err(error.into());
}
Ok(certs) if certs.is_empty() => {
warn!("no valid certs in trust anchors file");
return Err("no trust roots in PEM file".into());
}
Ok(certs) => certs,
};
let (added, skipped) = roots.add_parsable_certificates(certs);
if skipped != 0 {
warn!("Skipped {} invalid trust anchors", skipped);
}
if added == 0 {
return Err("no trust roots loaded".into());
}
// XXX: Rustls's built-in verifiers don't let us tweak things as fully as we'd like (e.g.
// controlling the set of trusted signature algorithms), but they provide good enough
// defaults for now.
// TODO: lock down the verification further.
let server_cert_verifier = Arc::new(verify::AnySanVerifier::new(roots.clone()));
let (client_tx, client_rx) = {
// Since we don't have a certificate yet, build a client configuration
// that doesn't attempt client authentication. Once we get a
// certificate, the `Store` will publish a new configuration with a
// client certificate resolver.
let mut c =
store::client_config_builder(server_cert_verifier.clone()).with_no_client_auth();
// Disable session resumption for the time being, until resumption has
// been tested more thoroughly.
c.resumption = rustls::client::Resumption::disabled();
watch::channel(Arc::new(c))
};
let (server_tx, server_rx) = {
// Since we don't have a certificate yet, use an empty cert resolver so
// that handshaking always fails. Once we get a certificate, the `Store`
// will publish a new configuration with a server certificate resolver.
let empty_resolver = Arc::new(rustls::server::ResolvesServerCertUsingSni::new());
watch::channel(store::server_config(roots.clone(), empty_resolver))
};
let rx = Receiver::new(local_id.clone(), server_name.clone(), client_rx, server_rx);
let store = Store::new(
roots,
server_cert_verifier,
local_id,
server_name,
client_tx,
server_tx,
);
Ok((store, rx))
}
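For readers unfamiliar with the rustls-pemfile flow used in `watch` above, here is a standalone sketch of just the trust-anchor loading step. It assumes rustls 0.23 and rustls-pemfile 2.x; `load_roots` is an illustrative name, not part of this change.

use tokio_rustls::rustls::RootCertStore;

fn load_roots(roots_pem: &str) -> Result<RootCertStore, Box<dyn std::error::Error>> {
    // Parse every certificate in the PEM bundle, failing on the first I/O error.
    let certs = rustls_pemfile::certs(&mut std::io::Cursor::new(roots_pem))
        .collect::<Result<Vec<_>, _>>()?;
    if certs.is_empty() {
        return Err("no trust roots in PEM file".into());
    }

    // `add_parsable_certificates` tolerates individual malformed certificates
    // and reports how many were added vs. skipped.
    let mut roots = RootCertStore::empty();
    let (added, skipped) = roots.add_parsable_certificates(certs);
    if skipped != 0 {
        eprintln!("skipped {skipped} invalid trust anchors");
    }
    if added == 0 {
        return Err("no trust roots loaded".into());
    }
    Ok(roots)
}

fn main() {
    // A bogus bundle parses to zero certificates, which is rejected.
    assert!(load_roots("not a pem file").is_err());
}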
// === impl Receiver ===
#[cfg(feature = "boring")]
impl From<boring::creds::Receiver> for Receiver {
fn from(rx: boring::creds::Receiver) -> Self {
Self::Boring(rx)
}
#[cfg(feature = "test-util")]
pub fn for_test(ent: &linkerd_tls_test_util::Entity) -> (Store, Receiver) {
watch(
ent.name.parse().expect("id must be valid"),
ent.name.parse().expect("name must be valid"),
std::str::from_utf8(ent.trust_anchors).expect("roots must be PEM"),
)
.expect("credentials must be valid")
}
#[cfg(feature = "rustls")]
impl From<rustls::creds::Receiver> for Receiver {
fn from(rx: rustls::creds::Receiver) -> Self {
Self::Rustls(rx)
}
}
impl Receiver {
pub fn local_id(&self) -> &Id {
match self {
#[cfg(feature = "boring")]
Self::Boring(receiver) => receiver.local_id(),
#[cfg(feature = "rustls")]
Self::Rustls(receiver) => receiver.local_id(),
#[cfg(not(feature = "__has_any_tls_impls"))]
_ => crate::no_tls!(),
}
}
pub fn server_name(&self) -> &dns::Name {
match self {
#[cfg(feature = "boring")]
Self::Boring(receiver) => receiver.server_name(),
#[cfg(feature = "rustls")]
Self::Rustls(receiver) => receiver.server_name(),
#[cfg(not(feature = "__has_any_tls_impls"))]
_ => crate::no_tls!(),
}
}
pub fn new_client(&self) -> NewClient {
match self {
#[cfg(feature = "boring")]
Self::Boring(receiver) => NewClient::Boring(receiver.new_client()),
#[cfg(feature = "rustls")]
Self::Rustls(receiver) => NewClient::Rustls(receiver.new_client()),
#[cfg(not(feature = "__has_any_tls_impls"))]
_ => crate::no_tls!(),
}
}
pub fn server(&self) -> Server {
match self {
#[cfg(feature = "boring")]
Self::Boring(receiver) => Server::Boring(receiver.server()),
#[cfg(feature = "rustls")]
Self::Rustls(receiver) => Server::Rustls(receiver.server()),
#[cfg(not(feature = "__has_any_tls_impls"))]
_ => crate::no_tls!(),
}
}
#[cfg(feature = "test-util")]
pub fn default_for_test() -> (Store, Receiver) {
for_test(&linkerd_tls_test_util::FOO_NS1)
}

@ -70,7 +70,7 @@ mod tests {
/// incoming handshakes, but that doesn't matter for these tests, where we
/// don't actually do any TLS.
fn empty_server_config() -> rustls::ServerConfig {
- rustls::ServerConfig::builder_with_provider(Arc::new(crate::backend::default_provider()))
+ rustls::ServerConfig::builder_with_provider(linkerd_rustls::get_default_provider())
.with_protocol_versions(rustls::ALL_VERSIONS)
.expect("client config must be valid")
.with_client_cert_verifier(Arc::new(rustls::server::NoClientAuth))
@ -83,7 +83,7 @@ mod tests {
/// it doesn't trust any root certificates. However, that doesn't actually
/// matter for these tests, which don't actually do TLS.
fn empty_client_config() -> rustls::ClientConfig {
- rustls::ClientConfig::builder_with_provider(Arc::new(crate::backend::default_provider()))
+ rustls::ClientConfig::builder_with_provider(linkerd_rustls::get_default_provider())
.with_protocol_versions(rustls::ALL_VERSIONS)
.expect("client config must be valid")
.with_root_certificates(rustls::RootCertStore::empty())
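As a reference for the builder chain used in these test helpers, a minimal standalone constructor is sketched below. It assumes rustls 0.23 with the `ring` provider feature; `linkerd_rustls::get_default_provider()` in the real code plays the role that `ring::default_provider()` plays here.

use std::sync::Arc;

use tokio_rustls::rustls::{self, crypto::ring, RootCertStore};

// A client config with an explicit crypto provider and an empty root store;
// handshakes would fail verification, which is fine for tests that never
// complete one.
fn empty_client_config() -> Arc<rustls::ClientConfig> {
    let config = rustls::ClientConfig::builder_with_provider(Arc::new(ring::default_provider()))
        .with_protocol_versions(rustls::ALL_VERSIONS)
        .expect("protocol versions must be supported by the provider")
        .with_root_certificates(RootCertStore::empty())
        .with_no_client_auth();
    Arc::new(config)
}

fn main() {
    // ALPN stays empty until configured explicitly.
    assert!(empty_client_config().alpn_protocols.is_empty());
}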

@ -1,12 +1,15 @@
use super::{default_provider, params::*, InvalidKey};
use linkerd_dns_name as dns;
use linkerd_error::Result;
use linkerd_identity as id;
use linkerd_meshtls_verifier as verifier;
use ring::{rand, signature::EcdsaKeyPair};
use std::{convert::TryFrom, sync::Arc};
use tokio::sync::watch;
- use tokio_rustls::rustls::{self, pki_types::UnixTime, server::WebPkiClientVerifier};
+ use tokio_rustls::rustls::{
+ self,
+ pki_types::{PrivatePkcs8KeyDer, UnixTime},
+ server::WebPkiClientVerifier,
+ sign::CertifiedKey,
+ };
use tracing::debug;
pub struct Store {
@ -16,20 +19,16 @@ pub struct Store {
server_name: dns::Name,
client_tx: watch::Sender<Arc<rustls::ClientConfig>>,
server_tx: watch::Sender<Arc<rustls::ServerConfig>>,
random: ring::rand::SystemRandom,
}
#[derive(Clone, Debug)]
struct Key(Arc<EcdsaKeyPair>);
#[derive(Clone, Debug)]
struct CertResolver(Arc<rustls::sign::CertifiedKey>);
pub(super) fn client_config_builder(
cert_verifier: Arc<dyn rustls::client::danger::ServerCertVerifier>,
) -> rustls::ConfigBuilder<rustls::ClientConfig, rustls::client::WantsClientCert> {
- rustls::ClientConfig::builder_with_provider(Arc::new(default_provider()))
- .with_protocol_versions(TLS_VERSIONS)
+ rustls::ClientConfig::builder_with_provider(linkerd_rustls::get_default_provider())
+ .with_protocol_versions(linkerd_rustls::TLS_VERSIONS)
.expect("client config must be valid")
// XXX: Rustls's built-in verifiers don't let us tweak things as fully
// as we'd like (e.g. controlling the set of trusted signature
@ -55,7 +54,7 @@ pub(super) fn server_config(
// controlling the set of trusted signature algorithms), but they provide good enough
// defaults for now.
// TODO: lock down the verification further.
- let provider = Arc::new(default_provider());
+ let provider = linkerd_rustls::get_default_provider();
let client_cert_verifier =
WebPkiClientVerifier::builder_with_provider(Arc::new(roots), provider.clone())
@ -64,7 +63,7 @@ pub(super) fn server_config(
.expect("server verifier must be valid");
rustls::ServerConfig::builder_with_provider(provider)
- .with_protocol_versions(TLS_VERSIONS)
+ .with_protocol_versions(linkerd_rustls::TLS_VERSIONS)
.expect("server config must be valid")
.with_client_cert_verifier(client_cert_verifier)
.with_cert_resolver(resolver)
@ -90,7 +89,6 @@ impl Store {
server_name,
client_tx,
server_tx,
random: ring::rand::SystemRandom::new(),
}
}
@ -147,13 +145,11 @@ impl id::Credentials for Store {
// Use the client's verifier to validate the certificate for our local name.
self.validate(&chain)?;
- let key = EcdsaKeyPair::from_pkcs8(SIGNATURE_ALG_RING_SIGNING, &key, &self.random)
- .map_err(InvalidKey)?;
- let resolver = Arc::new(CertResolver(Arc::new(rustls::sign::CertifiedKey::new(
- chain,
- Arc::new(Key(Arc::new(key))),
- ))));
+ let key_der = PrivatePkcs8KeyDer::from(key);
+ let provider = rustls::crypto::CryptoProvider::get_default()
+ .expect("Failed to get default crypto provider");
+ let key = CertifiedKey::from_der(chain, key_der.into(), provider)?;
+ let resolver = Arc::new(CertResolver(Arc::new(key)));
// Build new client and server TLS configs.
let client = self.client_config(resolver.clone());
@ -167,39 +163,6 @@ impl id::Credentials for Store {
}
}
// === impl Key ===
impl rustls::sign::SigningKey for Key {
fn choose_scheme(
&self,
offered: &[rustls::SignatureScheme],
) -> Option<Box<dyn rustls::sign::Signer>> {
if !offered.contains(&SIGNATURE_ALG_RUSTLS_SCHEME) {
return None;
}
Some(Box::new(self.clone()))
}
fn algorithm(&self) -> rustls::SignatureAlgorithm {
SIGNATURE_ALG_RUSTLS_ALGORITHM
}
}
impl rustls::sign::Signer for Key {
fn sign(&self, message: &[u8]) -> Result<Vec<u8>, rustls::Error> {
let rng = rand::SystemRandom::new();
self.0
.sign(&rng, message)
.map(|signature| signature.as_ref().to_owned())
.map_err(|ring::error::Unspecified| rustls::Error::General("Signing Failed".to_owned()))
}
fn scheme(&self) -> rustls::SignatureScheme {
SIGNATURE_ALG_RUSTLS_SCHEME
}
}
// === impl CertResolver ===
impl CertResolver {
@ -208,7 +171,7 @@ impl CertResolver {
&self,
sigschemes: &[rustls::SignatureScheme],
) -> Option<Arc<rustls::sign::CertifiedKey>> {
- if !sigschemes.contains(&SIGNATURE_ALG_RUSTLS_SCHEME) {
+ if !sigschemes.contains(&linkerd_rustls::SIGNATURE_ALG_RUSTLS_SCHEME) {
debug!("Signature scheme not supported -> no certificate");
return None;
}
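The store change above swaps the hand-rolled ring `SigningKey`/`Signer` for rustls's own key handling. A condensed sketch of that conversion, mirroring the calls in the diff (rustls 0.23 assumed; `certified_key` is an illustrative helper, and a process-wide default `CryptoProvider` must already be installed or unambiguous):

use std::sync::Arc;

use tokio_rustls::rustls::{
    crypto::CryptoProvider,
    pki_types::{CertificateDer, PrivatePkcs8KeyDer},
    sign::CertifiedKey,
};

// Turns a certificate chain plus a PKCS#8 private key into the resolver input
// rustls expects.
fn certified_key(
    chain: Vec<CertificateDer<'static>>,
    key_pkcs8: Vec<u8>,
) -> Result<Arc<CertifiedKey>, Box<dyn std::error::Error>> {
    let key_der = PrivatePkcs8KeyDer::from(key_pkcs8);
    // `get_default` only returns a provider once one has been installed or a
    // single provider crate feature makes the choice unambiguous.
    let provider = CryptoProvider::get_default().ok_or("no default crypto provider installed")?;
    let key = CertifiedKey::from_der(chain, key_der.into(), provider)?;
    Ok(Arc::new(key))
}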

@ -1,4 +1,4 @@
- use crate::creds::params::SUPPORTED_SIG_ALGS;
+ use linkerd_rustls::SUPPORTED_SIG_ALGS;
use std::{convert::TryFrom, sync::Arc};
use tokio_rustls::rustls::{
self,

@ -15,132 +15,11 @@
mod client;
pub mod creds;
mod server;
#[cfg(test)]
mod tests;
pub use self::{
client::{ClientIo, Connect, ConnectFuture, NewClient},
creds::watch,
server::{Server, ServerIo, TerminateFuture},
};
use linkerd_dns_name as dns;
use linkerd_error::{Error, Result};
use linkerd_identity as id;
use std::str::FromStr;
#[cfg(feature = "boring")]
pub use linkerd_meshtls_boring as boring;
#[cfg(feature = "rustls")]
pub use linkerd_meshtls_rustls as rustls;
#[derive(Copy, Clone, Debug)]
pub enum Mode {
#[cfg(feature = "boring")]
Boring,
#[cfg(feature = "rustls")]
Rustls,
#[cfg(not(feature = "__has_any_tls_impls"))]
NoTls,
}
#[cfg(not(feature = "__has_any_tls_impls"))]
#[macro_export]
macro_rules! no_tls {
($($field:ident),*) => {
{
$(
let _ = $field;
)*
unreachable!("compiled without any TLS implementations enabled!");
}
};
}
// === impl Mode ===
#[cfg(feature = "rustls")]
impl Default for Mode {
fn default() -> Self {
Self::Rustls
}
}
// FIXME(ver) We should have a way to opt into boring by configuration when both are enabled.
#[cfg(all(feature = "boring", not(feature = "rustls")))]
impl Default for Mode {
fn default() -> Self {
Self::Boring
}
}
#[cfg(not(feature = "__has_any_tls_impls"))]
impl Default for Mode {
fn default() -> Self {
Self::NoTls
}
}
impl Mode {
pub fn watch(
self,
local_id: id::Id,
server_name: dns::Name,
roots_pem: &str,
) -> Result<(creds::Store, creds::Receiver)> {
match self {
#[cfg(feature = "boring")]
Self::Boring => {
let (store, receiver) = boring::creds::watch(local_id, server_name, roots_pem)?;
Ok((
creds::Store::Boring(store),
creds::Receiver::Boring(receiver),
))
}
#[cfg(feature = "rustls")]
Self::Rustls => {
let (store, receiver) = rustls::creds::watch(local_id, server_name, roots_pem)?;
Ok((
creds::Store::Rustls(store),
creds::Receiver::Rustls(receiver),
))
}
#[cfg(not(feature = "__has_any_tls_impls"))]
_ => no_tls!(local_id, server_name, roots_pem),
}
}
}
impl FromStr for Mode {
type Err = Error;
fn from_str(s: &str) -> Result<Self> {
#[cfg(feature = "boring")]
if s.eq_ignore_ascii_case("boring") {
return Ok(Self::Boring);
}
#[cfg(feature = "rustls")]
if s.eq_ignore_ascii_case("rustls") {
return Ok(Self::Rustls);
}
Err(format!("unknown TLS backend: {s}").into())
}
}
impl std::fmt::Display for Mode {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
#[cfg(feature = "boring")]
Self::Boring => "boring".fmt(f),
#[cfg(feature = "rustls")]
Self::Rustls => "rustls".fmt(f),
#[cfg(not(feature = "__has_any_tls_impls"))]
_ => no_tls!(f),
}
}
}
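The `no_tls!` macro removed above exists so that feature-gated `match` arms still type-check when every TLS backend is compiled out: it consumes its arguments (silencing unused-variable warnings) and panics if ever reached. A tiny self-contained illustration of the same trick; `no_backend!`, `Backend`, and `describe` are made-up names, not part of this change.

macro_rules! no_backend {
    ($($field:ident),*) => {{
        // Consume each argument so the compiler does not warn about it, then
        // make the arm diverge.
        $( let _ = $field; )*
        unreachable!("compiled without any TLS implementations enabled!")
    }};
}

#[allow(dead_code)] // only `Enabled` is exercised below
enum Backend {
    Enabled,
    Disabled,
}

fn describe(backend: &Backend, name: &str) -> &'static str {
    match backend {
        Backend::Enabled => "tls enabled",
        // In the real code this arm only exists when no backend feature is
        // enabled; here it just demonstrates the macro.
        Backend::Disabled => no_backend!(name),
    }
}

fn main() {
    println!("{}", describe(&Backend::Enabled, "example"));
}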

@ -1,159 +1,137 @@
use linkerd_error::Result;
use futures::prelude::*;
use linkerd_dns_name as dns;
use linkerd_io as io;
use linkerd_meshtls_verifier as verifier;
use linkerd_stack::{Param, Service};
use linkerd_tls::{ServerName, ServerTls};
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
#[cfg(feature = "boring")]
use crate::boring;
#[cfg(feature = "rustls")]
use crate::rustls;
#[cfg(not(feature = "__has_any_tls_impls"))]
use std::marker::PhantomData;
use linkerd_tls::{ClientId, NegotiatedProtocol, NegotiatedProtocolRef, ServerName, ServerTls};
use std::{pin::Pin, sync::Arc, task::Context};
use thiserror::Error;
use tokio::sync::watch;
use tokio_rustls::rustls::{pki_types::CertificateDer, ServerConfig};
use tracing::debug;
/// A Service that terminates TLS connections using a dynamically updated server configuration.
#[derive(Clone)]
- pub enum Server {
- #[cfg(feature = "boring")]
- Boring(boring::Server),
- #[cfg(feature = "rustls")]
- Rustls(rustls::Server),
- #[cfg(not(feature = "__has_any_tls_impls"))]
- NoTls,
+ pub struct Server {
+ name: dns::Name,
+ rx: watch::Receiver<Arc<ServerConfig>>,
}
- #[pin_project::pin_project(project = TerminateFutureProj)]
- pub enum TerminateFuture<I> {
- #[cfg(feature = "boring")]
- Boring(#[pin] boring::TerminateFuture<I>),
+ pub type TerminateFuture<I> = futures::future::MapOk<
+ tokio_rustls::Accept<I>,
+ fn(tokio_rustls::server::TlsStream<I>) -> (ServerTls, ServerIo<I>),
+ >;
- #[cfg(feature = "rustls")]
- Rustls(#[pin] rustls::TerminateFuture<I>),
- #[cfg(not(feature = "__has_any_tls_impls"))]
- NoTls(PhantomData<fn(I)>),
- }
- #[pin_project::pin_project(project = ServerIoProj)]
#[derive(Debug)]
- pub enum ServerIo<I> {
- #[cfg(feature = "boring")]
- Boring(#[pin] boring::ServerIo<I>),
+ pub struct ServerIo<I>(tokio_rustls::server::TlsStream<I>);
- #[cfg(feature = "rustls")]
- Rustls(#[pin] rustls::ServerIo<I>),
+ #[derive(Debug, Error)]
+ #[error("credential store lost")]
+ pub struct LostStore(());
- #[cfg(not(feature = "__has_any_tls_impls"))]
- NoTls(PhantomData<fn(I)>),
- }
impl Server {
pub(crate) fn new(name: dns::Name, rx: watch::Receiver<Arc<ServerConfig>>) -> Self {
Self { name, rx }
}
// === impl Server ===
#[cfg(test)]
pub(crate) fn config(&self) -> Arc<ServerConfig> {
(*self.rx.borrow()).clone()
}
- impl Param<ServerName> for Server {
- #[inline]
- fn param(&self) -> ServerName {
- match self {
- #[cfg(feature = "boring")]
- Self::Boring(srv) => srv.param(),
- #[cfg(feature = "rustls")]
- Self::Rustls(srv) => srv.param(),
- #[cfg(not(feature = "__has_any_tls_impls"))]
- _ => crate::no_tls!(),
+ /// Spawns a background task that watches for TLS configuration updates and creates an augmented
+ /// configuration with the provided ALPN protocols. The returned server uses this ALPN-aware
+ /// configuration.
+ pub fn spawn_with_alpn(self, alpn_protocols: Vec<Vec<u8>>) -> Result<Self, LostStore> {
+ if alpn_protocols.is_empty() {
+ return Ok(self);
+ }
let mut orig_rx = self.rx;
let mut c = (**orig_rx.borrow_and_update()).clone();
c.alpn_protocols.clone_from(&alpn_protocols);
let (tx, rx) = watch::channel(c.into());
// Spawn a background task that watches the optional server configuration and publishes it
// as a reliable channel, including any ALPN overrides.
//
// The background task completes when the original sender is closed or when all receivers
// are dropped.
tokio::spawn(async move {
loop {
tokio::select! {
_ = tx.closed() => {
debug!("ALPN TLS config receivers dropped");
return;
}
res = orig_rx.changed() => {
if res.is_err() {
debug!("TLS config sender closed");
return;
}
}
}
let mut c = (*orig_rx.borrow().clone()).clone();
c.alpn_protocols.clone_from(&alpn_protocols);
let _ = tx.send(c.into());
}
});
Ok(Self::new(self.name, rx))
}
}
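The core of `spawn_with_alpn` above is a generic "mirror a watch channel while transforming each value" task. A standalone sketch of that pattern with tokio's watch API (tokio with the `sync`, `rt`, and `macros` features assumed; `republish_doubled` is an illustrative name):

use tokio::sync::watch;

fn republish_doubled(mut upstream: watch::Receiver<u32>) -> watch::Receiver<u32> {
    // Seed the new channel from the current upstream value.
    let initial = *upstream.borrow_and_update() * 2;
    let (tx, rx) = watch::channel(initial);

    tokio::spawn(async move {
        loop {
            tokio::select! {
                // All downstream receivers were dropped: stop mirroring.
                _ = tx.closed() => return,
                // The upstream sender was dropped: stop mirroring.
                res = upstream.changed() => {
                    if res.is_err() {
                        return;
                    }
                }
            }
            // Publish the transformed value to the downstream channel.
            let doubled = *upstream.borrow() * 2;
            let _ = tx.send(doubled);
        }
    });

    rx
}

#[tokio::main]
async fn main() {
    let (tx, rx) = watch::channel(1u32);
    let mut doubled = republish_doubled(rx);
    assert_eq!(*doubled.borrow(), 2);

    tx.send(21).unwrap();
    doubled.changed().await.unwrap();
    assert_eq!(*doubled.borrow(), 42);
}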
- impl Server {
- pub fn with_alpn(self, alpn_protocols: Vec<Vec<u8>>) -> Result<Self> {
- match self {
- #[cfg(feature = "boring")]
- Self::Boring(srv) => Ok(Self::Boring(srv.with_alpn(alpn_protocols))),
- #[cfg(feature = "rustls")]
- Self::Rustls(srv) => srv
- .spawn_with_alpn(alpn_protocols)
- .map(Self::Rustls)
- .map_err(Into::into),
- #[cfg(not(feature = "__has_any_tls_impls"))]
- _ => crate::no_tls!(alpn_protocols),
- }
+ impl Param<ServerName> for Server {
+ fn param(&self) -> ServerName {
+ ServerName(self.name.clone())
+ }
}
impl<I> Service<I> for Server
where
- I: io::AsyncRead + io::AsyncWrite + Send + Unpin + 'static,
+ I: io::AsyncRead + io::AsyncWrite + Send + Unpin,
{
type Response = (ServerTls, ServerIo<I>);
- type Error = io::Error;
+ type Error = std::io::Error;
type Future = TerminateFuture<I>;
#[inline]
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- match self {
- #[cfg(feature = "boring")]
- Self::Boring(svc) => <boring::Server as Service<I>>::poll_ready(svc, cx),
- #[cfg(feature = "rustls")]
- Self::Rustls(svc) => <rustls::Server as Service<I>>::poll_ready(svc, cx),
- #[cfg(not(feature = "__has_any_tls_impls"))]
- _ => crate::no_tls!(cx),
- }
+ fn poll_ready(&mut self, _cx: &mut Context<'_>) -> io::Poll<()> {
+ io::Poll::Ready(Ok(()))
}
#[inline]
fn call(&mut self, io: I) -> Self::Future {
- match self {
- #[cfg(feature = "boring")]
- Self::Boring(svc) => TerminateFuture::Boring(svc.call(io)),
+ tokio_rustls::TlsAcceptor::from((*self.rx.borrow()).clone())
+ .accept(io)
+ .map_ok(|io| {
+ // Determine the peer's identity, if it exists.
+ let client_id = client_identity(&io);
- #[cfg(feature = "rustls")]
- Self::Rustls(svc) => TerminateFuture::Rustls(svc.call(io)),
+ let negotiated_protocol = io
+ .get_ref()
+ .1
+ .alpn_protocol()
+ .map(|b| NegotiatedProtocol(b.into()));
- #[cfg(not(feature = "__has_any_tls_impls"))]
- _ => crate::no_tls!(io),
- }
+ debug!(client.id = ?client_id, alpn = ?negotiated_protocol, "Accepted TLS connection");
+ let tls = ServerTls::Established {
+ client_id,
+ negotiated_protocol,
+ };
+ (tls, ServerIo(io))
+ })
}
}
- // === impl TerminateFuture ===
+ fn client_identity<I>(tls: &tokio_rustls::server::TlsStream<I>) -> Option<ClientId> {
+ let (_io, session) = tls.get_ref();
+ let certs = session.peer_certificates()?;
+ let c = certs.first().map(CertificateDer::as_ref)?;
- impl<I> Future for TerminateFuture<I>
- where
- I: io::AsyncRead + io::AsyncWrite + Unpin,
- {
- type Output = io::Result<(ServerTls, ServerIo<I>)>;
- #[inline]
- fn poll(self: std::pin::Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
- match self.project() {
- #[cfg(feature = "boring")]
- TerminateFutureProj::Boring(f) => {
- let res = futures::ready!(f.poll(cx));
- Poll::Ready(res.map(|(tls, io)| (tls, ServerIo::Boring(io))))
- }
- #[cfg(feature = "rustls")]
- TerminateFutureProj::Rustls(f) => {
- let res = futures::ready!(f.poll(cx));
- Poll::Ready(res.map(|(tls, io)| (tls, ServerIo::Rustls(io))))
- }
- #[cfg(not(feature = "__has_any_tls_impls"))]
- _ => crate::no_tls!(cx),
- }
- }
+ verifier::client_identity(c).map(ClientId)
}
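`call` and `client_identity` above pull everything they need out of the accepted stream via `get_ref()`. A small sketch of the same accessors on a tokio-rustls server stream (tokio-rustls 0.26 / rustls 0.23 assumed; `connection_details` is an illustrative name, not part of this change):

use tokio_rustls::rustls::pki_types::CertificateDer;
use tokio_rustls::server::TlsStream;

// Returns the negotiated ALPN protocol and the client's DER-encoded leaf
// certificate, if any.
fn connection_details<I>(tls: &TlsStream<I>) -> (Option<Vec<u8>>, Option<Vec<u8>>) {
    // `get_ref` exposes the wrapped I/O alongside the rustls ServerConnection.
    let (_io, session) = tls.get_ref();
    let alpn = session.alpn_protocol().map(|p| p.to_vec());
    let leaf = session
        .peer_certificates()
        .and_then(|certs| certs.first())
        .map(CertificateDer::as_ref)
        .map(|der| der.to_vec());
    (alpn, leaf)
}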
// === impl ServerIo ===
@ -161,105 +139,59 @@ where
impl<I: io::AsyncRead + io::AsyncWrite + Unpin> io::AsyncRead for ServerIo<I> {
#[inline]
fn poll_read(
- self: Pin<&mut Self>,
+ mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut io::ReadBuf<'_>,
) -> io::Poll<()> {
- match self.project() {
- #[cfg(feature = "boring")]
- ServerIoProj::Boring(io) => io.poll_read(cx, buf),
- #[cfg(feature = "rustls")]
- ServerIoProj::Rustls(io) => io.poll_read(cx, buf),
- #[cfg(not(feature = "__has_any_tls_impls"))]
- _ => crate::no_tls!(cx, buf),
- }
+ Pin::new(&mut self.0).poll_read(cx, buf)
}
}
impl<I: io::AsyncRead + io::AsyncWrite + Unpin> io::AsyncWrite for ServerIo<I> {
#[inline]
- fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
- match self.project() {
- #[cfg(feature = "boring")]
- ServerIoProj::Boring(io) => io.poll_flush(cx),
- #[cfg(feature = "rustls")]
- ServerIoProj::Rustls(io) => io.poll_flush(cx),
- #[cfg(not(feature = "__has_any_tls_impls"))]
- _ => crate::no_tls!(cx),
- }
+ fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
+ Pin::new(&mut self.0).poll_flush(cx)
}
#[inline]
- fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
- match self.project() {
- #[cfg(feature = "boring")]
- ServerIoProj::Boring(io) => io.poll_shutdown(cx),
- #[cfg(feature = "rustls")]
- ServerIoProj::Rustls(io) => io.poll_shutdown(cx),
- #[cfg(not(feature = "__has_any_tls_impls"))]
- _ => crate::no_tls!(cx),
- }
+ fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
+ Pin::new(&mut self.0).poll_shutdown(cx)
}
#[inline]
- fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> io::Poll<usize> {
- match self.project() {
- #[cfg(feature = "boring")]
- ServerIoProj::Boring(io) => io.poll_write(cx, buf),
- #[cfg(feature = "rustls")]
- ServerIoProj::Rustls(io) => io.poll_write(cx, buf),
- #[cfg(not(feature = "__has_any_tls_impls"))]
- _ => crate::no_tls!(cx, buf),
- }
+ fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> io::Poll<usize> {
+ Pin::new(&mut self.0).poll_write(cx, buf)
}
#[inline]
fn poll_write_vectored(
- self: Pin<&mut Self>,
+ mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
bufs: &[io::IoSlice<'_>],
- ) -> Poll<Result<usize, std::io::Error>> {
- match self.project() {
- #[cfg(feature = "boring")]
- ServerIoProj::Boring(io) => io.poll_write_vectored(cx, bufs),
- #[cfg(feature = "rustls")]
- ServerIoProj::Rustls(io) => io.poll_write_vectored(cx, bufs),
- #[cfg(not(feature = "__has_any_tls_impls"))]
- _ => crate::no_tls!(cx, bufs),
- }
+ ) -> io::Poll<usize> {
+ Pin::new(&mut self.0).poll_write_vectored(cx, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
- match self {
- #[cfg(feature = "boring")]
- Self::Boring(io) => io.is_write_vectored(),
+ self.0.is_write_vectored()
}
}
- #[cfg(feature = "rustls")]
- Self::Rustls(io) => io.is_write_vectored(),
- #[cfg(not(feature = "__has_any_tls_impls"))]
- _ => crate::no_tls!(),
- }
impl<I> ServerIo<I> {
#[inline]
pub fn negotiated_protocol(&self) -> Option<NegotiatedProtocolRef<'_>> {
self.0
.get_ref()
.1
.alpn_protocol()
.map(NegotiatedProtocolRef)
}
}
impl<I: io::PeerAddr> io::PeerAddr for ServerIo<I> {
#[inline]
fn peer_addr(&self) -> io::Result<std::net::SocketAddr> {
- match self {
- #[cfg(feature = "boring")]
- Self::Boring(io) => io.peer_addr(),
- #[cfg(feature = "rustls")]
- Self::Rustls(io) => io.peer_addr(),
- #[cfg(not(feature = "__has_any_tls_impls"))]
- _ => crate::no_tls!(),
- }
+ self.0.get_ref().0.peer_addr()
}
}

Some files were not shown because too many files have changed in this diff.