Compare commits

...

101 Commits

Author SHA1 Message Date
katelyn martin e8cc4ec47b
nit(app): remove frivolous code (#4094)
this commit removes a piece of code that has been commented out.

it additionally removes a variable binding that is not needed. `dst` is
not moved, so we do not need to bind the address of the destination
service to a variable, nor do we need to clone it.

Signed-off-by: katelyn martin <kate@buoyant.io>
2025-08-20 11:31:09 -04:00
dependabot[bot] b951a6c374
build(deps): bump tempfile from 3.20.0 to 3.21.0 (#4093)
Bumps [tempfile](https://github.com/Stebalien/tempfile) from 3.20.0 to 3.21.0.
- [Changelog](https://github.com/Stebalien/tempfile/blob/master/CHANGELOG.md)
- [Commits](https://github.com/Stebalien/tempfile/commits)

---
updated-dependencies:
- dependency-name: tempfile
  dependency-version: 3.21.0
  dependency-type: indirect
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-20 10:13:45 -04:00
dependabot[bot] 7f6ac15f13
build(deps): bump cfg-if from 1.0.1 to 1.0.3 (#4092)
Bumps [cfg-if](https://github.com/rust-lang/cfg-if) from 1.0.1 to 1.0.3.
- [Release notes](https://github.com/rust-lang/cfg-if/releases)
- [Changelog](https://github.com/rust-lang/cfg-if/blob/main/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/cfg-if/compare/v1.0.1...v1.0.3)

---
updated-dependencies:
- dependency-name: cfg-if
  dependency-version: 1.0.3
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-20 10:13:04 -04:00
dependabot[bot] 75e9caaeae
build(deps): bump thiserror from 2.0.15 to 2.0.16 (#4091)
Bumps [thiserror](https://github.com/dtolnay/thiserror) from 2.0.15 to 2.0.16.
- [Release notes](https://github.com/dtolnay/thiserror/releases)
- [Commits](https://github.com/dtolnay/thiserror/compare/2.0.15...2.0.16)

---
updated-dependencies:
- dependency-name: thiserror
  dependency-version: 2.0.16
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-20 10:12:43 -04:00
dependabot[bot] 02bbb3d617
build(deps): bump prettyplease from 0.2.36 to 0.2.37 (#4090)
Bumps [prettyplease](https://github.com/dtolnay/prettyplease) from 0.2.36 to 0.2.37.
- [Release notes](https://github.com/dtolnay/prettyplease/releases)
- [Commits](https://github.com/dtolnay/prettyplease/compare/0.2.36...0.2.37)

---
updated-dependencies:
- dependency-name: prettyplease
  dependency-version: 0.2.37
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-19 15:40:53 -04:00
dependabot[bot] 103c69ca75
build(deps): bump serde_json from 1.0.142 to 1.0.143 (#4088)
Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.142 to 1.0.143.
- [Release notes](https://github.com/serde-rs/json/releases)
- [Commits](https://github.com/serde-rs/json/compare/v1.0.142...v1.0.143)

---
updated-dependencies:
- dependency-name: serde_json
  dependency-version: 1.0.143
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-19 15:40:39 -04:00
dependabot[bot] 4663cc4eb6
build(deps): bump tinyvec from 1.9.0 to 1.10.0 (#4087)
Bumps [tinyvec](https://github.com/Lokathor/tinyvec) from 1.9.0 to 1.10.0.
- [Changelog](https://github.com/Lokathor/tinyvec/blob/main/CHANGELOG.md)
- [Commits](https://github.com/Lokathor/tinyvec/compare/v1.9.0...v1.10.0)

---
updated-dependencies:
- dependency-name: tinyvec
  dependency-version: 1.10.0
  dependency-type: indirect
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-19 15:40:31 -04:00
dependabot[bot] 03374b9543
build(deps): bump hyper from 1.6.0 to 1.7.0 (#4089)
Bumps [hyper](https://github.com/hyperium/hyper) from 1.6.0 to 1.7.0.
- [Release notes](https://github.com/hyperium/hyper/releases)
- [Changelog](https://github.com/hyperium/hyper/blob/master/CHANGELOG.md)
- [Commits](https://github.com/hyperium/hyper/compare/v1.6.0...v1.7.0)

---
updated-dependencies:
- dependency-name: hyper
  dependency-version: 1.7.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-19 15:40:03 -04:00
Scott Fleener 4c9ae74450 chore(metrics): Use `linkerd-rustls` for crypto provider metrics
Now that the `rustls` initialization/configuration has been decoupled from `linkerd-meshtls`, we can get the provider directly from there. This handles the uninitialized case better, which should be less of a problem now that we always directly initialize the provider in main.

Signed-off-by: Scott Fleener <scott@buoyant.io>
2025-08-19 14:06:32 -04:00
katelyn martin 94572d174d
refactor(http/classify): remove unused classification middleware (#4085)
`NewBroadcastClassification<C, X, N>` is not used.

`BroadcastClassification<C, S>` is only used by the `gate` submodule in
this crate.

this commit removes `NewBroadcastClassification`, since it is unused.
this commit demotes `channel` to an internal submodule, since it has no
external users.

the reëxport of `BroadcastClassification` is unused, though it is left
intact because it _is_ exposed by `NewClassifyGateSet`'s implementation
of `NewService<T>`.

Signed-off-by: katelyn martin <kate@buoyant.io>
2025-08-19 09:33:29 -04:00
katelyn martin 897c7e85bc
refactor(app/core): remove unused `gate` reëxport (#4084)
`linkerd_app_core::classify` reëxports symbols from
`linkerd_proxy_http::classify::gate`.

nothing makes use of this, and these symbols are already reëxported from
`linkerd_proxy_http::classify`. existing callsites in the outbound proxy
import this middleware directly, or through the reëxport in
`linkerd_proxy_http`.

this commit removes this `pub use` directive, since it does nothing.

Signed-off-by: katelyn martin <kate@buoyant.io>
2025-08-18 20:58:18 +00:00
Scott Fleener 036ca75c00
chore(tls): Install default rustls provider in main (#4083)
* chore(tls): Move rustls into dedicated crate

Signed-off-by: Scott Fleener <scott@buoyant.io>

* chore(tls): Remove extraneous provider installs from tests

Signed-off-by: Scott Fleener <scott@buoyant.io>

* chore(tls): Install default rustls provider in main

Signed-off-by: Scott Fleener <scott@buoyant.io>

---------

Signed-off-by: Scott Fleener <scott@buoyant.io>
2025-08-18 19:55:09 +00:00
Scott Fleener 98e731d841
chore(tls): Move `rustls` configuration into dedicated crate (#4082)
* chore(tls): Move rustls into dedicated crate

Signed-off-by: Scott Fleener <scott@buoyant.io>

* chore(tls): Remove extraneous provider installs from tests

Signed-off-by: Scott Fleener <scott@buoyant.io>

---------

Signed-off-by: Scott Fleener <scott@buoyant.io>
2025-08-18 12:48:28 -07:00
Scott Fleener d5b12ea5e2 chore(tls): Hoist rustls directly into linkerd-meshtls
Signed-off-by: Scott Fleener <scott@buoyant.io>
2025-08-18 15:37:05 -04:00
Scott Fleener a64170bd61
chore(tls): Refactor `rustls` shims out of `linkerd-meshtls` (#4080)
* chore(tls): Refactor shims out of meshtls

Meshtls previously assumed that multiple TLS implementations could be used. Now that we've consolidated on rustls as the TLS implementation, we can remove these shims.

Signed-off-by: Scott Fleener <scott@buoyant.io>

* chore(tls): Refactor mode out of meshtls

Signed-off-by: Scott Fleener <scott@buoyant.io>

---------

Signed-off-by: Scott Fleener <scott@buoyant.io>
2025-08-18 12:27:09 -07:00
katelyn martin 973dfa6f4d
refactor(app/inbound): remove unused `proxy_metrics()` method (#4079)
this commit removes `linkerd_app_inbound::Inbound::proxy_metrics()`.

this accessor is not used anywhere.

Signed-off-by: katelyn martin <kate@buoyant.io>
2025-08-18 14:36:31 -04:00
Scott Fleener 17bff6144a
feat(tls): Explicitly include post-quantum key exchange algorithms (#4070)
* feat(tls): Explicitly include post-quantum key exchange algorithms

This explicitly sets the key exchange algorithms the proxy uses. It adds `X25519MLKEM768` as the most preferred algorithm in non-FIPS mode, and `SECP256R1MLKEM768` in FIPS mode.

Note that `X25519MLKEM768` is still probably appropriate for FIPS environments according to [NIST's special publication 800-56Cr2](https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-56Cr2.pdf) as it performs a FIPS-approved key-establishment first (`MLKEM768`), but we should evaluate this position more before committing to it.

Signed-off-by: Scott Fleener <scott@buoyant.io>
2025-08-18 14:09:34 -04:00
katelyn martin d385094caa
nit(app/inbound): fix `InboundMetrics` doc comment (#4078)
this commit changes this comment in two ways:

1. fix a copy-paste typo. this should say "inbound", not "outbound".
2. add note that this is a "legacy" structure.

the equivalent structure in the outbound proxy was labeled as such
in https://github.com/linkerd/linkerd2-proxy/pull/2887.

see:

```rust
 /// Holds LEGACY outbound proxy metrics.
 #[derive(Clone, Debug)]
 pub struct OutboundMetrics {
     pub(crate) http_errors: error::Http,
     pub(crate) tcp_errors: error::Tcp,

     // pub(crate) http_route_backends: RouteBackendMetrics,
     // pub(crate) grpc_route_backends: RouteBackendMetrics,
     /// Holds metrics that are common to both inbound and outbound proxies. These metrics are
     /// reported separately
     pub(crate) proxy: Proxy,

     pub(crate) prom: PromMetrics,
 }
```

\- <dce6b61191/linkerd/app/outbound/src/metrics.rs (L22-L35)>

`authz::HttpAuthzMetrics`, `error::HttpErrorMetrics`,
`authz::TcpAuthzMetrics`, and `error::TcpErrorMetrics` all make use of
the "legacy" metrics implementation defined in `linkerd_metrics`.

Signed-off-by: katelyn martin <kate@buoyant.io>
2025-08-18 14:01:37 -04:00
dependabot[bot] dce6b61191
build(deps): bump syn from 2.0.105 to 2.0.106 (#4077)
Bumps [syn](https://github.com/dtolnay/syn) from 2.0.105 to 2.0.106.
- [Release notes](https://github.com/dtolnay/syn/releases)
- [Commits](https://github.com/dtolnay/syn/compare/2.0.105...2.0.106)

---
updated-dependencies:
- dependency-name: syn
  dependency-version: 2.0.106
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-18 12:12:30 -04:00
dependabot[bot] 28ebc47a6b
build(deps): bump cc from 1.2.32 to 1.2.33 (#4076)
Bumps [cc](https://github.com/rust-lang/cc-rs) from 1.2.32 to 1.2.33.
- [Release notes](https://github.com/rust-lang/cc-rs/releases)
- [Changelog](https://github.com/rust-lang/cc-rs/blob/main/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/cc-rs/compare/cc-v1.2.32...cc-v1.2.33)

---
updated-dependencies:
- dependency-name: cc
  dependency-version: 1.2.33
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-18 11:38:09 -04:00
dependabot[bot] 4bae7e98f2
build(deps): bump proc-macro2 from 1.0.97 to 1.0.101 (#4075)
Bumps [proc-macro2](https://github.com/dtolnay/proc-macro2) from 1.0.97 to 1.0.101.
- [Release notes](https://github.com/dtolnay/proc-macro2/releases)
- [Commits](https://github.com/dtolnay/proc-macro2/compare/1.0.97...1.0.101)

---
updated-dependencies:
- dependency-name: proc-macro2
  dependency-version: 1.0.101
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-18 11:37:49 -04:00
dependabot[bot] b89c4902c6
build(deps): bump thiserror from 2.0.14 to 2.0.15 (#4074)
Bumps [thiserror](https://github.com/dtolnay/thiserror) from 2.0.14 to 2.0.15.
- [Release notes](https://github.com/dtolnay/thiserror/releases)
- [Commits](https://github.com/dtolnay/thiserror/compare/2.0.14...2.0.15)

---
updated-dependencies:
- dependency-name: thiserror
  dependency-version: 2.0.15
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-18 11:26:07 -04:00
dependabot[bot] 8a80f1ce95
build(deps): bump async-trait from 0.1.88 to 0.1.89 (#4073)
Bumps [async-trait](https://github.com/dtolnay/async-trait) from 0.1.88 to 0.1.89.
- [Release notes](https://github.com/dtolnay/async-trait/releases)
- [Commits](https://github.com/dtolnay/async-trait/compare/0.1.88...0.1.89)

---
updated-dependencies:
- dependency-name: async-trait
  dependency-version: 0.1.89
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-18 10:47:36 -04:00
dependabot[bot] edc35d6e18
build(deps): bump derive_arbitrary from 1.4.1 to 1.4.2 (#4072)
Bumps [derive_arbitrary](https://github.com/rust-fuzz/arbitrary) from 1.4.1 to 1.4.2.
- [Changelog](https://github.com/rust-fuzz/arbitrary/blob/main/CHANGELOG.md)
- [Commits](https://github.com/rust-fuzz/arbitrary/compare/v1.4.1...v1.4.2)

---
updated-dependencies:
- dependency-name: derive_arbitrary
  dependency-version: 1.4.2
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-18 10:44:51 -04:00
dependabot[bot] 99f322a9a0
build(deps): bump arbitrary from 1.4.1 to 1.4.2 (#4071)
Bumps [arbitrary](https://github.com/rust-fuzz/arbitrary) from 1.4.1 to 1.4.2.
- [Changelog](https://github.com/rust-fuzz/arbitrary/blob/main/CHANGELOG.md)
- [Commits](https://github.com/rust-fuzz/arbitrary/compare/v1.4.1...v1.4.2)

---
updated-dependencies:
- dependency-name: arbitrary
  dependency-version: 1.4.2
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-18 07:26:46 -07:00
Scott Fleener 627a5aad21
feat(tls): Remove boring as a TLS implementation (#4038)
* chore(tls): Remove ring as crypto backend

The broader ecosystem has mostly moved to aws-lc-rs as the primary rustls backend, and we should follow suit. This will also simplify the maintenance of the proxy's TLS implementation in the long term.

There will need to be some refactoring to clean up the rustls provider interfaces, but that will come in follow-ups.

Signed-off-by: Scott Fleener <scott@buoyant.io>

* feat(tls): Remove boring as a TLS implementation

BoringSSL, as we use it today, doesn't integrate well with the broader rustls ecosystem, so this removes it. This will also simplify the maintenance of the proxy's TLS implementation in the long term.

There will need to be some refactoring to clean up the rustls provider interfaces, but that will come in follow-ups.

Signed-off-by: Scott Fleener <scott@buoyant.io>

* chore(tls): Restore existing aws-lc feature names for compatibility

Signed-off-by: Scott Fleener <scott@buoyant.io>

* fix(tls): Use correct feature name for fips conditionals

Signed-off-by: Scott Fleener <scott@buoyant.io>

---------

Signed-off-by: Scott Fleener <scott@buoyant.io>
2025-08-14 23:27:58 -04:00
Scott Fleener 356f80b786
chore(tls): Improve `aws-lc` usage (#4069)
This adds a few small improvements to how we handle the `aws-lc` usage in the proxy:

- Pull provider customization to the `aws-lc` backend, reducing the amount that the module exposes
- Validate that the provider is actually FIPS compatible when fips is enabled
- Use the same signature verification algorithms in the `rustls` provider as we do in the cert verifier. Previously, the provider also included RSA_PSS_2048_8192_SHA256, which is marked as legacy and we don't have a strong reason to support.
- Add change detector tests for the cipher suites, key exchange groups, and signature algorithms. These should ideally never change unless `rustls` changes, at which point we can re-evaluate which algorithms are in use by the proxy.

Signed-off-by: Scott Fleener <scott@buoyant.io>
2025-08-14 18:56:35 +00:00
dependabot[bot] 0b3bc61263
build(deps): bump hashbrown from 0.15.2 to 0.15.5 (#4068)
Bumps [hashbrown](https://github.com/rust-lang/hashbrown) from 0.15.2 to 0.15.5.
- [Release notes](https://github.com/rust-lang/hashbrown/releases)
- [Changelog](https://github.com/rust-lang/hashbrown/blob/master/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/hashbrown/commits/v0.15.5)

---
updated-dependencies:
- dependency-name: hashbrown
  dependency-version: 0.15.5
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-14 11:30:16 -04:00
Scott Fleener 2b0e723027
chore(deps): Update tonic deps to 0.13 (#4066)
* chore(deps): Update tonic deps to 0.13

Signed-off-by: Scott Fleener <scott@buoyant.io>

* chore(deps): Update linkerd2-proxy-api to 0.17.0

Signed-off-by: Scott Fleener <scott@buoyant.io>

---------

Signed-off-by: Scott Fleener <scott@buoyant.io>
2025-08-13 10:45:42 -07:00
Scott Fleener 2156c3d5e3
chore(tls): Remove `ring` as `rustls` crypto backend (#4029)
* chore(tls): Remove ring as crypto backend

The broader ecosystem has mostly moved to aws-lc-rs as the primary rustls backend, and we should follow suit. This will also simplify the maintenance of the proxy's TLS implementation in the long term.

There will need to be some refactoring to clean up the rustls provider interfaces, but that will come in follow-ups.

Signed-off-by: Scott Fleener <scott@buoyant.io>

* chore(tls): Restore existing aws-lc feature names for compatibility

Signed-off-by: Scott Fleener <scott@buoyant.io>

* fix(tls): Use correct feature name for fips conditionals

Signed-off-by: Scott Fleener <scott@buoyant.io>

---------

Signed-off-by: Scott Fleener <scott@buoyant.io>
2025-08-13 13:13:57 -04:00
dependabot[bot] c4cae21e11
build(deps): bump syn from 2.0.104 to 2.0.105 (#4067)
Bumps [syn](https://github.com/dtolnay/syn) from 2.0.104 to 2.0.105.
- [Release notes](https://github.com/dtolnay/syn/releases)
- [Commits](https://github.com/dtolnay/syn/compare/2.0.104...2.0.105)

---
updated-dependencies:
- dependency-name: syn
  dependency-version: 2.0.105
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-13 10:09:16 -07:00
Scott Fleener 89c88caf5c
chore(deps): Manually remove unused dependencies (#4065)
Signed-off-by: Scott Fleener <scott@buoyant.io>
2025-08-13 10:44:43 -04:00
dependabot[bot] bb612d3aac
build(deps): bump the symbolic group with 2 updates (#4063)
Bumps the symbolic group with 2 updates: [symbolic-common](https://github.com/getsentry/symbolic) and [symbolic-demangle](https://github.com/getsentry/symbolic).


Updates `symbolic-common` from 12.16.1 to 12.16.2
- [Release notes](https://github.com/getsentry/symbolic/releases)
- [Changelog](https://github.com/getsentry/symbolic/blob/master/CHANGELOG.md)
- [Commits](https://github.com/getsentry/symbolic/compare/12.16.1...12.16.2)

Updates `symbolic-demangle` from 12.16.1 to 12.16.2
- [Release notes](https://github.com/getsentry/symbolic/releases)
- [Changelog](https://github.com/getsentry/symbolic/blob/master/CHANGELOG.md)
- [Commits](https://github.com/getsentry/symbolic/compare/12.16.1...12.16.2)

---
updated-dependencies:
- dependency-name: symbolic-common
  dependency-version: 12.16.2
  dependency-type: indirect
  update-type: version-update:semver-patch
  dependency-group: symbolic
- dependency-name: symbolic-demangle
  dependency-version: 12.16.2
  dependency-type: indirect
  update-type: version-update:semver-patch
  dependency-group: symbolic
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-12 17:18:00 -07:00
dependabot[bot] 7030cc51ed
build(deps): bump anyhow from 1.0.98 to 1.0.99 (#4062)
Bumps [anyhow](https://github.com/dtolnay/anyhow) from 1.0.98 to 1.0.99.
- [Release notes](https://github.com/dtolnay/anyhow/releases)
- [Commits](https://github.com/dtolnay/anyhow/compare/1.0.98...1.0.99)

---
updated-dependencies:
- dependency-name: anyhow
  dependency-version: 1.0.99
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-12 18:39:04 +00:00
dependabot[bot] af520dfd12
build(deps): bump proc-macro2 from 1.0.96 to 1.0.97 (#4061)
Bumps [proc-macro2](https://github.com/dtolnay/proc-macro2) from 1.0.96 to 1.0.97.
- [Release notes](https://github.com/dtolnay/proc-macro2/releases)
- [Commits](https://github.com/dtolnay/proc-macro2/compare/1.0.96...1.0.97)

---
updated-dependencies:
- dependency-name: proc-macro2
  dependency-version: 1.0.97
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-12 12:33:58 -04:00
dependabot[bot] ccf91dfb3e
build(deps): bump actions/checkout from 4.3.0 to 5.0.0 (#4060)
Bumps [actions/checkout](https://github.com/actions/checkout) from 4.3.0 to 5.0.0.
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](08eba0b27e...08c6903cd8)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-version: 5.0.0
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-12 16:18:00 +00:00
dependabot[bot] 69cd164da1
build(deps): bump thiserror from 2.0.12 to 2.0.14 (#4059)
Bumps [thiserror](https://github.com/dtolnay/thiserror) from 2.0.12 to 2.0.14.
- [Release notes](https://github.com/dtolnay/thiserror/releases)
- [Commits](https://github.com/dtolnay/thiserror/compare/2.0.12...2.0.14)

---
updated-dependencies:
- dependency-name: thiserror
  dependency-version: 2.0.14
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-12 15:56:38 +00:00
dependabot[bot] feb5f87713
build(deps): bump libc from 0.2.174 to 0.2.175 (#4057)
Bumps [libc](https://github.com/rust-lang/libc) from 0.2.174 to 0.2.175.
- [Release notes](https://github.com/rust-lang/libc/releases)
- [Changelog](https://github.com/rust-lang/libc/blob/0.2.175/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/libc/compare/0.2.174...0.2.175)

---
updated-dependencies:
- dependency-name: libc
  dependency-version: 0.2.175
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-11 12:14:04 -04:00
dependabot[bot] fdd7f218a3
build(deps): bump actions/checkout from 4.2.2 to 4.3.0 (#4053)
Bumps [actions/checkout](https://github.com/actions/checkout) from 4.2.2 to 4.3.0.
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](11bd71901b...08eba0b27e)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-version: 4.3.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-11 11:06:10 -04:00
dependabot[bot] b4e2b7e24f
build(deps): bump glob from 0.3.2 to 0.3.3 (#4055)
Bumps [glob](https://github.com/rust-lang/glob) from 0.3.2 to 0.3.3.
- [Release notes](https://github.com/rust-lang/glob/releases)
- [Changelog](https://github.com/rust-lang/glob/blob/master/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/glob/compare/v0.3.2...v0.3.3)

---
updated-dependencies:
- dependency-name: glob
  dependency-version: 0.3.3
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-11 11:05:55 -04:00
dependabot[bot] 1b07f277d7
build(deps): bump rustversion from 1.0.21 to 1.0.22 (#4056)
Bumps [rustversion](https://github.com/dtolnay/rustversion) from 1.0.21 to 1.0.22.
- [Release notes](https://github.com/dtolnay/rustversion/releases)
- [Commits](https://github.com/dtolnay/rustversion/compare/1.0.21...1.0.22)

---
updated-dependencies:
- dependency-name: rustversion
  dependency-version: 1.0.22
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-11 11:05:34 -04:00
dependabot[bot] 25cf0c7f11
build(deps): bump proc-macro2 from 1.0.95 to 1.0.96 (#4054)
Bumps [proc-macro2](https://github.com/dtolnay/proc-macro2) from 1.0.95 to 1.0.96.
- [Release notes](https://github.com/dtolnay/proc-macro2/releases)
- [Commits](https://github.com/dtolnay/proc-macro2/compare/1.0.95...1.0.96)

---
updated-dependencies:
- dependency-name: proc-macro2
  dependency-version: 1.0.96
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-11 14:52:05 +00:00
dependabot[bot] d46e7c0c82
build(deps): bump cc from 1.2.31 to 1.2.32 (#4051)
Bumps [cc](https://github.com/rust-lang/cc-rs) from 1.2.31 to 1.2.32.
- [Release notes](https://github.com/rust-lang/cc-rs/releases)
- [Changelog](https://github.com/rust-lang/cc-rs/blob/main/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/cc-rs/compare/cc-v1.2.31...cc-v1.2.32)

---
updated-dependencies:
- dependency-name: cc
  dependency-version: 1.2.32
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-08 13:28:21 -04:00
dependabot[bot] 3305a890b0
build(deps): bump slab from 0.4.10 to 0.4.11 (#4052)
Bumps [slab](https://github.com/tokio-rs/slab) from 0.4.10 to 0.4.11.
- [Release notes](https://github.com/tokio-rs/slab/releases)
- [Changelog](https://github.com/tokio-rs/slab/blob/master/CHANGELOG.md)
- [Commits](https://github.com/tokio-rs/slab/compare/v0.4.10...v0.4.11)

---
updated-dependencies:
- dependency-name: slab
  dependency-version: 0.4.11
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-08 13:28:01 -04:00
katelyn martin d850fa6f73
refactor(metrics): introduce a `legacy` namespace (#4050)
`linkerd-metrics` contains a suite of facilities for defining,
registering, and serving Prometheus metrics. these predate the
[`prometheus-client`](https://crates.io/crates/prometheus-client/)
crate, which should now be used for our metrics.

`linkerd-metrics` defines a `prom` namespace, which reëxports symbols
from the `prometheus-client` library. as the documentation comment for
this submodule notes, this should be used for all new metrics.

6b323d8457/linkerd/metrics/src/lib.rs (L30-L60)

`linkerd-metrics` still provides its legacy types in the public surface
of this library today, which can make it difficult to differentiate
between our two metrics implementations.

this branch introduces a new `legacy` namespace, to help clarify the
distinction between these two Prometheus implementations, and to smooth
the road to further adoption of `prometheus-client` interfaces across
the proxy.

---

* refactor(metrics): introduce empty `legacy` namespace

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor: move `Counter` into `legacy` namespace

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor: move `Gauge` into `legacy` namespace

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor: move `Histogram` into `legacy` namespace

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor: move `Metric` into `legacy` namespace

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor: move `FmtMetric` into `legacy` namespace

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor: move `FmtMetrics` into `legacy` namespace

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor: move `FmtLabels` into `legacy` namespace

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor: move `LastUpdate` into `legacy` namespace

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor: move `Store` into `legacy` namespace

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor: move `SharedStore` into `legacy` namespace

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor: move `Serve` into `legacy` namespace

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor: move `NewMetrics` into `legacy` namespace

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor: move `Factor` into `legacy` namespace

Signed-off-by: katelyn martin <kate@buoyant.io>

---------

Signed-off-by: katelyn martin <kate@buoyant.io>
2025-08-07 15:33:17 -04:00
dependabot[bot] 6b323d8457
build(deps): bump governor from 0.10.0 to 0.10.1 (#4049)
Bumps [governor](https://github.com/boinkor-net/governor) from 0.10.0 to 0.10.1.
- [Release notes](https://github.com/boinkor-net/governor/releases)
- [Changelog](https://github.com/boinkor-net/governor/blob/master/release.toml)
- [Commits](https://github.com/boinkor-net/governor/compare/v0.10.0...v0.10.1)

---
updated-dependencies:
- dependency-name: governor
  dependency-version: 0.10.1
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-07 10:37:18 -04:00
dependabot[bot] 7f58cd56ed
build(deps): bump tokio-metrics from 0.4.3 to 0.4.4 (#4048)
Bumps [tokio-metrics](https://github.com/tokio-rs/tokio-metrics) from 0.4.3 to 0.4.4.
- [Release notes](https://github.com/tokio-rs/tokio-metrics/releases)
- [Changelog](https://github.com/tokio-rs/tokio-metrics/blob/main/CHANGELOG.md)
- [Commits](https://github.com/tokio-rs/tokio-metrics/commits)

---
updated-dependencies:
- dependency-name: tokio-metrics
  dependency-version: 0.4.4
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-07 10:37:00 -04:00
Scott Fleener 43e3f630ec feat(tls): Include rustls crypto provider metrics
This includes a small set of metrics about the currently installed rustls crypto provider and the algorithms it is configured to use.

We don't have 100% assurance that a default crypto provider has been installed before registering the metric, but in local testing it never appeared to be a problem. When we refactor the rustls initialization we can add an extra guarantee that we've initialized it by this point.

Example metric:
```
# HELP rustls_info Proxy TLS info.
# TYPE rustls_info gauge
rustls_info{tls_suites="TLS13_AES_128_GCM_SHA256,TLS13_AES_256_GCM_SHA384,TLS13_CHACHA20_POLY1305_SHA256,",tls_kx_groups="X25519,secp256r1,secp384r1,X25519MLKEM768,",tls_rand="AwsLcRs",tls_key_provider="AwsLcRs",tls_fips="false"} 1
```

Signed-off-by: Scott Fleener <scott@buoyant.io>
2025-08-07 10:01:07 -04:00
Oliver Gould 7758436831
chore(ci): exercise all release platforms on workflow change (#4047)
We only build linux/amd64 during typical release CI runs. This means that the platform-
specific builds are not exercised. This change updates the release workflow so that
all platforms are built whenever the workflow itself is changed.
2025-08-06 14:29:05 +00:00
dependabot[bot] c0f921af33
build(deps): bump zerovec from 0.11.3 to 0.11.4 (#4046)
Bumps [zerovec](https://github.com/unicode-org/icu4x) from 0.11.3 to 0.11.4.
- [Release notes](https://github.com/unicode-org/icu4x/releases)
- [Changelog](https://github.com/unicode-org/icu4x/blob/main/CHANGELOG.md)
- [Commits](https://github.com/unicode-org/icu4x/compare/ind/zerovec@0.11.3...ind/zerovec@0.11.4)

---
updated-dependencies:
- dependency-name: zerovec
  dependency-version: 0.11.4
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-06 09:48:31 -04:00
Scott Fleener b558ce5320
chore(meshtls-rustls): use aws-lc as the default crypto backend (#4043)
The broader ecosystem has mostly moved to `aws-lc-rs` as the primary `rustls` backend, and we should follow suit. This will also simplify the maintenance of the proxy's TLS implementation in the long term.

This requires some extra configuration for successful cross-compilation, ideally we can remove this extra configuration once linkerd/dev v48 is available.

This doesn't remove `ring` as a crypto backend, that can come in a follow-up at https://github.com/linkerd/linkerd2-proxy/pull/4029
2025-08-05 13:22:26 -07:00
dependabot[bot] 894d3506df
build(deps): bump socket2 from 0.5.10 to 0.6.0 (#4003)
* build(deps): bump socket2 from 0.5.10 to 0.6.0

Bumps [socket2](https://github.com/rust-lang/socket2) from 0.5.10 to 0.6.0.
- [Release notes](https://github.com/rust-lang/socket2/releases)
- [Changelog](https://github.com/rust-lang/socket2/blob/master/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/socket2/commits)

---
updated-dependencies:
- dependency-name: socket2
  dependency-version: 0.6.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

* chore(proxy/transport): use `original_dst_v*` methods

these have been renamed.

Signed-off-by: katelyn martin <kate@buoyant.io>

---------

Signed-off-by: dependabot[bot] <support@github.com>
Signed-off-by: katelyn martin <kate@buoyant.io>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: katelyn martin <kate@buoyant.io>
2025-08-05 13:09:56 -07:00
dependabot[bot] 01e7ec0820
build(deps): bump zerovec from 0.11.2 to 0.11.3 (#4044)
Bumps [zerovec](https://github.com/unicode-org/icu4x) from 0.11.2 to 0.11.3.
- [Release notes](https://github.com/unicode-org/icu4x/releases)
- [Changelog](https://github.com/unicode-org/icu4x/blob/main/CHANGELOG.md)
- [Commits](https://github.com/unicode-org/icu4x/commits/ind/zerovec@0.11.3)

---
updated-dependencies:
- dependency-name: zerovec
  dependency-version: 0.11.3
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-05 13:00:54 -07:00
dependabot[bot] 4f563fab68
build(deps): bump tokio-metrics from 0.4.2 to 0.4.3 (#3995)
Bumps [tokio-metrics](https://github.com/tokio-rs/tokio-metrics) from 0.4.2 to 0.4.3.
- [Release notes](https://github.com/tokio-rs/tokio-metrics/releases)
- [Changelog](https://github.com/tokio-rs/tokio-metrics/blob/main/CHANGELOG.md)
- [Commits](https://github.com/tokio-rs/tokio-metrics/compare/v0.4.2...v0.4.3)

---
updated-dependencies:
- dependency-name: tokio-metrics
  dependency-version: 0.4.3
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-05 07:50:44 -07:00
dependabot[bot] 168c4bff7d
build(deps): bump tokio from 1.45.0 to 1.47.1 (#4040)
Bumps [tokio](https://github.com/tokio-rs/tokio) from 1.45.0 to 1.47.1.
- [Release notes](https://github.com/tokio-rs/tokio/releases)
- [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.45.0...tokio-1.47.1)

---
updated-dependencies:
- dependency-name: tokio
  dependency-version: 1.47.1
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-05 09:44:53 -04:00
dependabot[bot] 0df8cdbedb
build(deps): bump cc from 1.2.30 to 1.2.31 (#4042)
Bumps [cc](https://github.com/rust-lang/cc-rs) from 1.2.30 to 1.2.31.
- [Release notes](https://github.com/rust-lang/cc-rs/releases)
- [Changelog](https://github.com/rust-lang/cc-rs/blob/main/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/cc-rs/compare/cc-v1.2.30...cc-v1.2.31)

---
updated-dependencies:
- dependency-name: cc
  dependency-version: 1.2.31
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-04 14:59:55 -07:00
dependabot[bot] 1b837b7f91
build(deps): bump tokio-util from 0.7.15 to 0.7.16 (#4041)
Bumps [tokio-util](https://github.com/tokio-rs/tokio) from 0.7.15 to 0.7.16.
- [Release notes](https://github.com/tokio-rs/tokio/releases)
- [Commits](https://github.com/tokio-rs/tokio/compare/tokio-util-0.7.15...tokio-util-0.7.16)

---
updated-dependencies:
- dependency-name: tokio-util
  dependency-version: 0.7.16
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-04 14:59:00 -07:00
katelyn martin 40078c96ca
chore(deps): update from tokio 1.45 to 1.47 (#4032)
* build(deps): bump tokio from 1.45.0 to 1.47.0

Bumps [tokio](https://github.com/tokio-rs/tokio) from 1.45.0 to 1.47.0.
- [Release notes](https://github.com/tokio-rs/tokio/releases)
- [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.45.0...tokio-1.47.0)

---
updated-dependencies:
- dependency-name: tokio
  dependency-version: 1.47.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

---

chore(deny): ignore socket2@v0.5

there is now a v0.6 used by the latest tokio.

while we wait for this new version to propagate through the ecosystem,
allow for two socket2 dependencies.

Signed-off-by: katelyn martin <kate@buoyant.io>

* chore(app/integration): remove inbound_io_err test

> @cratelyn I think it would be appropriate to remove these tests, given
> that they can no longer behave properly. I don't think that this test
> case is particularly meaningful or load bearing, it's best just to
> unblock the dependency updates.

\- <https://github.com/BuoyantIO/enterprise-linkerd/issues/1645#issuecomment-3046905516>

Co-authored-by: Oliver Gould <ver@buoyant.io>
Signed-off-by: katelyn martin <kate@buoyant.io>

* chore(app/integration): remove inbound_multi test

this test exercises the same thing that the previous two tests do, as
the comment at the top of it points out.

this test is redundant, and we have removed the i/o error coverage that
this was redundant with. let's remove it.

Signed-off-by: katelyn martin <kate@buoyant.io>

---------

Signed-off-by: katelyn martin <kate@buoyant.io>
Co-authored-by: Oliver Gould <ver@buoyant.io>
2025-08-04 09:04:02 -07:00
dependabot[bot] 9eaf1425a7
build(deps): bump signal-hook-registry from 1.4.5 to 1.4.6 (#4039)
Bumps [signal-hook-registry](https://github.com/vorner/signal-hook) from 1.4.5 to 1.4.6.
- [Changelog](https://github.com/vorner/signal-hook/blob/master/CHANGELOG.md)
- [Commits](https://github.com/vorner/signal-hook/compare/registry-v1.4.5...registry-v1.4.6)

---
updated-dependencies:
- dependency-name: signal-hook-registry
  dependency-version: 1.4.6
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-04 08:08:41 -07:00
dependabot[bot] 842452368c
build(deps): bump rustc-demangle from 0.1.25 to 0.1.26 (#4026)
Bumps [rustc-demangle](https://github.com/rust-lang/rustc-demangle) from 0.1.25 to 0.1.26.
- [Release notes](https://github.com/rust-lang/rustc-demangle/releases)
- [Changelog](https://github.com/rust-lang/rustc-demangle/blob/main/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/rustc-demangle/commits/rustc-demangle-v0.1.26)

---
updated-dependencies:
- dependency-name: rustc-demangle
  dependency-version: 0.1.26
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-04 11:07:17 -04:00
dependabot[bot] 09333dc2b2
build(deps): bump the symbolic group with 2 updates (#4035)
Bumps the symbolic group with 2 updates: [symbolic-common](https://github.com/getsentry/symbolic) and [symbolic-demangle](https://github.com/getsentry/symbolic).


Updates `symbolic-common` from 12.16.0 to 12.16.1
- [Release notes](https://github.com/getsentry/symbolic/releases)
- [Changelog](https://github.com/getsentry/symbolic/blob/master/CHANGELOG.md)
- [Commits](https://github.com/getsentry/symbolic/compare/12.16.0...12.16.1)

Updates `symbolic-demangle` from 12.16.0 to 12.16.1
- [Release notes](https://github.com/getsentry/symbolic/releases)
- [Changelog](https://github.com/getsentry/symbolic/blob/master/CHANGELOG.md)
- [Commits](https://github.com/getsentry/symbolic/compare/12.16.0...12.16.1)

---
updated-dependencies:
- dependency-name: symbolic-common
  dependency-version: 12.16.1
  dependency-type: indirect
  update-type: version-update:semver-patch
  dependency-group: symbolic
- dependency-name: symbolic-demangle
  dependency-version: 12.16.1
  dependency-type: indirect
  update-type: version-update:semver-patch
  dependency-group: symbolic
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Oliver Gould <ver@buoyant.io>
2025-08-01 18:28:55 +00:00
dependabot[bot] ddc847ccc4
build(deps): bump serde_json from 1.0.141 to 1.0.142 (#4036)
Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.141 to 1.0.142.
- [Release notes](https://github.com/serde-rs/json/releases)
- [Commits](https://github.com/serde-rs/json/compare/v1.0.141...v1.0.142)

---
updated-dependencies:
- dependency-name: serde_json
  dependency-version: 1.0.142
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-01 11:23:49 -07:00
Scott Fleener 8d56746c1f
feat!(ci): Remove arm/v7 support (#4037)
This architecture has become too significant of a maintenance burden, and isn't used often enough to justify the associated maintenance cost.

This removes arm/v7 from all the build infrastructure/dockerfiles/etc. Note that arm64 targets are still widely used and well supported.

Related: https://github.com/linkerd/linkerd2/pull/14308

Signed-off-by: Scott Fleener <scott@buoyant.io>
2025-08-01 13:26:12 -04:00
dependabot[bot] db0ed46978
build(deps): bump rustls from 0.23.29 to 0.23.31 in the rustls group (#4034)
Bumps the rustls group with 1 update: [rustls](https://github.com/rustls/rustls).


Updates `rustls` from 0.23.29 to 0.23.31
- [Release notes](https://github.com/rustls/rustls/releases)
- [Changelog](https://github.com/rustls/rustls/blob/main/CHANGELOG.md)
- [Commits](https://github.com/rustls/rustls/compare/v/0.23.29...v/0.23.31)

---
updated-dependencies:
- dependency-name: rustls
  dependency-version: 0.23.31
  dependency-type: indirect
  update-type: version-update:semver-patch
  dependency-group: rustls
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-31 05:53:48 -07:00
katelyn martin 1d94082d4b
chore(deps): downgrade deranged from 0.4.1 to 0.4.0 (#4031)
- https://crates.io/crates/deranged/versions
- https://crates.io/crates/deranged/0.4.1

this version has been yanked. this commit addresses this cargo deny
warning:

```
warning[yanked]: detected yanked crate (try `cargo update -p deranged`)
   ┌─ /home/katie/linkerd/linkerd2-proxy/Cargo.lock:41:1
   │
41 │ deranged 0.4.1 registry+https://github.com/rust-lang/crates.io-index
   │ ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ yanked version
   │
```

Signed-off-by: katelyn martin <kate@buoyant.io>
2025-07-29 18:14:11 +00:00
dependabot[bot] 744e29e0bd
build(deps): bump rangemap from 1.5.1 to 1.6.0 (#4028)
Bumps [rangemap](https://github.com/jeffparsons/rangemap) from 1.5.1 to 1.6.0.
- [Changelog](https://github.com/jeffparsons/rangemap/blob/main/CHANGELOG.md)
- [Commits](https://github.com/jeffparsons/rangemap/commits)

---
updated-dependencies:
- dependency-name: rangemap
  dependency-version: 1.6.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-29 10:59:35 -04:00
katelyn martin 83373d6b89
chore(deps): bump h2 from 0.4.8 to 0.4.11 (#4024)
- https://github.com/hyperium/h2/compare/v0.4.8...v0.4.11

Signed-off-by: katelyn martin <kate@buoyant.io>
2025-07-28 21:23:25 +00:00
Scott Fleener 1dcb7a7d1a
fix(rustls): Make `ring` and `aws-lc-rs` exclusive features (#4009)
Currently, disabling the `ring` feature does not actually disable the dependency across the tree. Doing so requires a couple of tightly coupled steps:

- Making `ring` and `aws-lc` exclusive features, raising a compile error if they are both enabled.
- Removing a direct dependency on some `ring` types, and instead going through `rustls` for equivalent functionality.
- Removing a direct dependency on the `ring` crypto provider for integration tests, and instead using the provider from `linkerd-meshtls`.
- Installing the default crypto provider globally for the process and re-using it when requested, mostly to make the tests pass.

This was tested using a temporary `cargo deny` config that forbid `ring` when `aws-lc-rs` was used, and vice-versa. Note that it doesn't completely remove ring for dev dependencies, but that can be done as a follow-up.

Signed-off-by: Scott Fleener <scott@buoyant.io>
2025-07-28 15:28:47 -04:00
Scott Fleener 68a6b6d1e8
fix(tls): Update preferred cipher suite order (#4015)
This makes two changes to the preferred cipher suite order.
- Prefer AES algorithms over ChaCha20. AES is significantly faster when AES hardware is present, and AES hardware is on all x86 CPUs since ~2010, and all ARM server CPUs for a similar amount of time. For these reasons it's reasonable to default to AES for modern deployments, and it's the same default that `aws-lc-rs` makes anyway.
- Remove ChaCha20 when FIPS is enabled. It's no longer a supported algorithm, so we shouldn't have it as an option.

Signed-off-by: Scott Fleener <scott@buoyant.io>
2025-07-28 15:05:05 -04:00
Oliver Gould cdbb55fd53
build(linkerd2-proxy): make release artifacts auditable (#4023)
Auditing tools like Syft cannot inspect proxy dependencies, which makes it difficult to inspect the state of a binary. This change updates the release process to use cargo-auditable, which documents the proxy's crate dependencies in its release binary.
2025-07-28 17:00:06 +00:00
Oliver Gould 5997453393
build(deps): update linkerd/dev to v47 (#4022) 2025-07-28 11:49:06 -05:00
Joe 10643b9525
updating metrics descriptions (#4020)
Signed-off-by: Joe F <joe@buoyant.io>
2025-07-25 20:08:56 +00:00
dependabot[bot] fd0ea24b87
build(deps): bump serde_json from 1.0.140 to 1.0.141 (#4021)
Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.140 to 1.0.141.
- [Release notes](https://github.com/serde-rs/json/releases)
- [Commits](https://github.com/serde-rs/json/compare/v1.0.140...v1.0.141)

---
updated-dependencies:
- dependency-name: serde_json
  dependency-version: 1.0.141
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-25 16:07:16 -04:00
dependabot[bot] 3a159be91a
build(deps): bump prettyplease from 0.2.35 to 0.2.36 (#4018)
Bumps [prettyplease](https://github.com/dtolnay/prettyplease) from 0.2.35 to 0.2.36.
- [Release notes](https://github.com/dtolnay/prettyplease/releases)
- [Commits](https://github.com/dtolnay/prettyplease/compare/0.2.35...0.2.36)

---
updated-dependencies:
- dependency-name: prettyplease
  dependency-version: 0.2.36
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-24 10:03:57 -04:00
dependabot[bot] b3bc6fe8cd
build(deps): bump hyper-util from 0.1.15 to 0.1.16 (#4019)
Bumps [hyper-util](https://github.com/hyperium/hyper-util) from 0.1.15 to 0.1.16.
- [Release notes](https://github.com/hyperium/hyper-util/releases)
- [Changelog](https://github.com/hyperium/hyper-util/blob/master/CHANGELOG.md)
- [Commits](https://github.com/hyperium/hyper-util/compare/v0.1.15...v0.1.16)

---
updated-dependencies:
- dependency-name: hyper-util
  dependency-version: 0.1.16
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-24 10:02:38 -04:00
dependabot[bot] 5a00b70d11
build(deps): bump aws-lc-rs from 1.13.2 to 1.13.3 (#4016)
Bumps [aws-lc-rs](https://github.com/aws/aws-lc-rs) from 1.13.2 to 1.13.3.
- [Release notes](https://github.com/aws/aws-lc-rs/releases)
- [Commits](https://github.com/aws/aws-lc-rs/compare/v1.13.2...v1.13.3)

---
updated-dependencies:
- dependency-name: aws-lc-rs
  dependency-version: 1.13.3
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-23 07:02:52 -07:00
dependabot[bot] b91dd3e7af
build(deps): bump rcgen from 0.14.2 to 0.14.3 (#4017)
Bumps [rcgen](https://github.com/rustls/rcgen) from 0.14.2 to 0.14.3.
- [Release notes](https://github.com/rustls/rcgen/releases)
- [Commits](https://github.com/rustls/rcgen/compare/v0.14.2...v0.14.3)

---
updated-dependencies:
- dependency-name: rcgen
  dependency-version: 0.14.3
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-23 07:02:25 -07:00
dependabot[bot] a699b1cf58
build(deps): bump cc from 1.2.29 to 1.2.30 (#4014)
Bumps [cc](https://github.com/rust-lang/cc-rs) from 1.2.29 to 1.2.30.
- [Release notes](https://github.com/rust-lang/cc-rs/releases)
- [Changelog](https://github.com/rust-lang/cc-rs/blob/main/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/cc-rs/compare/cc-v1.2.29...cc-v1.2.30)

---
updated-dependencies:
- dependency-name: cc
  dependency-version: 1.2.30
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-22 09:35:32 -04:00
dependabot[bot] 9f3c45874e
build(deps): bump rand from 0.9.1 to 0.9.2 (#4013)
Bumps [rand](https://github.com/rust-random/rand) from 0.9.1 to 0.9.2.
- [Release notes](https://github.com/rust-random/rand/releases)
- [Changelog](https://github.com/rust-random/rand/blob/master/CHANGELOG.md)
- [Commits](https://github.com/rust-random/rand/compare/rand_core-0.9.1...rand_core-0.9.2)

---
updated-dependencies:
- dependency-name: rand
  dependency-version: 0.9.2
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-22 09:35:19 -04:00
Oliver Gould 1de179e178
chore(deps): update jemallocator 0.5 to tikv-jemallocator 0.6 (#4012)
tikv-jemallocator supersedes jemallocator. To enable jemalloc profiling, this change updates the dependency and adds a `jemalloc-profiling` feature so that profiling can be enabled at build time.
2025-07-18 17:42:41 -04:00
dependabot[bot] f9d7e08242
build(deps): bump the rustls group with 2 updates (#4010)
---
updated-dependencies:
- dependency-name: rustls-webpki
  dependency-version: 0.103.4
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: rustls
- dependency-name: rustls
  dependency-version: 0.23.29
  dependency-type: indirect
  update-type: version-update:semver-patch
  dependency-group: rustls
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-18 07:48:31 -07:00
dependabot[bot] bd454b4be8
build(deps): bump the symbolic group with 2 updates (#4011)
---
updated-dependencies:
- dependency-name: symbolic-common
  dependency-version: 12.16.0
  dependency-type: indirect
  update-type: version-update:semver-minor
  dependency-group: symbolic
- dependency-name: symbolic-demangle
  dependency-version: 12.16.0
  dependency-type: indirect
  update-type: version-update:semver-minor
  dependency-group: symbolic
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-18 07:48:06 -07:00
Oliver Gould b1f8fa5419
chore(ci): enable overriding the runner in workflows (#4008)
We use the ubuntu-24.04 runner by default, but in forks this may not be appropriate. This change updates the runners to support overriding via the LINKERD2_PROXY_RUNNER variable.
2025-07-17 17:22:42 -07:00
dependabot[bot] e04947610a
build(deps): bump aws-lc-rs from 1.13.1 to 1.13.2 (#4006)
---
updated-dependencies:
- dependency-name: aws-lc-rs
  dependency-version: 1.13.2
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-17 11:05:10 -07:00
Oliver Gould 9c48e2471e
fix(app/env): limit default inbound connection pool size (#4007)
The inbound connection pool is effectively unlimited. This change configures a
default limit of 10K.
2025-07-17 16:47:32 +00:00
dependabot[bot] df38a5a2c9
build(deps): bump the rustls group across 1 directory with 3 updates (#3908)
* build(deps): bump the rustls group across 1 directory with 3 updates

Bumps the rustls group with 3 updates in the / directory: [rustls-webpki](https://github.com/rustls/webpki), [rustls](https://github.com/rustls/rustls) and [rustls-pki-types](https://github.com/rustls/pki-types).


Updates `rustls-webpki` from 0.103.1 to 0.103.2
- [Release notes](https://github.com/rustls/webpki/releases)
- [Commits](https://github.com/rustls/webpki/compare/v/0.103.1...v/0.103.2)

Updates `rustls` from 0.23.26 to 0.23.27
- [Release notes](https://github.com/rustls/rustls/releases)
- [Changelog](https://github.com/rustls/rustls/blob/main/CHANGELOG.md)
- [Commits](https://github.com/rustls/rustls/compare/v/0.23.26...v/0.23.27)

Updates `rustls-pki-types` from 1.11.0 to 1.12.0
- [Release notes](https://github.com/rustls/pki-types/releases)
- [Commits](https://github.com/rustls/pki-types/compare/v/1.11.0...v/1.12.0)

---
updated-dependencies:
- dependency-name: rustls-webpki
  dependency-version: 0.103.2
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: rustls
- dependency-name: rustls
  dependency-version: 0.23.27
  dependency-type: indirect
  update-type: version-update:semver-patch
  dependency-group: rustls
- dependency-name: rustls-pki-types
  dependency-version: 1.12.0
  dependency-type: indirect
  update-type: version-update:semver-minor
  dependency-group: rustls
...

Signed-off-by: dependabot[bot] <support@github.com>

* fix(rustls): Remove dependency on most rustls internal types

We only used these types for generating a ClientHello message for testing. Instead, we can manually encode a sample message based on the TLS spec.

Signed-off-by: Scott Fleener <scott@buoyant.io>

---------

Signed-off-by: dependabot[bot] <support@github.com>
Signed-off-by: Scott Fleener <scott@buoyant.io>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Scott Fleener <scott@buoyant.io>
2025-07-17 16:20:37 +00:00
dependabot[bot] 7c6882bb35
build(deps): bump rcgen from 0.13.2 to 0.14.2 (#4000)
* build(deps): bump rcgen from 0.13.2 to 0.14.2

Bumps [rcgen](https://github.com/rustls/rcgen) from 0.13.2 to 0.14.2.
- [Release notes](https://github.com/rustls/rcgen/releases)
- [Commits](https://github.com/rustls/rcgen/compare/v0.13.2...v0.14.2)

---
updated-dependencies:
- dependency-name: rcgen
  dependency-version: 0.14.2
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

* fix(test): Fix breaking changes from rcgen 0.14

Signed-off-by: Scott Fleener <scott@buoyant.io>

---------

Signed-off-by: dependabot[bot] <support@github.com>
Signed-off-by: Scott Fleener <scott@buoyant.io>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Scott Fleener <scott@buoyant.io>
2025-07-16 19:34:47 -04:00
katelyn martin a6e47d7e03
fix(app/env): a lower default maximum per-host connection limit (#4005)
* chore(app/env): fix typo

Signed-off-by: katelyn martin <kate@buoyant.io>

* fix(app/env): a lower default maximum per-host connection limit

see also:
* #4004
* linkerd/linkerd2#14204

in #4004 we fixed an issue related to our HTTP/1.1 client's connection
pool.

this further hedges against future issues related to our HTTP client
exhausting resources available to its container. today, the limit by
default is `usize::MAX`, which is dramatically higher than the practical
limit.

this commit changes the limit for outbound idle connections per-host to
10,000.

Signed-off-by: katelyn martin <kate@buoyant.io>

---------

Signed-off-by: katelyn martin <kate@buoyant.io>
2025-07-15 13:01:54 -04:00
Oliver Gould 2cc8c7d80e
fix(proxy/http): fix HTTP/1 client idle timeouts (#4004)
When constructing the HTTP/1 client, we configure connection pooling, but
notably do not provide a timer implementation to Hyper. This causes hyper's
connection pool to be configured without idle timeouts, which may lead to
resource leaks, especially for clients that communicate with many virtual hosts.

This change updates the HTTP/1 client builder to use a Tokio timer, which allows
Hyper to manage idle timeouts correctly.
2025-07-14 14:56:35 -07:00
dependabot[bot] 21f3ffc6c1
build(deps): bump crc32fast from 1.4.2 to 1.5.0 (#4002)
Bumps [crc32fast](https://github.com/srijs/rust-crc32fast) from 1.4.2 to 1.5.0.
- [Commits](https://github.com/srijs/rust-crc32fast/compare/v1.4.2...v1.5.0)

---
updated-dependencies:
- dependency-name: crc32fast
  dependency-version: 1.5.0
  dependency-type: indirect
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-14 11:49:32 -04:00
dependabot[bot] 1b85cf93a4
build(deps): bump memmap2 from 0.9.5 to 0.9.7 (#4001)
Bumps [memmap2](https://github.com/RazrFalcon/memmap2-rs) from 0.9.5 to 0.9.7.
- [Changelog](https://github.com/RazrFalcon/memmap2-rs/blob/master/CHANGELOG.md)
- [Commits](https://github.com/RazrFalcon/memmap2-rs/commits)

---
updated-dependencies:
- dependency-name: memmap2
  dependency-version: 0.9.7
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-14 07:02:56 -07:00
dependabot[bot] ce5df7d026
build(deps): bump cc from 1.2.27 to 1.2.29 (#3999)
Bumps [cc](https://github.com/rust-lang/cc-rs) from 1.2.27 to 1.2.29.
- [Release notes](https://github.com/rust-lang/cc-rs/releases)
- [Changelog](https://github.com/rust-lang/cc-rs/blob/main/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/cc-rs/compare/cc-v1.2.27...cc-v1.2.29)

---
updated-dependencies:
- dependency-name: cc
  dependency-version: 1.2.29
  dependency-type: indirect
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-11 15:17:08 -04:00
Scott Fleener 79e612c2f9
feat(meshtls): Include AES_256_GCM as a supported ciphersuite (#3991)
This is a strong ciphersuite that's reasonable to include as a supported option. We still prefer CHACHA20_POLY1305 in non-FIPS modes for its speed, as well as keeping CHACHA20_POLY1305 as a backup for older proxies that only support it.

Signed-off-by: Scott Fleener <scott@buoyant.io>
Co-authored-by: Oliver Gould <ver@buoyant.io>
2025-07-10 15:23:47 -04:00
Oliver Gould 62ed64ea05
chore(build): bump linkerd/dev to v46 (#3984)
* Rust 1.88
2025-07-10 14:15:24 -04:00
dependabot[bot] e8de6359a5
build(deps): bump hyper-util from 0.1.14 to 0.1.15 (#3997)
Bumps [hyper-util](https://github.com/hyperium/hyper-util) from 0.1.14 to 0.1.15.
- [Release notes](https://github.com/hyperium/hyper-util/releases)
- [Changelog](https://github.com/hyperium/hyper-util/blob/master/CHANGELOG.md)
- [Commits](https://github.com/hyperium/hyper-util/compare/v0.1.14...v0.1.15)

---
updated-dependencies:
- dependency-name: hyper-util
  dependency-version: 0.1.15
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-10 12:50:50 -04:00
dependabot[bot] 53a17808a7
build(deps): bump libfuzzer-sys from 0.4.9 to 0.4.10 (#3994)
Bumps [libfuzzer-sys](https://github.com/rust-fuzz/libfuzzer) from 0.4.9 to 0.4.10.
- [Changelog](https://github.com/rust-fuzz/libfuzzer/blob/main/CHANGELOG.md)
- [Commits](https://github.com/rust-fuzz/libfuzzer/compare/0.4.9...0.4.10)

---
updated-dependencies:
- dependency-name: libfuzzer-sys
  dependency-version: 0.4.10
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-08 09:17:27 -04:00
katelyn martin 34b46ab6cd
refactor: `FmtLabels` impls use exhaustive bindings (#3988)
this is based on #3987.

in #3987 (_see https://github.com/linkerd/linkerd2/issues/13821_) we discovered that some of the types that implement [`FmtLabels`](085be9978d/linkerd/metrics/src/fmt.rs (L5)) could collide when used in registry keys; i.e., they might emit identical label sets, but distinct `Hash` values.

#3987 solves two bugs. this pull request proposes a follow-on change, introducing _exhaustive_ bindings to implementations of `FmtLabels`, to prevent this category of bug from reoccurring again in the future.

this change means that the introduction of an additional field to any of these label structures, e.g. `OutboundEndpointLabels` or `HTTPLocalRateLimitLabels`, will cause a compilation error unless said new field is handled in the corresponding `FmtLabels` implementation.

### 🔖 a note

in writing this pull request, i noticed one label that i believe is unintentionally being elided. i've refrained from changing behavior in this pull request. i do note it though, as an example of this syntax identifying the category of bug i hope to hedge against here.

---

* fix: do not key transport metrics registry on `ClientTls`

Signed-off-by: katelyn martin <kate@buoyant.io>

* fix: do not key transport metrics registry on `ServerTls`

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor(transport-metrics): exhaustive `Eos: FmtLabels`

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor(app/core): exhaustive `ServerLabels: FmtLabels`

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor(app/core): exhaustive `TlsAccept: FmtLabels`

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor(app/core): exhaustive `TargetAddr: FmtLabels`

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor(metrics): exhaustive `Label: FmtLabels`

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor(http/metrics): exhaustive `Status: FmtLabels`

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor(app/core): exhaustive `ControlLabels: FmtLabels`

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor(app/core): exhaustive `ProfileRouteLabels: FmtLabels`

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor(app/core): exhaustive `InboundEndpointLabels: FmtLabels`

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor(app/core): exhaustive `ServerLabel: FmtLabels`

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor(app/core): exhaustive `ServerAuthzLabels: FmtLabels`

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor(app/core): exhaustive `RouteLabels: FmtLabels`

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor(app/core): exhaustive `RouteAuthzLabels: FmtLabels`

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor(app/core): exhaustive `OutboundEndpointLabels: FmtLabels`

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor(app/core): exhaustive `Authority: FmtLabels`

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor(app/core): exhaustive `StackLabels: FmtLabels`

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor(app/inbound): exhaustive `HTTPLocalRateLimitLabels: FmtLabels`

Signed-off-by: katelyn martin <kate@buoyant.io>

* refactor(app/inbound): exhaustive `Key<L>: FmtLabels`

Signed-off-by: katelyn martin <kate@buoyant.io>

* nit(metrics): remove redundant banner comment

these impl blocks are all `FmtLabels`, following another series of the
same, above. we don't need another one of these comments.

Signed-off-by: katelyn martin <kate@buoyant.io>

* nit(metrics): exhaustive `AndThen: FmtMetrics`

Signed-off-by: katelyn martin <kate@buoyant.io>

* nit(app/core): note unused label

see #3262 (618838ec7), which introduced this label.

to preserve behavior, this label remains unused.

X-Ref: #3262
Signed-off-by: katelyn martin <kate@buoyant.io>

---------

Signed-off-by: katelyn martin <kate@buoyant.io>
2025-07-03 11:56:14 -04:00
Oliver Gould 288fc74800
chore(app): increase default max backoff durations (#3992)
The inbound and outbound connect backoffs are now set at 500ms. This is very aggressive in practice, especially when an endpoint remains unavailable.

This change increases the maximum backoff durations:

* inbound: 10s
* outbound: 60s

The default minimum backoff durations remain unchanged at 100ms so that failed
connections are retried quickly. This change only increases the default _maximum_ backoff so that the timeout increases substantially when an endpoint is unavailable for a longer period of time.
2025-07-02 18:52:53 -07:00
katelyn martin 030fa28d55
fix: remove ambiguous metrics registry keys (#3987)
### 🖼️ background

the linkerd2 proxy implements, registers, and exports Prometheus metrics using a variety of systems, for historical reasons. new metrics broadly rely upon the official [`prometheus-client`](https://github.com/prometheus/client_rust/) library, whose interfaces are reexported for internal consumption in the [`linkerd_metrics::prom`](https://github.com/linkerd/linkerd2-proxy/blob/main/linkerd/metrics/src/lib.rs#L30-L60) namespace.

other metrics predate this library however, and rely on the metrics registry implemented in the workspace's [`linkerd-metrics`](https://github.com/linkerd/linkerd2-proxy/tree/main/linkerd/metrics) library.

### 🐛 bug report

* https://github.com/linkerd/linkerd2/issues/13821

linkerd/linkerd2#13821 reported a bug in which duplicate metrics could be observed and subsequently dropped by Prometheus when upgrading the control plane via helm with an existing workload running.

### 🦋 reproduction example

for posterity, i'll note the reproduction steps here.

i used these steps to identify the `2025.3.2` edge release as the affected release. upgrading from `2025.2.3` to `2025.3.1` did not exhibit this behavior. see below for more discussion about the cause.

generate certificates via <https://linkerd.io/2.18/tasks/generate-certificates/>

using these two deployments, courtesy of @GTRekter:

<details>
<summary>**💾 click to expand: app deployment**</summary>

```yaml
apiVersion: v1 
kind: Namespace 
metadata: 
  name: simple-app 
  annotations: 
    linkerd.io/inject: enabled 
---
apiVersion: v1 
kind: Service 
metadata: 
  name: simple-app-v1 
  namespace: simple-app 
spec: 
  selector: 
    app: simple-app-v1 
    version: v1 
  ports: 
    - port: 80 
      targetPort: 5678
---
apiVersion: apps/v1 
kind: Deployment 
metadata: 
  name: simple-app-v1 
  namespace: simple-app 
spec: 
  replicas: 1 
  selector: 
    matchLabels: 
      app: simple-app-v1 
      version: v1 
  template: 
    metadata: 
      labels: 
        app: simple-app-v1 
        version: v1 
    spec: 
      containers: 
        - name: http-app 
          image: hashicorp/http-echo:latest 
          args: 
            - "-text=Simple App v1" 
          ports: 
            - containerPort: 5678 
```
</details>

<details>
<summary>**🤠 click to expand: client deployment**</summary>

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: traffic
  namespace: simple-app
spec:
  replicas: 1
  selector:
    matchLabels:
      app: traffic
  template:
    metadata:
      labels:
        app: traffic
    spec:
      containers:
      - name: traffic
        image: curlimages/curl:latest
        command:
          - /bin/sh
          - -c
          - |
            while true; do
              TIMESTAMP_SEND=$(date '+%Y-%m-%d %H:%M:%S')
              PAYLOAD="{\"timestamp\":\"$TIMESTAMP_SEND\",\"test_id\":\"sniff_me\",\"message\":\"hello-world\"}"
              echo "$TIMESTAMP_SEND - Sending payload: $PAYLOAD"
              RESPONSE=$(curl -s -X POST \
                -H "Content-Type: application/json" \
                -d "$PAYLOAD" \
                http://simple-app-v1.simple-app.svc.cluster.local:80)
              TIMESTAMP_RESPONSE=$(date '+%Y-%m-%d %H:%M:%S')
              echo "$TIMESTAMP_RESPONSE - RESPONSE: $RESPONSE"
              sleep 1
            done
```
</details>

and this prometheus configuration:

<details>
<summary>**🔥 click to expand: prometheus configuration**</summary>

```yaml
global:
  scrape_interval: 10s

scrape_configs:
  - job_name: 'pod'
    scrape_interval: 10s
    static_configs:
    - targets: ['localhost:4191']
      labels:
        group: 'traffic'
```
</details>

we will perform the following steps:

```sh
# install the edge release

# specify the versions we'll migrate between.
export FROM="2025.3.1"
export TO="2025.3.2"

# create a cluster, and add the helm charts.
kind create cluster
helm repo add linkerd-edge https://helm.linkerd.io/edge

# install linkerd's crd's and control plane.
helm install linkerd-crds linkerd-edge/linkerd-crds \
  -n linkerd --create-namespace --version $FROM

helm install linkerd-control-plane \
  -n linkerd \
  --set-file identityTrustAnchorsPEM=cert/ca.crt \
  --set-file identity.issuer.tls.crtPEM=cert/issuer.crt \
  --set-file identity.issuer.tls.keyPEM=cert/issuer.key \
  --version $FROM \
  linkerd-edge/linkerd-control-plane

# install a simple app and a client to drive traffic.
kubectl apply -f duplicate-metrics-simple-app.yml
kubectl apply -f duplicate-metrics-traffic.yml

# bind the traffic pod's metrics port to the host.
kubectl port-forward -n simple-app deploy/traffic 4191

# start prometheus, begin scraping metrics
prometheus --config.file=prometheus.yml
```

now, open a browser and query `irate(request_total[1m])`.

next, upgrade the control plane:

```
helm upgrade linkerd-crds linkerd-edge/linkerd-crds \
  -n linkerd --create-namespace --version $TO
helm upgrade linkerd-control-plane \
  -n linkerd \
  --set-file identityTrustAnchorsPEM=cert/ca.crt \
  --set-file identity.issuer.tls.crtPEM=cert/issuer.crt \
  --set-file identity.issuer.tls.keyPEM=cert/issuer.key \
  --version $TO \
  linkerd-edge/linkerd-control-plane
```

prometheus will begin emitting warnings regarding 34 time series being dropped.

in your browser, querying `irate(request_total[1m])` once more will show that
the rate of requests has stopped, due to the new time series being dropped.

next, restart the workloads...

```
kubectl rollout restart deployment -n simple-app simple-app-v1 traffic
```

prometheus warnings will go away, as reported in linkerd/linkerd2#13821.

### 🔍 related changes

* https://github.com/linkerd/linkerd2/pull/13699
* https://github.com/linkerd/linkerd2/pull/13715

in linkerd/linkerd2#13715 and linkerd/linkerd2#13699, we made some changes to the destination controller. from the "Cautions" section of the `2025.3.2` edge release:

> Additionally, this release changes the default for `outbound-transport-mode`
> to `transport-header`, which will result in all traffic between meshed
> proxies flowing on port 4143, rather than using the original destination
> port.

linkerd/linkerd2#13699 (_included in `edge-25.3.1`_) introduced this outbound transport-protocol configuration surface, but maintained the default behavior, while linkerd/linkerd2#13715 (_included in `edge-25.3.2`_) altered the default behavior to route meshed traffic via port 4143.

this is a visible change in behavior that can be observed when upgrading from a version that preceded this change to the mesh. this means that when upgrading across `edge-25.3.2`, such as from the `2025.2.1` to `2025.3.2` versions of the helm charts, or from the `2025.2.3` to the `2025.3.4` versions of the helm charts (_reported upstream in linkerd/linkerd2#13821_), the freshly upgraded destination controller pods will begin routing meshed traffic differently.

i'll state explicitly, _that_ is not a bug! it is, however, an important clue to bear in mind: data plane pods that were started with the previous control plane version, and continue running after the control plane upgrade, will have seen both routing patterns. reporting a duplicate time series for affected metrics indicates that there is a hashing collision in our metrics system.

### 🐛 the bug(s)

we define a collection of structures to model labels for inbound and outbound endpoints'
metrics:

```rust
// linkerd/app/core/src/metrics.rs

#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub enum EndpointLabels {
    Inbound(InboundEndpointLabels),
    Outbound(OutboundEndpointLabels),
}

#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct InboundEndpointLabels {
    pub tls: tls::ConditionalServerTls,
    pub authority: Option<http::uri::Authority>,
    pub target_addr: SocketAddr,
    pub policy: RouteAuthzLabels,
}

#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct OutboundEndpointLabels {
    pub server_id: tls::ConditionalClientTls,
    pub authority: Option<http::uri::Authority>,
    pub labels: Option<String>,
    pub zone_locality: OutboundZoneLocality,
    pub target_addr: SocketAddr,
}
```

\- <https://github.com/linkerd/linkerd2-proxy/blob/main/linkerd/app/core/src/metrics.rs>

bear particular attention to the derived `Hash` implementation. note the `tls::ConditionalClientTls` and `tls::ConditionalServerTls` types used in each of these labels. these are used by some of our types like `TlsConnect` to emit prometheus labels, using our legacy system's `FmtLabels` trait:

```rust
// linkerd/app/core/src/transport/labels.rs

impl FmtLabels for TlsConnect<'_> {
    fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self.0 {
            Conditional::None(tls::NoClientTls::Disabled) => {
                write!(f, "tls=\"disabled\"")
            }
            Conditional::None(why) => {
                write!(f, "tls=\"no_identity\",no_tls_reason=\"{}\"", why)
            }
            Conditional::Some(tls::ClientTls { server_id, .. }) => {
                write!(f, "tls=\"true\",server_id=\"{}\"", server_id)
            }
        }
    }
}
```

\- <99316f7898/linkerd/app/core/src/transport/labels.rs (L151-L165)>

note the `ClientTls` case, which ignores fields in the client tls information:

```rust
// linkerd/tls/src/client.rs

/// A stack parameter that configures a `Client` to establish a TLS connection.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct ClientTls {
    pub server_name: ServerName,
    pub server_id: ServerId,
    pub alpn: Option<AlpnProtocols>,
}
```

\- <99316f7898/linkerd/tls/src/client.rs (L20-L26)>

this means that there is potential for an identical set of labels to be emitted given two `ClientTls` structures with distinct server names or ALPN protocols. for brevity, i'll elide the equivalent issue with `ServerTls`, and its corresponding `TlsAccept<'_>` label implementation, though it exhibits the same issue.

### 🔨 the fix

this pull request introduces two new types: `ClientTlsLabels` and `ServerTlsLabels`. these continue to implement `Hash`, for use as a key in our metrics registry, and for use in formatting labels.

`ClientTlsLabels` and `ServerTlsLabels` each resemble `ClientTls` and `ServerTls`, respectively, but do not contain any fields that are elided in label formatting, to prevent duplicate metrics from being emitted.

relatedly, #3988 audits our existing `FmtLabels` implementations and makes use of exhaustive bindings, to prevent this category of problem in the short-term future. ideally, we might eventually consider replacing the metrics interfaces in `linkerd-metrics`, but that is strictly kept out-of-scope for the purposes of this particular fix.

---

* fix: do not key transport metrics registry on `ClientTls`

Signed-off-by: katelyn martin <kate@buoyant.io>

* fix: do not key transport metrics registry on `ServerTls`

Signed-off-by: katelyn martin <kate@buoyant.io>

---------

Signed-off-by: katelyn martin <kate@buoyant.io>
2025-07-02 12:38:04 -04:00
dependabot[bot] 085be9978d
build(deps): bump indexmap from 2.9.0 to 2.10.0 (#3986)
Bumps [indexmap](https://github.com/indexmap-rs/indexmap) from 2.9.0 to 2.10.0.
- [Changelog](https://github.com/indexmap-rs/indexmap/blob/main/RELEASES.md)
- [Commits](https://github.com/indexmap-rs/indexmap/compare/2.9.0...2.10.0)

---
updated-dependencies:
- dependency-name: indexmap
  dependency-version: 2.10.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-27 11:19:12 -04:00
dependabot[bot] 682421c98a
build(deps): bump Swatinem/rust-cache from 2.7.8 to 2.8.0 (#3983)
Bumps [Swatinem/rust-cache](https://github.com/swatinem/rust-cache) from 2.7.8 to 2.8.0.
- [Release notes](https://github.com/swatinem/rust-cache/releases)
- [Changelog](https://github.com/Swatinem/rust-cache/blob/master/CHANGELOG.md)
- [Commits](9d47c6ad4b...98c8021b55)

---
updated-dependencies:
- dependency-name: Swatinem/rust-cache
  dependency-version: 2.8.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-26 12:25:06 -04:00
210 changed files with 1812 additions and 3587 deletions

View File

@ -3,7 +3,7 @@
"build": {
"dockerfile": "Dockerfile",
"args": {
"DEV_VERSION": "v45",
"DEV_VERSION": "v47",
"http_proxy": "${localEnv:http_proxy}",
"https_proxy": "${localEnv:https_proxy}"
}

View File

@ -22,13 +22,13 @@ permissions:
jobs:
build:
runs-on: ubuntu-24.04
container: ghcr.io/linkerd/dev:v45-rust
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
container: ghcr.io/linkerd/dev:v47-rust
timeout-minutes: 20
continue-on-error: true
steps:
- run: rustup toolchain install --profile=minimal beta
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
- run: git config --global --add safe.directory "$PWD" # actions/runner#2033
- run: just toolchain=beta fetch
- run: just toolchain=beta build

View File

@ -21,9 +21,9 @@ env:
jobs:
meta:
timeout-minutes: 5
runs-on: ubuntu-24.04
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
- id: changed
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c
with:
@ -40,16 +40,16 @@ jobs:
codecov:
needs: meta
if: (github.event_name == 'push' && github.ref == 'refs/heads/main') || needs.meta.outputs.any_changed == 'true'
runs-on: ubuntu-24.04
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
timeout-minutes: 30
container:
image: docker://ghcr.io/linkerd/dev:v45-rust
image: docker://ghcr.io/linkerd/dev:v47-rust
options: --security-opt seccomp=unconfined # 🤷
env:
CXX: "/usr/bin/clang++-19"
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
- uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0
- run: cargo tarpaulin --locked --workspace --exclude=linkerd2-proxy --exclude=linkerd-transport-header --exclude=opencensus-proto --exclude=spire-proto --no-run
- run: cargo tarpaulin --locked --workspace --exclude=linkerd2-proxy --exclude=linkerd-transport-header --exclude=opencensus-proto --exclude=spire-proto --skip-clean --ignore-tests --no-fail-fast --out=Xml
# Some tests are especially flakey in coverage tests. That's fine. We

View File

@ -26,11 +26,11 @@ permissions:
jobs:
list-changed:
timeout-minutes: 3
runs-on: ubuntu-24.04
container: docker://rust:1.83.0
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
container: docker://rust:1.88.0
steps:
- run: apt update && apt install -y jo
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
- run: git config --global --add safe.directory "$PWD" # actions/runner#2033
- uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c
id: changed-files
@ -47,15 +47,15 @@ jobs:
build:
needs: [list-changed]
timeout-minutes: 40
runs-on: ubuntu-24.04
container: docker://rust:1.83.0
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
container: docker://rust:1.88.0
strategy:
matrix:
dir: ${{ fromJson(needs.list-changed.outputs.dirs) }}
steps:
- run: rustup toolchain add nightly
- run: cargo install cargo-fuzz
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
- run: git config --global --add safe.directory "$PWD" # actions/runner#2033
- working-directory: ${{matrix.dir}}
run: cargo +nightly fetch

View File

@ -12,9 +12,9 @@ on:
jobs:
markdownlint:
timeout-minutes: 5
runs-on: ubuntu-24.04
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
- uses: DavidAnson/markdownlint-cli2-action@992badcdf24e3b8eb7e87ff9287fe931bcb00c6e
with:
globs: "**/*.md"

View File

@ -22,13 +22,13 @@ permissions:
jobs:
build:
runs-on: ubuntu-24.04
container: ghcr.io/linkerd/dev:v45-rust
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
container: ghcr.io/linkerd/dev:v47-rust
timeout-minutes: 20
continue-on-error: true
steps:
- run: rustup toolchain install --profile=minimal nightly
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
- run: git config --global --add safe.directory "$PWD" # actions/runner#2033
- run: just toolchain=nightly fetch
- run: just toolchain=nightly profile=release build

View File

@ -14,9 +14,9 @@ concurrency:
jobs:
meta:
timeout-minutes: 5
runs-on: ubuntu-24.04
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
- id: build
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c
with:
@ -57,7 +57,7 @@ jobs:
info:
timeout-minutes: 3
needs: meta
runs-on: ubuntu-24.04
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
steps:
- name: Info
run: |
@ -74,25 +74,25 @@ jobs:
actions:
needs: meta
if: needs.meta.outputs.actions_changed == 'true'
runs-on: ubuntu-24.04
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
steps:
- uses: linkerd/dev/actions/setup-tools@v45
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: linkerd/dev/actions/setup-tools@v47
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
- run: just action-lint
- run: just action-dev-check
rust:
needs: meta
if: needs.meta.outputs.cargo_changed == 'true' || needs.meta.outputs.rust_changed == 'true'
runs-on: ubuntu-24.04
container: ghcr.io/linkerd/dev:v45-rust
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
container: ghcr.io/linkerd/dev:v47-rust
permissions:
contents: read
timeout-minutes: 20
steps:
- run: git config --global --add safe.directory "$PWD" # actions/runner#2033
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
- uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0
- run: just fetch
- run: cargo deny --all-features check bans licenses sources
- run: just check-fmt
@ -107,15 +107,15 @@ jobs:
needs: meta
if: needs.meta.outputs.cargo_changed == 'true'
timeout-minutes: 20
runs-on: ubuntu-24.04
container: ghcr.io/linkerd/dev:v45-rust
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
container: ghcr.io/linkerd/dev:v47-rust
strategy:
matrix:
crate: ${{ fromJson(needs.meta.outputs.cargo_crates) }}
steps:
- run: git config --global --add safe.directory "$PWD" # actions/runner#2033
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
- uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0
- run: just fetch
- run: just check-crate ${{ matrix.crate }}
@ -123,11 +123,11 @@ jobs:
needs: meta
if: needs.meta.outputs.cargo_changed == 'true' || needs.meta.outputs.rust_changed == 'true'
timeout-minutes: 20
runs-on: ubuntu-24.04
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
env:
WAIT_TIMEOUT: 2m
steps:
- uses: linkerd/dev/actions/setup-tools@v45
- uses: linkerd/dev/actions/setup-tools@v47
- name: scurl https://run.linkerd.io/install-edge | sh
run: |
scurl https://run.linkerd.io/install-edge | sh
@ -136,7 +136,7 @@ jobs:
tag=$(linkerd version --client --short)
echo "linkerd $tag"
echo "LINKERD_TAG=$tag" >> "$GITHUB_ENV"
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
- run: just docker
- run: just k3d-create
- run: just k3d-load-linkerd
@ -149,7 +149,7 @@ jobs:
timeout-minutes: 3
needs: [meta, actions, rust, rust-crates, linkerd-install]
if: always()
runs-on: ubuntu-24.04
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
permissions:
contents: write
@ -168,7 +168,7 @@ jobs:
if: contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled')
run: exit 1
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
if: needs.meta.outputs.is_dependabot == 'true' && needs.meta.outputs.any_changed == 'true'
- name: "Merge dependabot changes"
if: needs.meta.outputs.is_dependabot == 'true' && needs.meta.outputs.any_changed == 'true'

View File

@ -13,7 +13,7 @@ concurrency:
jobs:
last-release:
if: github.repository == 'linkerd/linkerd2-proxy' # Don't run this in forks.
runs-on: ubuntu-24.04
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
timeout-minutes: 5
env:
GH_REPO: ${{ github.repository }}
@ -41,10 +41,10 @@ jobs:
last-commit:
needs: last-release
runs-on: ubuntu-24.04
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
timeout-minutes: 5
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
- name: Check if the most recent commit is after the last release
id: recency
env:
@ -62,7 +62,7 @@ jobs:
trigger-release:
needs: [last-release, last-commit]
if: needs.last-release.outputs.recent == 'false' && needs.last-commit.outputs.after-release == 'true'
runs-on: ubuntu-24.04
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
timeout-minutes: 5
permissions:
actions: write

View File

@ -46,6 +46,7 @@ on:
default: true
env:
CARGO: "cargo auditable"
CARGO_INCREMENTAL: 0
CARGO_NET_RETRY: 10
RUSTFLAGS: "-D warnings -A deprecated --cfg tokio_unstable"
@ -58,9 +59,25 @@ concurrency:
jobs:
meta:
timeout-minutes: 5
runs-on: ubuntu-24.04
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
steps:
- id: meta
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
if: github.event_name == 'pull_request'
- id: workflow
if: github.event_name == 'pull_request'
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c
with:
files: |
.github/workflows/release.yml
- id: build
if: github.event_name == 'pull_request'
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c
with:
files: |
justfile
Cargo.toml
- id: version
env:
VERSION: ${{ inputs.version }}
shell: bash
@ -68,47 +85,45 @@ jobs:
set -euo pipefail
shopt -s extglob
if [[ "$GITHUB_EVENT_NAME" == pull_request ]]; then
echo version="0.0.0-test.${GITHUB_SHA:0:7}"
echo archs='["amd64"]'
echo oses='["linux"]'
echo version="0.0.0-test.${GITHUB_SHA:0:7}" >> "$GITHUB_OUTPUT"
exit 0
fi >> "$GITHUB_OUTPUT"
fi
if ! [[ "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+(-[0-9A-Za-z-]+)?(\+[0-9A-Za-z-]+)?$ ]]; then
echo "Invalid version: $VERSION" >&2
exit 1
fi
( echo version="${VERSION#v}"
echo archs='["amd64", "arm64", "arm"]'
echo version="${VERSION#v}" >> "$GITHUB_OUTPUT"
- id: platform
shell: bash
env:
WORKFLOW_CHANGED: ${{ steps.workflow.outputs.any_changed }}
run: |
if [[ "$GITHUB_EVENT_NAME" == pull_request && "$WORKFLOW_CHANGED" != 'true' ]]; then
( echo archs='["amd64"]'
echo oses='["linux"]' ) >> "$GITHUB_OUTPUT"
exit 0
fi
( echo archs='["amd64", "arm64"]'
echo oses='["linux", "windows"]'
) >> "$GITHUB_OUTPUT"
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
if: github.event_name == 'pull_request'
- id: changed
if: github.event_name == 'pull_request'
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c
with:
files: |
.github/workflows/release.yml
justfile
Cargo.toml
outputs:
archs: ${{ steps.meta.outputs.archs }}
oses: ${{ steps.meta.outputs.oses }}
version: ${{ steps.meta.outputs.version }}
package: ${{ github.event_name == 'workflow_dispatch' || steps.changed.outputs.any_changed == 'true' }}
archs: ${{ steps.platform.outputs.archs }}
oses: ${{ steps.platform.outputs.oses }}
version: ${{ steps.version.outputs.version }}
package: ${{ github.event_name == 'workflow_dispatch' || steps.build.outputs.any_changed == 'true' || steps.workflow.outputs.any_changed == 'true' }}
profile: ${{ inputs.profile || 'release' }}
publish: ${{ inputs.publish }}
ref: ${{ inputs.ref || github.sha }}
tag: "${{ inputs.tag-prefix || 'release/' }}v${{ steps.meta.outputs.version }}"
tag: "${{ inputs.tag-prefix || 'release/' }}v${{ steps.version.outputs.version }}"
prerelease: ${{ inputs.prerelease }}
draft: ${{ inputs.draft }}
latest: ${{ inputs.latest }}
info:
needs: meta
runs-on: ubuntu-24.04
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
timeout-minutes: 3
steps:
- name: Inputs
@ -134,15 +149,13 @@ jobs:
exclude:
- os: windows
arch: arm64
- os: windows
arch: arm
# If we're not actually building on a release tag, don't short-circuit on
# errors. This helps us know whether a failure is platform-specific.
continue-on-error: ${{ needs.meta.outputs.publish != 'true' }}
runs-on: ubuntu-24.04
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
timeout-minutes: 40
container: docker://ghcr.io/linkerd/dev:v45-rust-musl
container: docker://ghcr.io/linkerd/dev:v47-rust-musl
env:
LINKERD2_PROXY_VENDOR: ${{ github.repository_owner }}
LINKERD2_PROXY_VERSION: ${{ needs.meta.outputs.version }}
@ -150,15 +163,19 @@ jobs:
# TODO: add to dev image
- name: Install MiniGW
if: matrix.os == 'windows'
run: apt-get update && apt-get install mingw-w64 -y
run: apt-get update && apt-get install -y mingw-w64
- name: Install cross compilation toolchain
if: matrix.arch == 'arm64'
run: apt-get update && apt-get install -y binutils-aarch64-linux-gnu
- name: Configure git
run: git config --global --add safe.directory "$PWD" # actions/runner#2033
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
with:
ref: ${{ needs.meta.outputs.ref }}
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6
- uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0
with:
key: ${{ matrix.arch }}
key: ${{ matrix.os }}-${{ matrix.arch }}
- run: just fetch
- run: just arch=${{ matrix.arch }} libc=${{ matrix.libc }} os=${{ matrix.os }} rustup
- run: just arch=${{ matrix.arch }} libc=${{ matrix.libc }} os=${{ matrix.os }} profile=${{ needs.meta.outputs.profile }} build
@ -170,7 +187,7 @@ jobs:
publish:
needs: [meta, package]
runs-on: ubuntu-24.04
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
timeout-minutes: 5
permissions:
actions: write
@ -187,7 +204,7 @@ jobs:
git config --global user.name "$GITHUB_USERNAME"
git config --global user.email "$GITHUB_USERNAME"@users.noreply.github.com
# Tag the release.
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
with:
token: ${{ secrets.LINKERD2_PROXY_GITHUB_TOKEN || github.token }}
ref: ${{ needs.meta.outputs.ref }}
@ -225,7 +242,7 @@ jobs:
needs: publish
if: always()
timeout-minutes: 3
runs-on: ubuntu-24.04
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
steps:
- name: Results
run: |

View File

@ -13,8 +13,8 @@ on:
jobs:
sh-lint:
timeout-minutes: 5
runs-on: ubuntu-24.04
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
steps:
- uses: linkerd/dev/actions/setup-tools@v45
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: linkerd/dev/actions/setup-tools@v47
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
- run: just sh-lint

View File

@ -13,10 +13,10 @@ permissions:
jobs:
devcontainer:
runs-on: ubuntu-24.04
container: ghcr.io/linkerd/dev:v45-rust
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
container: ghcr.io/linkerd/dev:v47-rust
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
- run: git config --global --add safe.directory "$PWD" # actions/runner#2033
- run: |
VERSION_REGEX='channel = "([0-9]+\.[0-9]+\.[0-9]+)"'
@ -35,10 +35,10 @@ jobs:
workflows:
runs-on: ubuntu-24.04
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
steps:
- uses: linkerd/dev/actions/setup-tools@v45
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: linkerd/dev/actions/setup-tools@v47
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
- shell: bash
run: |
VERSION_REGEX='channel = "([0-9]+\.[0-9]+\.[0-9]+)"'

File diff suppressed because it is too large Load Diff

View File

@ -42,8 +42,6 @@ members = [
"linkerd/idle-cache",
"linkerd/io",
"linkerd/meshtls",
"linkerd/meshtls/boring",
"linkerd/meshtls/rustls",
"linkerd/meshtls/verifier",
"linkerd/metrics",
"linkerd/mock/http-body",
@ -71,6 +69,7 @@ members = [
"linkerd/reconnect",
"linkerd/retry",
"linkerd/router",
"linkerd/rustls",
"linkerd/service-profiles",
"linkerd/signal",
"linkerd/stack",
@ -115,11 +114,10 @@ prost = { version = "0.13" }
prost-build = { version = "0.13", default-features = false }
prost-types = { version = "0.13" }
tokio-rustls = { version = "0.26", default-features = false, features = [
"ring",
"logging",
] }
tonic = { version = "0.12", default-features = false }
tonic-build = { version = "0.12", default-features = false }
tonic = { version = "0.13", default-features = false }
tonic-build = { version = "0.13", default-features = false }
tower = { version = "0.5", default-features = false }
tower-service = { version = "0.3" }
tower-test = { version = "0.4" }
@ -136,4 +134,4 @@ default-features = false
features = ["tokio", "tracing"]
[workspace.dependencies.linkerd2-proxy-api]
version = "0.16.0"
version = "0.17.0"

View File

@ -3,7 +3,7 @@
# This is intended **DEVELOPMENT ONLY**, i.e. so that proxy developers can
# easily test the proxy in the context of the larger `linkerd2` project.
ARG RUST_IMAGE=ghcr.io/linkerd/dev:v45-rust
ARG RUST_IMAGE=ghcr.io/linkerd/dev:v47-rust
# Use an arbitrary ~recent edge release image to get the proxy
# identity-initializing and linkerd-await wrappers.
@ -14,11 +14,16 @@ FROM $LINKERD2_IMAGE as linkerd2
FROM --platform=$BUILDPLATFORM $RUST_IMAGE as fetch
ARG PROXY_FEATURES=""
ARG TARGETARCH="amd64"
RUN apt-get update && \
apt-get install -y time && \
if [[ "$PROXY_FEATURES" =~ .*meshtls-boring.* ]] ; then \
apt-get install -y golang ; \
fi && \
case "$TARGETARCH" in \
amd64) true ;; \
arm64) apt-get install --no-install-recommends -y binutils-aarch64-linux-gnu ;; \
esac && \
rm -rf /var/lib/apt/lists/*
ENV CARGO_NET_RETRY=10
@ -33,7 +38,6 @@ RUN --mount=type=cache,id=cargo,target=/usr/local/cargo/registry \
FROM fetch as build
ENV CARGO_INCREMENTAL=0
ENV RUSTFLAGS="-D warnings -A deprecated --cfg tokio_unstable"
ARG TARGETARCH="amd64"
ARG PROFILE="release"
ARG LINKERD2_PROXY_VERSION=""
ARG LINKERD2_PROXY_VENDOR=""

View File

@ -2,7 +2,6 @@
targets = [
{ triple = "x86_64-unknown-linux-gnu" },
{ triple = "aarch64-unknown-linux-gnu" },
{ triple = "armv7-unknown-linux-gnu" },
]
[advisories]
@ -24,11 +23,6 @@ allow = [
private = { ignore = true }
confidence-threshold = 0.8
exceptions = [
{ allow = [
"ISC",
"MIT",
"OpenSSL",
], name = "ring", version = "*" },
{ allow = [
"ISC",
"OpenSSL",
@ -39,14 +33,6 @@ exceptions = [
], name = "aws-lc-fips-sys", version = "*" },
]
[[licenses.clarify]]
name = "ring"
version = "*"
expression = "MIT AND ISC AND OpenSSL"
license-files = [
{ path = "LICENSE", hash = 0xbd0eed23 },
]
[bans]
multiple-versions = "deny"
# Wildcard dependencies are used for all workspace-local crates.
@ -56,6 +42,8 @@ deny = [
{ name = "rustls", wrappers = ["tokio-rustls"] },
# rustls-webpki should be used instead.
{ name = "webpki" },
# aws-lc-rs should be used instead.
{ name = "ring" }
]
skip = [
# `linkerd-trace-context`, `rustls-pemfile` and `tonic` depend on `base64`
@ -76,6 +64,8 @@ skip-tree = [
{ name = "pprof" },
# aws-lc-rs uses a slightly outdated version of bindgen
{ name = "bindgen", version = "0.69.5" },
# socket v0.6 is still propagating through the ecosystem
{ name = "socket2", version = "0.5" },
]
[sources]

View File

@ -18,6 +18,10 @@ features := ""
export LINKERD2_PROXY_VERSION := env_var_or_default("LINKERD2_PROXY_VERSION", "0.0.0-dev" + `git rev-parse --short HEAD`)
export LINKERD2_PROXY_VENDOR := env_var_or_default("LINKERD2_PROXY_VENDOR", `whoami` + "@" + `hostname`)
# TODO: these variables will be included in dev v48
export AWS_LC_SYS_CFLAGS_aarch64_unknown_linux_gnu := env_var_or_default("AWS_LC_SYS_CFLAGS_aarch64_unknown_linux_gnu", "-fuse-ld=/usr/aarch64-linux-gnu/bin/ld")
export AWS_LC_SYS_CFLAGS_aarch64_unknown_linux_musl := env_var_or_default("AWS_LC_SYS_CFLAGS_aarch64_unknown_linux_musl", "-fuse-ld=/usr/aarch64-linux-gnu/bin/ld")
# The version name to use for packages.
package_version := "v" + LINKERD2_PROXY_VERSION
@ -26,7 +30,7 @@ docker-repo := "localhost/linkerd/proxy"
docker-tag := `git rev-parse --abbrev-ref HEAD | sed 's|/|.|g'` + "." + `git rev-parse --short HEAD`
docker-image := docker-repo + ":" + docker-tag
# The architecture name to use for packages. Either 'amd64', 'arm64', or 'arm'.
# The architecture name to use for packages. Either 'amd64' or 'arm64'.
arch := "amd64"
# The OS name to use for packages. Either 'linux' or 'windows'.
os := "linux"
@ -39,8 +43,6 @@ _target := if os + '-' + arch == "linux-amd64" {
"x86_64-unknown-linux-" + libc
} else if os + '-' + arch == "linux-arm64" {
"aarch64-unknown-linux-" + libc
} else if os + '-' + arch == "linux-arm" {
"armv7-unknown-linux-" + libc + "eabihf"
} else if os + '-' + arch == "windows-amd64" {
"x86_64-pc-windows-" + libc
} else {
@ -139,7 +141,7 @@ _strip:
_package_bin := _package_dir / "bin" / "linkerd2-proxy"
# XXX {aarch64,arm}-musl builds do not enable PIE, so we use target-specific
# XXX aarch64-musl builds do not enable PIE, so we use target-specific
# files to document those differences.
_expected_checksec := '.checksec' / arch + '-' + libc + '.json'

View File

@ -100,15 +100,11 @@ impl Addr {
// them ourselves.
format!("[{}]", a.ip())
};
http::uri::Authority::from_str(&ip).unwrap_or_else(|err| {
panic!("SocketAddr ({}) must be valid authority: {}", a, err)
})
}
Addr::Socket(a) => {
http::uri::Authority::from_str(&a.to_string()).unwrap_or_else(|err| {
panic!("SocketAddr ({}) must be valid authority: {}", a, err)
})
http::uri::Authority::from_str(&ip)
.unwrap_or_else(|err| panic!("SocketAddr ({a}) must be valid authority: {err}"))
}
Addr::Socket(a) => http::uri::Authority::from_str(&a.to_string())
.unwrap_or_else(|err| panic!("SocketAddr ({a}) must be valid authority: {err}")),
}
}
@ -265,14 +261,14 @@ mod tests {
];
for (host, expected_result) in cases {
let a = Addr::from_str(host).unwrap();
assert_eq!(a.is_loopback(), *expected_result, "{:?}", host)
assert_eq!(a.is_loopback(), *expected_result, "{host:?}")
}
}
fn test_to_http_authority(cases: &[&str]) {
let width = cases.iter().map(|s| s.len()).max().unwrap_or(0);
for host in cases {
print!("trying {:1$} ... ", host, width);
print!("trying {host:width$} ... ");
Addr::from_str(host).unwrap().to_http_authority();
println!("ok");
}

View File

@ -13,7 +13,7 @@
use futures::future::{self, TryFutureExt};
use http::StatusCode;
use linkerd_app_core::{
metrics::{self as metrics, FmtMetrics},
metrics::{self as metrics, legacy::FmtMetrics},
proxy::http::{Body, BoxBody, ClientHandle, Request, Response},
trace, Error, Result,
};
@ -32,7 +32,7 @@ pub use self::readiness::{Latch, Readiness};
#[derive(Clone)]
pub struct Admin<M> {
metrics: metrics::Serve<M>,
metrics: metrics::legacy::Serve<M>,
tracing: trace::Handle,
ready: Readiness,
shutdown_tx: mpsc::UnboundedSender<()>,
@ -52,7 +52,7 @@ impl<M> Admin<M> {
tracing: trace::Handle,
) -> Self {
Self {
metrics: metrics::Serve::new(metrics),
metrics: metrics::legacy::Serve::new(metrics),
ready,
shutdown_tx,
enable_shutdown,

View File

@ -27,7 +27,7 @@ where
.into_body()
.collect()
.await
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?
.map_err(io::Error::other)?
.aggregate();
match level.set_from(body.chunk()) {
Ok(_) => mk_rsp(StatusCode::NO_CONTENT, BoxBody::empty()),

View File

@ -2,7 +2,7 @@ use linkerd_app_core::{
classify,
config::ServerConfig,
drain, errors, identity,
metrics::{self, FmtMetrics},
metrics::{self, legacy::FmtMetrics},
proxy::http,
serve,
svc::{self, ExtractParam, InsertParam, Param},
@ -214,7 +214,7 @@ impl Config {
impl Param<transport::labels::Key> for Tcp {
fn param(&self) -> transport::labels::Key {
transport::labels::Key::inbound_server(
self.tls.clone(),
self.tls.as_ref().map(|t| t.labels()),
self.addr.into(),
self.policy.server_label(),
)
@ -272,7 +272,7 @@ impl Param<metrics::ServerLabel> for Http {
impl Param<metrics::EndpointLabels> for Permitted {
fn param(&self) -> metrics::EndpointLabels {
metrics::InboundEndpointLabels {
tls: self.http.tcp.tls.clone(),
tls: self.http.tcp.tls.as_ref().map(|t| t.labels()),
authority: None,
target_addr: self.http.tcp.addr.into(),
policy: self.permit.labels.clone(),

View File

@ -13,31 +13,23 @@ independently of the inbound and outbound proxy logic.
"""
[dependencies]
bytes = { workspace = true }
drain = { workspace = true, features = ["retain"] }
http = { workspace = true }
http-body = { workspace = true }
http-body-util = { workspace = true }
hyper = { workspace = true, features = ["http1", "http2"] }
hyper-util = { workspace = true }
futures = { version = "0.3", default-features = false }
ipnet = "2.11"
prometheus-client = { workspace = true }
regex = "1"
serde_json = "1"
thiserror = "2"
tokio = { version = "1", features = ["macros", "sync", "parking_lot"] }
tokio-stream = { version = "0.1", features = ["time"] }
tonic = { workspace = true, default-features = false, features = ["prost"] }
tracing = { workspace = true }
parking_lot = "0.12"
pin-project = "1"
linkerd-addr = { path = "../../addr" }
linkerd-conditional = { path = "../../conditional" }
linkerd-dns = { path = "../../dns" }
linkerd-duplex = { path = "../../duplex" }
linkerd-errno = { path = "../../errno" }
linkerd-error = { path = "../../error" }
linkerd-error-respond = { path = "../../error-respond" }
linkerd-exp-backoff = { path = "../../exp-backoff" }
@ -64,6 +56,7 @@ linkerd-proxy-tcp = { path = "../../proxy/tcp" }
linkerd-proxy-transport = { path = "../../proxy/transport" }
linkerd-reconnect = { path = "../../reconnect" }
linkerd-router = { path = "../../router" }
linkerd-rustls = { path = "../../rustls" }
linkerd-service-profiles = { path = "../../service-profiles" }
linkerd-stack = { path = "../../stack" }
linkerd-stack-metrics = { path = "../../stack/metrics" }
@ -83,5 +76,6 @@ features = ["make", "spawn-ready", "timeout", "util", "limit"]
semver = "1"
[dev-dependencies]
bytes = { workspace = true }
http-body-util = { workspace = true }
linkerd-mock-http-body = { path = "../../mock/http-body" }
quickcheck = { version = "1", default-features = false }

View File

@ -4,11 +4,11 @@ fn set_env(name: &str, cmd: &mut Command) {
let value = match cmd.output() {
Ok(output) => String::from_utf8(output.stdout).unwrap(),
Err(err) => {
println!("cargo:warning={}", err);
println!("cargo:warning={err}");
"".to_string()
}
};
println!("cargo:rustc-env={}={}", name, value);
println!("cargo:rustc-env={name}={value}");
}
fn version() -> String {

View File

@ -1,5 +1,4 @@
use crate::profiles;
pub use classify::gate;
use linkerd_error::Error;
use linkerd_proxy_client_policy as client_policy;
use linkerd_proxy_http::{classify, HasH2Reason, ResponseTimeoutError};
@ -214,7 +213,7 @@ fn h2_error(err: &Error) -> String {
if let Some(reason) = err.h2_reason() {
// This should output the error code in the same format as the spec,
// for example: PROTOCOL_ERROR
format!("h2({:?})", reason)
format!("h2({reason:?})")
} else {
trace!("classifying found non-h2 error: {:?}", err);
String::from("unclassified")

View File

@ -101,7 +101,7 @@ impl Config {
identity: identity::NewClient,
) -> svc::ArcNewService<
(),
svc::BoxCloneSyncService<http::Request<tonic::body::BoxBody>, http::Response<RspBody>>,
svc::BoxCloneSyncService<http::Request<tonic::body::Body>, http::Response<RspBody>>,
> {
let addr = self.addr;
tracing::trace!(%addr, "Building");

View File

@ -25,6 +25,7 @@ pub mod metrics;
pub mod proxy;
pub mod serve;
pub mod svc;
pub mod tls_info;
pub mod transport;
pub use self::build_info::{BuildInfo, BUILD_INFO};

View File

@ -54,7 +54,7 @@ pub struct Proxy {
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct ControlLabels {
addr: Addr,
server_id: tls::ConditionalClientTls,
server_id: tls::ConditionalClientTlsLabels,
}
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
@ -65,7 +65,7 @@ pub enum EndpointLabels {
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct InboundEndpointLabels {
pub tls: tls::ConditionalServerTls,
pub tls: tls::ConditionalServerTlsLabels,
pub authority: Option<http::uri::Authority>,
pub target_addr: SocketAddr,
pub policy: RouteAuthzLabels,
@ -98,7 +98,7 @@ pub struct RouteAuthzLabels {
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct OutboundEndpointLabels {
pub server_id: tls::ConditionalClientTls,
pub server_id: tls::ConditionalClientTlsLabels,
pub authority: Option<http::uri::Authority>,
pub labels: Option<String>,
pub zone_locality: OutboundZoneLocality,
@ -155,10 +155,10 @@ where
I: Iterator<Item = (&'i String, &'i String)>,
{
let (k0, v0) = labels_iter.next()?;
let mut out = format!("{}_{}=\"{}\"", prefix, k0, v0);
let mut out = format!("{prefix}_{k0}=\"{v0}\"");
for (k, v) in labels_iter {
write!(out, ",{}_{}=\"{}\"", prefix, k, v).expect("label concat must succeed");
write!(out, ",{prefix}_{k}=\"{v}\"").expect("label concat must succeed");
}
Some(out)
}
@ -166,7 +166,7 @@ where
// === impl Metrics ===
impl Metrics {
pub fn new(retain_idle: Duration) -> (Self, impl FmtMetrics + Clone + Send + 'static) {
pub fn new(retain_idle: Duration) -> (Self, impl legacy::FmtMetrics + Clone + Send + 'static) {
let (control, control_report) = {
let m = http_metrics::Requests::<ControlLabels, Class>::default();
let r = m.clone().into_report(retain_idle).with_prefix("control");
@ -223,6 +223,7 @@ impl Metrics {
opentelemetry,
};
use legacy::FmtMetrics as _;
let report = endpoint_report
.and_report(profile_route_report)
.and_report(retry_report)
@ -243,15 +244,17 @@ impl svc::Param<ControlLabels> for control::ControlAddr {
fn param(&self) -> ControlLabels {
ControlLabels {
addr: self.addr.clone(),
server_id: self.identity.clone(),
server_id: self.identity.as_ref().map(tls::ClientTls::labels),
}
}
}
impl FmtLabels for ControlLabels {
impl legacy::FmtLabels for ControlLabels {
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "addr=\"{}\",", self.addr)?;
TlsConnect::from(&self.server_id).fmt_labels(f)?;
let Self { addr, server_id } = self;
write!(f, "addr=\"{addr}\",")?;
TlsConnect::from(server_id).fmt_labels(f)?;
Ok(())
}
@ -279,13 +282,19 @@ impl ProfileRouteLabels {
}
}
impl FmtLabels for ProfileRouteLabels {
impl legacy::FmtLabels for ProfileRouteLabels {
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.direction.fmt_labels(f)?;
write!(f, ",dst=\"{}\"", self.addr)?;
let Self {
direction,
addr,
labels,
} = self;
if let Some(labels) = self.labels.as_ref() {
write!(f, ",{}", labels)?;
direction.fmt_labels(f)?;
write!(f, ",dst=\"{addr}\"")?;
if let Some(labels) = labels.as_ref() {
write!(f, ",{labels}")?;
}
Ok(())
@ -306,7 +315,7 @@ impl From<OutboundEndpointLabels> for EndpointLabels {
}
}
impl FmtLabels for EndpointLabels {
impl legacy::FmtLabels for EndpointLabels {
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Inbound(i) => (Direction::In, i).fmt_labels(f),
@ -315,32 +324,36 @@ impl FmtLabels for EndpointLabels {
}
}
impl FmtLabels for InboundEndpointLabels {
impl legacy::FmtLabels for InboundEndpointLabels {
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if let Some(a) = self.authority.as_ref() {
let Self {
tls,
authority,
target_addr,
policy,
} = self;
if let Some(a) = authority.as_ref() {
Authority(a).fmt_labels(f)?;
write!(f, ",")?;
}
(
(TargetAddr(self.target_addr), TlsAccept::from(&self.tls)),
&self.policy,
)
.fmt_labels(f)?;
((TargetAddr(*target_addr), TlsAccept::from(tls)), policy).fmt_labels(f)?;
Ok(())
}
}
impl FmtLabels for ServerLabel {
impl legacy::FmtLabels for ServerLabel {
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let Self(meta, port) = self;
write!(
f,
"srv_group=\"{}\",srv_kind=\"{}\",srv_name=\"{}\",srv_port=\"{}\"",
self.0.group(),
self.0.kind(),
self.0.name(),
self.1
meta.group(),
meta.kind(),
meta.name(),
port
)
}
}
@ -362,41 +375,47 @@ impl prom::EncodeLabelSetMut for ServerLabel {
}
}
impl FmtLabels for ServerAuthzLabels {
impl legacy::FmtLabels for ServerAuthzLabels {
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.server.fmt_labels(f)?;
let Self { server, authz } = self;
server.fmt_labels(f)?;
write!(
f,
",authz_group=\"{}\",authz_kind=\"{}\",authz_name=\"{}\"",
self.authz.group(),
self.authz.kind(),
self.authz.name()
authz.group(),
authz.kind(),
authz.name()
)
}
}
impl FmtLabels for RouteLabels {
impl legacy::FmtLabels for RouteLabels {
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.server.fmt_labels(f)?;
let Self { server, route } = self;
server.fmt_labels(f)?;
write!(
f,
",route_group=\"{}\",route_kind=\"{}\",route_name=\"{}\"",
self.route.group(),
self.route.kind(),
self.route.name(),
route.group(),
route.kind(),
route.name(),
)
}
}
impl FmtLabels for RouteAuthzLabels {
impl legacy::FmtLabels for RouteAuthzLabels {
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.route.fmt_labels(f)?;
let Self { route, authz } = self;
route.fmt_labels(f)?;
write!(
f,
",authz_group=\"{}\",authz_kind=\"{}\",authz_name=\"{}\"",
self.authz.group(),
self.authz.kind(),
self.authz.name(),
authz.group(),
authz.kind(),
authz.name(),
)
}
}
@ -407,19 +426,28 @@ impl svc::Param<OutboundZoneLocality> for OutboundEndpointLabels {
}
}
impl FmtLabels for OutboundEndpointLabels {
impl legacy::FmtLabels for OutboundEndpointLabels {
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if let Some(a) = self.authority.as_ref() {
let Self {
server_id,
authority,
labels,
// TODO(kate): this label is not currently emitted.
zone_locality: _,
target_addr,
} = self;
if let Some(a) = authority.as_ref() {
Authority(a).fmt_labels(f)?;
write!(f, ",")?;
}
let ta = TargetAddr(self.target_addr);
let tls = TlsConnect::from(&self.server_id);
let ta = TargetAddr(*target_addr);
let tls = TlsConnect::from(server_id);
(ta, tls).fmt_labels(f)?;
if let Some(labels) = self.labels.as_ref() {
write!(f, ",{}", labels)?;
if let Some(labels) = labels.as_ref() {
write!(f, ",{labels}")?;
}
Ok(())
@ -435,19 +463,20 @@ impl fmt::Display for Direction {
}
}
impl FmtLabels for Direction {
impl legacy::FmtLabels for Direction {
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "direction=\"{}\"", self)
write!(f, "direction=\"{self}\"")
}
}
impl FmtLabels for Authority<'_> {
impl legacy::FmtLabels for Authority<'_> {
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "authority=\"{}\"", self.0)
let Self(authority) = self;
write!(f, "authority=\"{authority}\"")
}
}
impl FmtLabels for Class {
impl legacy::FmtLabels for Class {
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let class = |ok: bool| if ok { "success" } else { "failure" };
@ -469,8 +498,7 @@ impl FmtLabels for Class {
Class::Error(msg) => write!(
f,
"classification=\"failure\",grpc_status=\"\",error=\"{}\"",
msg
"classification=\"failure\",grpc_status=\"\",error=\"{msg}\""
),
}
}
@ -496,9 +524,15 @@ impl StackLabels {
}
}
impl FmtLabels for StackLabels {
impl legacy::FmtLabels for StackLabels {
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.direction.fmt_labels(f)?;
write!(f, ",protocol=\"{}\",name=\"{}\"", self.protocol, self.name)
let Self {
direction,
protocol,
name,
} = self;
direction.fmt_labels(f)?;
write!(f, ",protocol=\"{protocol}\",name=\"{name}\"")
}
}

View File

@ -0,0 +1,70 @@
use linkerd_metrics::prom;
use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue, LabelValueEncoder};
use std::{
fmt::{Error, Write},
sync::{Arc, OnceLock},
};
static TLS_INFO: OnceLock<Arc<TlsInfo>> = OnceLock::new();
#[derive(Clone, Debug, Default, Hash, PartialEq, Eq, EncodeLabelSet)]
pub struct TlsInfo {
tls_suites: MetricValueList,
tls_kx_groups: MetricValueList,
tls_rand: String,
tls_key_provider: String,
tls_fips: bool,
}
#[derive(Clone, Debug, Default, Hash, PartialEq, Eq)]
struct MetricValueList {
values: Vec<&'static str>,
}
impl FromIterator<&'static str> for MetricValueList {
fn from_iter<T: IntoIterator<Item = &'static str>>(iter: T) -> Self {
MetricValueList {
values: iter.into_iter().collect(),
}
}
}
impl EncodeLabelValue for MetricValueList {
fn encode(&self, encoder: &mut LabelValueEncoder<'_>) -> Result<(), Error> {
for value in &self.values {
value.encode(encoder)?;
encoder.write_char(',')?;
}
Ok(())
}
}
pub fn metric() -> prom::Family<TlsInfo, prom::ConstGauge> {
let fam = prom::Family::<TlsInfo, prom::ConstGauge>::new_with_constructor(|| {
prom::ConstGauge::new(1)
});
let tls_info = TLS_INFO.get_or_init(|| {
let provider = linkerd_rustls::get_default_provider();
let tls_suites = provider
.cipher_suites
.iter()
.flat_map(|cipher_suite| cipher_suite.suite().as_str())
.collect::<MetricValueList>();
let tls_kx_groups = provider
.kx_groups
.iter()
.flat_map(|suite| suite.name().as_str())
.collect::<MetricValueList>();
Arc::new(TlsInfo {
tls_suites,
tls_kx_groups,
tls_rand: format!("{:?}", provider.secure_random),
tls_key_provider: format!("{:?}", provider.key_provider),
tls_fips: provider.fips(),
})
});
let _ = fam.get_or_create(tls_info);
fam
}

View File

@ -1,7 +1,7 @@
use crate::metrics::ServerLabel as PolicyServerLabel;
pub use crate::metrics::{Direction, OutboundEndpointLabels};
use linkerd_conditional::Conditional;
use linkerd_metrics::FmtLabels;
use linkerd_metrics::legacy::FmtLabels;
use linkerd_tls as tls;
use std::{fmt, net::SocketAddr};
@ -20,16 +20,16 @@ pub enum Key {
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct ServerLabels {
direction: Direction,
tls: tls::ConditionalServerTls,
tls: tls::ConditionalServerTlsLabels,
target_addr: SocketAddr,
policy: Option<PolicyServerLabel>,
}
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct TlsAccept<'t>(pub &'t tls::ConditionalServerTls);
pub struct TlsAccept<'t>(pub &'t tls::ConditionalServerTlsLabels);
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub(crate) struct TlsConnect<'t>(&'t tls::ConditionalClientTls);
pub(crate) struct TlsConnect<'t>(pub &'t tls::ConditionalClientTlsLabels);
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub struct TargetAddr(pub SocketAddr);
@ -38,7 +38,7 @@ pub struct TargetAddr(pub SocketAddr);
impl Key {
pub fn inbound_server(
tls: tls::ConditionalServerTls,
tls: tls::ConditionalServerTlsLabels,
target_addr: SocketAddr,
server: PolicyServerLabel,
) -> Self {
@ -62,7 +62,7 @@ impl FmtLabels for Key {
}
Self::InboundClient => {
const NO_TLS: tls::client::ConditionalClientTls =
const NO_TLS: tls::client::ConditionalClientTlsLabels =
Conditional::None(tls::NoClientTls::Loopback);
Direction::In.fmt_labels(f)?;
@ -75,7 +75,7 @@ impl FmtLabels for Key {
impl ServerLabels {
fn inbound(
tls: tls::ConditionalServerTls,
tls: tls::ConditionalServerTlsLabels,
target_addr: SocketAddr,
policy: PolicyServerLabel,
) -> Self {
@ -90,7 +90,7 @@ impl ServerLabels {
fn outbound(target_addr: SocketAddr) -> Self {
ServerLabels {
direction: Direction::Out,
tls: tls::ConditionalServerTls::None(tls::NoServerTls::Loopback),
tls: tls::ConditionalServerTlsLabels::None(tls::NoServerTls::Loopback),
target_addr,
policy: None,
}
@ -99,14 +99,17 @@ impl ServerLabels {
impl FmtLabels for ServerLabels {
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.direction.fmt_labels(f)?;
let Self {
direction,
tls,
target_addr,
policy,
} = self;
direction.fmt_labels(f)?;
f.write_str(",peer=\"src\",")?;
(
(TargetAddr(self.target_addr), TlsAccept(&self.tls)),
self.policy.as_ref(),
)
.fmt_labels(f)?;
((TargetAddr(*target_addr), TlsAccept(tls)), policy.as_ref()).fmt_labels(f)?;
Ok(())
}
@ -114,27 +117,28 @@ impl FmtLabels for ServerLabels {
// === impl TlsAccept ===
impl<'t> From<&'t tls::ConditionalServerTls> for TlsAccept<'t> {
fn from(c: &'t tls::ConditionalServerTls) -> Self {
impl<'t> From<&'t tls::ConditionalServerTlsLabels> for TlsAccept<'t> {
fn from(c: &'t tls::ConditionalServerTlsLabels) -> Self {
TlsAccept(c)
}
}
impl FmtLabels for TlsAccept<'_> {
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.0 {
let Self(tls) = self;
match tls {
Conditional::None(tls::NoServerTls::Disabled) => {
write!(f, "tls=\"disabled\"")
}
Conditional::None(why) => {
write!(f, "tls=\"no_identity\",no_tls_reason=\"{}\"", why)
write!(f, "tls=\"no_identity\",no_tls_reason=\"{why}\"")
}
Conditional::Some(tls::ServerTls::Established { client_id, .. }) => match client_id {
Some(id) => write!(f, "tls=\"true\",client_id=\"{}\"", id),
Conditional::Some(tls::ServerTlsLabels::Established { client_id }) => match client_id {
Some(id) => write!(f, "tls=\"true\",client_id=\"{id}\""),
None => write!(f, "tls=\"true\",client_id=\"\""),
},
Conditional::Some(tls::ServerTls::Passthru { sni }) => {
write!(f, "tls=\"opaque\",sni=\"{}\"", sni)
Conditional::Some(tls::ServerTlsLabels::Passthru { sni }) => {
write!(f, "tls=\"opaque\",sni=\"{sni}\"")
}
}
}
@ -142,23 +146,25 @@ impl FmtLabels for TlsAccept<'_> {
// === impl TlsConnect ===
impl<'t> From<&'t tls::ConditionalClientTls> for TlsConnect<'t> {
fn from(s: &'t tls::ConditionalClientTls) -> Self {
impl<'t> From<&'t tls::ConditionalClientTlsLabels> for TlsConnect<'t> {
fn from(s: &'t tls::ConditionalClientTlsLabels) -> Self {
TlsConnect(s)
}
}
impl FmtLabels for TlsConnect<'_> {
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.0 {
let Self(tls) = self;
match tls {
Conditional::None(tls::NoClientTls::Disabled) => {
write!(f, "tls=\"disabled\"")
}
Conditional::None(why) => {
write!(f, "tls=\"no_identity\",no_tls_reason=\"{}\"", why)
write!(f, "tls=\"no_identity\",no_tls_reason=\"{why}\"")
}
Conditional::Some(tls::ClientTls { server_id, .. }) => {
write!(f, "tls=\"true\",server_id=\"{}\"", server_id)
Conditional::Some(tls::ClientTlsLabels { server_id }) => {
write!(f, "tls=\"true\",server_id=\"{server_id}\"")
}
}
}
@ -168,12 +174,13 @@ impl FmtLabels for TlsConnect<'_> {
impl FmtLabels for TargetAddr {
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let Self(target_addr) = self;
write!(
f,
"target_addr=\"{}\",target_ip=\"{}\",target_port=\"{}\"",
self.0,
self.0.ip(),
self.0.port()
target_addr,
target_addr.ip(),
target_addr.port()
)
}
}
@ -194,9 +201,8 @@ mod tests {
use std::sync::Arc;
let labels = ServerLabels::inbound(
tls::ConditionalServerTls::Some(tls::ServerTls::Established {
tls::ConditionalServerTlsLabels::Some(tls::ServerTlsLabels::Established {
client_id: Some("foo.id.example.com".parse().unwrap()),
negotiated_protocol: None,
}),
([192, 0, 2, 4], 40000).into(),
PolicyServerLabel(

View File

@ -90,7 +90,7 @@ impl Gateway {
detect_timeout,
queue,
addr,
meta,
meta.into(),
),
None => {
tracing::debug!(

View File

@ -153,7 +153,7 @@ fn mk_routes(profile: &profiles::Profile) -> Option<outbound::http::Routes> {
if let Some((addr, metadata)) = profile.endpoint.clone() {
return Some(outbound::http::Routes::Endpoint(
Remote(ServerAddr(addr)),
metadata,
metadata.into(),
));
}

View File

@ -13,8 +13,7 @@ Configures and runs the inbound proxy
test-util = [
"linkerd-app-test",
"linkerd-idle-cache/test-util",
"linkerd-meshtls/rustls",
"linkerd-meshtls-rustls/test-util",
"linkerd-meshtls/test-util",
]
[dependencies]
@ -25,8 +24,7 @@ linkerd-app-core = { path = "../core" }
linkerd-app-test = { path = "../test", optional = true }
linkerd-http-access-log = { path = "../../http/access-log" }
linkerd-idle-cache = { path = "../../idle-cache" }
linkerd-meshtls = { path = "../../meshtls", optional = true }
linkerd-meshtls-rustls = { path = "../../meshtls/rustls", optional = true }
linkerd-meshtls = { path = "../../meshtls", optional = true, default-features = false }
linkerd-proxy-client-policy = { path = "../../proxy/client-policy" }
linkerd-tonic-stream = { path = "../../tonic-stream" }
linkerd-tonic-watch = { path = "../../tonic-watch" }
@ -49,7 +47,7 @@ hyper = { workspace = true, features = ["http1", "http2"] }
linkerd-app-test = { path = "../test" }
arbitrary = { version = "1", features = ["derive"] }
libfuzzer-sys = { version = "0.4", features = ["arbitrary-derive"] }
linkerd-meshtls-rustls = { path = "../../meshtls/rustls", features = [
linkerd-meshtls = { path = "../../meshtls", features = [
"test-util",
] }
@ -62,8 +60,7 @@ linkerd-http-metrics = { path = "../../http/metrics", features = ["test-util"] }
linkerd-http-box = { path = "../../http/box" }
linkerd-idle-cache = { path = "../../idle-cache", features = ["test-util"] }
linkerd-io = { path = "../../io", features = ["tokio-test"] }
linkerd-meshtls = { path = "../../meshtls", features = ["rustls"] }
linkerd-meshtls-rustls = { path = "../../meshtls/rustls", features = [
linkerd-meshtls = { path = "../../meshtls", features = [
"test-util",
] }
linkerd-proxy-server-policy = { path = "../../proxy/server-policy", features = [

View File

@ -18,8 +18,7 @@ linkerd-app-core = { path = "../../core" }
linkerd-app-inbound = { path = ".." }
linkerd-app-test = { path = "../../test" }
linkerd-idle-cache = { path = "../../../idle-cache", features = ["test-util"] }
linkerd-meshtls = { path = "../../../meshtls", features = ["rustls"] }
linkerd-meshtls-rustls = { path = "../../../meshtls/rustls", features = [
linkerd-meshtls = { path = "../../../meshtls", features = [
"test-util",
] }
linkerd-tracing = { path = "../../../tracing", features = ["ansi"] }

View File

@ -325,7 +325,7 @@ impl svc::Param<Remote<ServerAddr>> for Forward {
impl svc::Param<transport::labels::Key> for Forward {
fn param(&self) -> transport::labels::Key {
transport::labels::Key::inbound_server(
self.tls.clone(),
self.tls.as_ref().map(|t| t.labels()),
self.orig_dst_addr.into(),
self.permit.labels.server.clone(),
)
@ -429,7 +429,7 @@ impl svc::Param<ServerLabel> for Http {
impl svc::Param<transport::labels::Key> for Http {
fn param(&self) -> transport::labels::Key {
transport::labels::Key::inbound_server(
self.tls.status.clone(),
self.tls.status.as_ref().map(|t| t.labels()),
self.tls.orig_dst_addr.into(),
self.tls.policy.server_label(),
)

View File

@ -117,7 +117,7 @@ impl<N> Inbound<N> {
let identity = rt
.identity
.server()
.with_alpn(vec![transport_header::PROTOCOL.into()])
.spawn_with_alpn(vec![transport_header::PROTOCOL.into()])
.expect("TLS credential store must be held");
inner
@ -311,9 +311,8 @@ impl Param<Remote<ServerAddr>> for AuthorizedLocalTcp {
impl Param<transport::labels::Key> for AuthorizedLocalTcp {
fn param(&self) -> transport::labels::Key {
transport::labels::Key::inbound_server(
tls::ConditionalServerTls::Some(tls::ServerTls::Established {
tls::ConditionalServerTlsLabels::Some(tls::ServerTlsLabels::Established {
client_id: Some(self.client_id.clone()),
negotiated_protocol: None,
}),
self.addr.into(),
self.permit.labels.server.clone(),
@ -344,9 +343,8 @@ impl Param<Remote<ClientAddr>> for LocalHttp {
impl Param<transport::labels::Key> for LocalHttp {
fn param(&self) -> transport::labels::Key {
transport::labels::Key::inbound_server(
tls::ConditionalServerTls::Some(tls::ServerTls::Established {
tls::ConditionalServerTlsLabels::Some(tls::ServerTlsLabels::Established {
client_id: Some(self.client.client_id.clone()),
negotiated_protocol: None,
}),
self.addr.into(),
self.policy.server_label(),
@ -435,6 +433,14 @@ impl Param<tls::ConditionalServerTls> for GatewayTransportHeader {
}
}
impl Param<tls::ConditionalServerTlsLabels> for GatewayTransportHeader {
fn param(&self) -> tls::ConditionalServerTlsLabels {
tls::ConditionalServerTlsLabels::Some(tls::ServerTlsLabels::Established {
client_id: Some(self.client.client_id.clone()),
})
}
}
impl Param<tls::ClientId> for GatewayTransportHeader {
fn param(&self) -> tls::ClientId {
self.client.client_id.clone()

View File

@ -395,7 +395,7 @@ fn endpoint_labels(
) -> impl svc::ExtractParam<metrics::EndpointLabels, Logical> + Clone {
move |t: &Logical| -> metrics::EndpointLabels {
metrics::InboundEndpointLabels {
tls: t.tls.clone(),
tls: t.tls.as_ref().map(|t| t.labels()),
authority: unsafe_authority_labels
.then(|| t.logical.as_ref().map(|d| d.as_http_authority()))
.flatten(),

View File

@ -664,7 +664,7 @@ async fn grpc_response_class() {
let response_total = metrics
.get_response_total(
&metrics::EndpointLabels::Inbound(metrics::InboundEndpointLabels {
tls: Target::meshed_h2().1,
tls: Target::meshed_h2().1.map(|t| t.labels()),
authority: None,
target_addr: "127.0.0.1:80".parse().unwrap(),
policy: metrics::RouteAuthzLabels {
@ -762,7 +762,7 @@ async fn test_unsafe_authority_labels(
let response_total = metrics
.get_response_total(
&metrics::EndpointLabels::Inbound(metrics::InboundEndpointLabels {
tls: Target::meshed_http1().1,
tls: Target::meshed_http1().1.as_ref().map(|t| t.labels()),
authority: expected_authority,
target_addr: "127.0.0.1:80".parse().unwrap(),
policy: metrics::RouteAuthzLabels {
@ -861,12 +861,7 @@ fn grpc_status_server(
#[tracing::instrument]
fn connect_error() -> impl Fn(Remote<ServerAddr>) -> io::Result<io::BoxedIo> {
move |_| {
Err(io::Error::new(
io::ErrorKind::Other,
"server is not listening",
))
}
move |_| Err(io::Error::other("server is not listening"))
}
#[tracing::instrument]

View File

@ -113,10 +113,6 @@ impl<S> Inbound<S> {
&self.runtime.identity
}
pub fn proxy_metrics(&self) -> &metrics::Proxy {
&self.runtime.metrics.proxy
}
/// A helper for gateways to instrument policy checks.
pub fn authorize_http<N>(
&self,

View File

@ -13,7 +13,7 @@ pub(crate) mod error;
pub use linkerd_app_core::metrics::*;
/// Holds outbound proxy metrics.
/// Holds LEGACY inbound proxy metrics.
#[derive(Clone, Debug)]
pub struct InboundMetrics {
pub http_authz: authz::HttpAuthzMetrics,
@ -50,7 +50,7 @@ impl InboundMetrics {
}
}
impl FmtMetrics for InboundMetrics {
impl legacy::FmtMetrics for InboundMetrics {
fn fmt_metrics(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.http_authz.fmt_metrics(f)?;
self.http_errors.fmt_metrics(f)?;

View File

@ -1,8 +1,9 @@
use crate::policy::{AllowPolicy, HttpRoutePermit, Meta, ServerPermit};
use linkerd_app_core::{
metrics::{
metrics, Counter, FmtLabels, FmtMetrics, RouteAuthzLabels, RouteLabels, ServerAuthzLabels,
ServerLabel, TargetAddr, TlsAccept,
legacy::{Counter, FmtLabels, FmtMetrics},
metrics, RouteAuthzLabels, RouteLabels, ServerAuthzLabels, ServerLabel, TargetAddr,
TlsAccept,
},
tls,
transport::OrigDstAddr,
@ -67,7 +68,7 @@ pub struct HTTPLocalRateLimitLabels {
#[derive(Debug, Hash, PartialEq, Eq)]
struct Key<L> {
target: TargetAddr,
tls: tls::ConditionalServerTls,
tls: tls::ConditionalServerTlsLabels,
labels: L,
}
@ -80,7 +81,7 @@ type HttpLocalRateLimitKey = Key<HTTPLocalRateLimitLabels>;
// === impl HttpAuthzMetrics ===
impl HttpAuthzMetrics {
pub fn allow(&self, permit: &HttpRoutePermit, tls: tls::ConditionalServerTls) {
pub fn allow(&self, permit: &HttpRoutePermit, tls: tls::ConditionalServerTlsLabels) {
self.0
.allow
.lock()
@ -93,7 +94,7 @@ impl HttpAuthzMetrics {
&self,
labels: ServerLabel,
dst: OrigDstAddr,
tls: tls::ConditionalServerTls,
tls: tls::ConditionalServerTlsLabels,
) {
self.0
.route_not_found
@ -103,7 +104,12 @@ impl HttpAuthzMetrics {
.incr();
}
pub fn deny(&self, labels: RouteLabels, dst: OrigDstAddr, tls: tls::ConditionalServerTls) {
pub fn deny(
&self,
labels: RouteLabels,
dst: OrigDstAddr,
tls: tls::ConditionalServerTlsLabels,
) {
self.0
.deny
.lock()
@ -116,7 +122,7 @@ impl HttpAuthzMetrics {
&self,
labels: HTTPLocalRateLimitLabels,
dst: OrigDstAddr,
tls: tls::ConditionalServerTls,
tls: tls::ConditionalServerTlsLabels,
) {
self.0
.http_local_rate_limit
@ -187,7 +193,7 @@ impl FmtMetrics for HttpAuthzMetrics {
// === impl TcpAuthzMetrics ===
impl TcpAuthzMetrics {
pub fn allow(&self, permit: &ServerPermit, tls: tls::ConditionalServerTls) {
pub fn allow(&self, permit: &ServerPermit, tls: tls::ConditionalServerTlsLabels) {
self.0
.allow
.lock()
@ -196,7 +202,7 @@ impl TcpAuthzMetrics {
.incr();
}
pub fn deny(&self, policy: &AllowPolicy, tls: tls::ConditionalServerTls) {
pub fn deny(&self, policy: &AllowPolicy, tls: tls::ConditionalServerTlsLabels) {
self.0
.deny
.lock()
@ -205,7 +211,7 @@ impl TcpAuthzMetrics {
.incr();
}
pub fn terminate(&self, policy: &AllowPolicy, tls: tls::ConditionalServerTls) {
pub fn terminate(&self, policy: &AllowPolicy, tls: tls::ConditionalServerTlsLabels) {
self.0
.terminate
.lock()
@ -246,18 +252,24 @@ impl FmtMetrics for TcpAuthzMetrics {
impl FmtLabels for HTTPLocalRateLimitLabels {
fn fmt_labels(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.server.fmt_labels(f)?;
if let Some(rl) = &self.rate_limit {
let Self {
server,
rate_limit,
scope,
} = self;
server.fmt_labels(f)?;
if let Some(rl) = rate_limit {
write!(
f,
",ratelimit_group=\"{}\",ratelimit_kind=\"{}\",ratelimit_name=\"{}\",ratelimit_scope=\"{}\"",
rl.group(),
rl.kind(),
rl.name(),
self.scope,
scope,
)
} else {
write!(f, ",ratelimit_scope=\"{}\"", self.scope)
write!(f, ",ratelimit_scope=\"{scope}\"")
}
}
}
@ -265,7 +277,7 @@ impl FmtLabels for HTTPLocalRateLimitLabels {
// === impl Key ===
impl<L> Key<L> {
fn new(labels: L, dst: OrigDstAddr, tls: tls::ConditionalServerTls) -> Self {
fn new(labels: L, dst: OrigDstAddr, tls: tls::ConditionalServerTlsLabels) -> Self {
Self {
tls,
target: TargetAddr(dst.into()),
@ -276,24 +288,30 @@ impl<L> Key<L> {
impl<L: FmtLabels> FmtLabels for Key<L> {
fn fmt_labels(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
(self.target, (&self.labels, TlsAccept(&self.tls))).fmt_labels(f)
let Self {
target,
tls,
labels,
} = self;
(target, (labels, TlsAccept(tls))).fmt_labels(f)
}
}
impl ServerKey {
fn from_policy(policy: &AllowPolicy, tls: tls::ConditionalServerTls) -> Self {
fn from_policy(policy: &AllowPolicy, tls: tls::ConditionalServerTlsLabels) -> Self {
Self::new(policy.server_label(), policy.dst_addr(), tls)
}
}
impl RouteAuthzKey {
fn from_permit(permit: &HttpRoutePermit, tls: tls::ConditionalServerTls) -> Self {
fn from_permit(permit: &HttpRoutePermit, tls: tls::ConditionalServerTlsLabels) -> Self {
Self::new(permit.labels.clone(), permit.dst, tls)
}
}
impl ServerAuthzKey {
fn from_permit(permit: &ServerPermit, tls: tls::ConditionalServerTls) -> Self {
fn from_permit(permit: &ServerPermit, tls: tls::ConditionalServerTlsLabels) -> Self {
Self::new(permit.labels.clone(), permit.dst, tls)
}
}

View File

@ -8,7 +8,7 @@ use crate::{
};
use linkerd_app_core::{
errors::{FailFastError, LoadShedError},
metrics::FmtLabels,
metrics::legacy::FmtLabels,
tls,
};
use std::fmt;

View File

@ -1,6 +1,9 @@
use super::ErrorKind;
use linkerd_app_core::{
metrics::{metrics, Counter, FmtMetrics, ServerLabel},
metrics::{
legacy::{Counter, FmtMetrics},
metrics, ServerLabel,
},
svc::{self, stack::NewMonitor},
transport::{labels::TargetAddr, OrigDstAddr},
Error,

View File

@ -1,6 +1,9 @@
use super::ErrorKind;
use linkerd_app_core::{
metrics::{metrics, Counter, FmtMetrics},
metrics::{
legacy::{Counter, FmtMetrics},
metrics,
},
svc::{self, stack::NewMonitor},
transport::{labels::TargetAddr, OrigDstAddr},
Error,

View File

@ -33,7 +33,7 @@ static INVALID_POLICY: once_cell::sync::OnceCell<ServerPolicy> = once_cell::sync
impl<S> Api<S>
where
S: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error> + Clone,
S: tonic::client::GrpcService<tonic::body::Body, Error = Error> + Clone,
S::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error> + Send + 'static,
{
pub(super) fn new(
@ -57,7 +57,7 @@ where
impl<S> Service<u16> for Api<S>
where
S: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
S: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
S: Clone + Send + Sync + 'static,
S::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error> + Send + 'static,
S::Future: Send + 'static,

View File

@ -40,7 +40,7 @@ impl Config {
limits: ReceiveLimits,
) -> impl GetPolicy + Clone + Send + Sync + 'static
where
C: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
C: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
C: Clone + Unpin + Send + Sync + 'static,
C::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error>,
C::ResponseBody: Send + 'static,

View File

@ -248,8 +248,11 @@ impl<T, N> HttpPolicyService<T, N> {
);
}
}
self.metrics
.deny(labels, self.connection.dst, self.connection.tls.clone());
self.metrics.deny(
labels,
self.connection.dst,
self.connection.tls.as_ref().map(|t| t.labels()),
);
return Err(HttpRouteUnauthorized(()).into());
}
};
@ -279,14 +282,19 @@ impl<T, N> HttpPolicyService<T, N> {
}
};
self.metrics.allow(&permit, self.connection.tls.clone());
self.metrics
.allow(&permit, self.connection.tls.as_ref().map(|t| t.labels()));
Ok((permit, r#match, route))
}
fn mk_route_not_found(&self) -> Error {
let labels = self.policy.server_label();
self.metrics
.route_not_found(labels, self.connection.dst, self.connection.tls.clone());
self.metrics.route_not_found(
labels,
self.connection.dst,
self.connection.tls.as_ref().map(|t| t.labels()),
);
HttpRouteNotFound(()).into()
}
@ -306,7 +314,7 @@ impl<T, N> HttpPolicyService<T, N> {
self.metrics.ratelimit(
self.policy.ratelimit_label(&err),
self.connection.dst,
self.connection.tls.clone(),
self.connection.tls.as_ref().map(|t| t.labels()),
);
err.into()
})

View File

@ -74,7 +74,7 @@ impl<S> Store<S> {
opaque_ports: RangeInclusiveSet<u16>,
) -> Self
where
S: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
S: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
S: Clone + Send + Sync + 'static,
S::Future: Send,
S::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error> + Send + 'static,
@ -138,7 +138,7 @@ impl<S> Store<S> {
impl<S> GetPolicy for Store<S>
where
S: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
S: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
S: Clone + Send + Sync + 'static,
S::Future: Send,
S::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error> + Send + 'static,

View File

@ -77,7 +77,8 @@ where
// This new services requires a ClientAddr, so it must necessarily be built for each
// connection. So we can just increment the counter here since the service can only
// be used at most once.
self.metrics.allow(&permit, tls.clone());
self.metrics
.allow(&permit, tls.as_ref().map(|t| t.labels()));
let inner = self.inner.new_service((permit, target));
TcpPolicy::Authorized(Authorized {
@ -97,7 +98,7 @@ where
?tls, %client,
"Connection denied"
);
self.metrics.deny(&policy, tls);
self.metrics.deny(&policy, tls.as_ref().map(|t| t.labels()));
TcpPolicy::Unauthorized(deny)
}
}
@ -167,7 +168,7 @@ where
%client,
"Connection terminated due to policy change",
);
metrics.terminate(&policy, tls);
metrics.terminate(&policy, tls.as_ref().map(|t| t.labels()));
return Err(denied.into());
}
}

View File

@ -263,7 +263,7 @@ fn orig_dst_addr() -> OrigDstAddr {
OrigDstAddr(([192, 0, 2, 2], 1000).into())
}
impl tonic::client::GrpcService<tonic::body::BoxBody> for MockSvc {
impl tonic::client::GrpcService<tonic::body::Body> for MockSvc {
type ResponseBody = linkerd_app_core::control::RspBody;
type Error = Error;
type Future = futures::future::Pending<Result<http::Response<Self::ResponseBody>, Self::Error>>;
@ -275,7 +275,7 @@ impl tonic::client::GrpcService<tonic::body::BoxBody> for MockSvc {
unreachable!()
}
fn call(&mut self, _req: http::Request<tonic::body::BoxBody>) -> Self::Future {
fn call(&mut self, _req: http::Request<tonic::body::Body>) -> Self::Future {
unreachable!()
}
}

View File

@ -27,7 +27,7 @@ impl Inbound<()> {
limits: ReceiveLimits,
) -> impl policy::GetPolicy + Clone + Send + Sync + 'static
where
C: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
C: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
C: Clone + Unpin + Send + Sync + 'static,
C::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error>,
C::ResponseBody: Send + 'static,

View File

@ -3,9 +3,7 @@ pub use futures::prelude::*;
use linkerd_app_core::{
config,
dns::Suffix,
drain, exp_backoff,
identity::rustls,
metrics,
drain, exp_backoff, identity, metrics,
proxy::{
http::{h1, h2},
tap,
@ -98,7 +96,7 @@ pub fn runtime() -> (ProxyRuntime, drain::Signal) {
let (tap, _) = tap::new();
let (metrics, _) = metrics::Metrics::new(std::time::Duration::from_secs(10));
let runtime = ProxyRuntime {
identity: rustls::creds::default_for_test().1.into(),
identity: identity::creds::default_for_test().1,
metrics: metrics.proxy,
tap,
span_sink: None,

View File

@ -28,17 +28,19 @@ ipnet = "2"
linkerd-app = { path = "..", features = ["allow-loopback"] }
linkerd-app-core = { path = "../core" }
linkerd-app-test = { path = "../test" }
linkerd-meshtls = { path = "../../meshtls", features = ["test-util"] }
linkerd-metrics = { path = "../../metrics", features = ["test_util"] }
linkerd-rustls = { path = "../../rustls" }
linkerd-tracing = { path = "../../tracing" }
maplit = "1"
parking_lot = "0.12"
regex = "1"
rustls-pemfile = "2.2"
socket2 = "0.5"
socket2 = "0.6"
tokio = { version = "1", features = ["io-util", "net", "rt", "macros"] }
tokio-rustls = { workspace = true }
tokio-stream = { version = "0.1", features = ["sync"] }
tonic = { workspace = true, features = ["transport"], default-features = false }
tonic = { workspace = true, features = ["transport", "router"], default-features = false }
tower = { workspace = true, default-features = false }
tracing = { workspace = true }
@ -72,8 +74,5 @@ flate2 = { version = "1", default-features = false, features = [
] }
# Log streaming isn't enabled by default globally, but we want to test it.
linkerd-app-admin = { path = "../admin", features = ["log-streaming"] }
# No code from this crate is actually used; only necessary to enable the Rustls
# implementation.
linkerd-meshtls = { path = "../../meshtls", features = ["rustls"] }
linkerd-tracing = { path = "../../tracing", features = ["ansi"] }
serde_json = "1"

View File

@ -262,10 +262,7 @@ impl pb::destination_server::Destination for Controller {
}
tracing::warn!(?dst, ?updates, "request does not match");
let msg = format!(
"expected get call for {:?} but got get call for {:?}",
dst, req
);
let msg = format!("expected get call for {dst:?} but got get call for {req:?}");
calls.push_front(Dst::Call(dst, updates));
return Err(grpc::Status::new(grpc::Code::Unavailable, msg));
}

View File

@ -8,7 +8,8 @@ use std::{
};
use linkerd2_proxy_api::identity as pb;
use tokio_rustls::rustls::{self, pki_types::CertificateDer, server::WebPkiClientVerifier};
use linkerd_rustls::get_default_provider;
use tokio_rustls::rustls::{self, server::WebPkiClientVerifier};
use tonic as grpc;
pub struct Identity {
@ -34,10 +35,6 @@ type Certify = Box<
> + Send,
>;
static TLS_VERSIONS: &[&rustls::SupportedProtocolVersion] = &[&rustls::version::TLS13];
static TLS_SUPPORTED_CIPHERSUITES: &[rustls::SupportedCipherSuite] =
&[rustls::crypto::ring::cipher_suite::TLS13_CHACHA20_POLY1305_SHA256];
struct Certificates {
pub leaf: Vec<u8>,
pub intermediates: Vec<Vec<u8>>,
@ -54,13 +51,13 @@ impl Certificates {
let leaf = certs
.next()
.expect("no leaf cert in pemfile")
.map_err(|_| io::Error::new(io::ErrorKind::Other, "rustls error reading certs"))?
.map_err(|_| io::Error::other("rustls error reading certs"))?
.as_ref()
.to_vec();
let intermediates = certs
.map(|cert| cert.map(|cert| cert.as_ref().to_vec()))
.collect::<Result<Vec<_>, _>>()
.map_err(|_| io::Error::new(io::ErrorKind::Other, "rustls error reading certs"))?;
.map_err(|_| io::Error::other("rustls error reading certs"))?;
Ok(Certificates {
leaf,
@ -104,19 +101,16 @@ impl Identity {
use std::io::Cursor;
let mut roots = rustls::RootCertStore::empty();
let trust_anchors = rustls_pemfile::certs(&mut Cursor::new(trust_anchors))
.map(|bytes| bytes.map(CertificateDer::from))
.collect::<Result<Vec<_>, _>>()
.expect("error parsing pemfile");
let (added, skipped) = roots.add_parsable_certificates(trust_anchors);
assert_ne!(added, 0, "trust anchors must include at least one cert");
assert_eq!(skipped, 0, "no certs in pemfile should be invalid");
let mut provider = rustls::crypto::ring::default_provider();
provider.cipher_suites = TLS_SUPPORTED_CIPHERSUITES.to_vec();
let provider = Arc::new(provider);
let provider = get_default_provider();
let client_config = rustls::ClientConfig::builder_with_provider(provider.clone())
.with_protocol_versions(TLS_VERSIONS)
.with_safe_default_protocol_versions()
.expect("client config must be valid")
.with_root_certificates(roots.clone())
.with_no_client_auth();
@ -128,7 +122,7 @@ impl Identity {
.expect("server verifier must be valid");
let server_config = rustls::ServerConfig::builder_with_provider(provider)
.with_protocol_versions(TLS_VERSIONS)
.with_safe_default_protocol_versions()
.expect("server config must be valid")
.with_client_cert_verifier(client_cert_verifier)
.with_single_cert(certs.chain(), key)
@ -219,7 +213,7 @@ impl Controller {
let f = f.take().expect("called twice?");
let fut = f(req)
.map_ok(grpc::Response::new)
.map_err(|e| grpc::Status::new(grpc::Code::Internal, format!("{}", e)));
.map_err(|e| grpc::Status::new(grpc::Code::Internal, format!("{e}")));
Box::pin(fut)
});
self.expect_calls.lock().push_back(func);

View File

@ -3,6 +3,7 @@
#![warn(rust_2018_idioms, clippy::disallowed_methods, clippy::disallowed_types)]
#![forbid(unsafe_code)]
#![recursion_limit = "256"]
#![allow(clippy::result_large_err)]
mod test_env;
@ -247,7 +248,7 @@ impl fmt::Display for HumanDuration {
let secs = self.0.as_secs();
let subsec_ms = self.0.subsec_nanos() as f64 / 1_000_000f64;
if secs == 0 {
write!(fmt, "{}ms", subsec_ms)
write!(fmt, "{subsec_ms}ms")
} else {
write!(fmt, "{}s", secs as f64 + subsec_ms)
}

View File

@ -302,7 +302,7 @@ impl Controller {
}
pub async fn run(self) -> controller::Listening {
let svc = grpc::transport::Server::builder()
let routes = grpc::service::Routes::default()
.add_service(
inbound_server_policies_server::InboundServerPoliciesServer::new(Server(Arc::new(
self.inbound,
@ -310,9 +310,9 @@ impl Controller {
)
.add_service(outbound_policies_server::OutboundPoliciesServer::new(
Server(Arc::new(self.outbound)),
))
.into_service();
controller::run(RoutesSvc(svc), "support policy controller", None).await
));
controller::run(RoutesSvc(routes), "support policy controller", None).await
}
}
@ -525,7 +525,9 @@ impl Service<Request<hyper::body::Incoming>> for RoutesSvc {
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
let Self(routes) = self;
routes.poll_ready(cx)
<grpc::service::Routes as Service<Request<UnsyncBoxBody<Bytes, grpc::Status>>>>::poll_ready(
routes, cx,
)
}
fn call(&mut self, req: Request<hyper::body::Incoming>) -> Self::Future {

View File

@ -108,7 +108,7 @@ impl fmt::Debug for MockOrigDst {
match self {
Self::Addr(addr) => f
.debug_tuple("MockOrigDst::Addr")
.field(&format_args!("{}", addr))
.field(&format_args!("{addr}"))
.finish(),
Self::Direct => f.debug_tuple("MockOrigDst::Direct").finish(),
Self::None => f.debug_tuple("MockOrigDst::None").finish(),
@ -416,9 +416,9 @@ async fn run(proxy: Proxy, mut env: TestEnv, random_ports: bool) -> Listening {
use std::fmt::Write;
let mut ports = inbound_default_ports.iter();
if let Some(port) = ports.next() {
let mut var = format!("{}", port);
let mut var = format!("{port}");
for port in ports {
write!(&mut var, ",{}", port).expect("writing to String should never fail");
write!(&mut var, ",{port}").expect("writing to String should never fail");
}
info!("{}={:?}", app::env::ENV_INBOUND_PORTS, var);
env.put(app::env::ENV_INBOUND_PORTS, var);

View File

@ -137,28 +137,28 @@ impl TapEventExt for pb::TapEvent {
fn request_init_authority(&self) -> &str {
match self.event() {
pb::tap_event::http::Event::RequestInit(ev) => &ev.authority,
e => panic!("not RequestInit event: {:?}", e),
e => panic!("not RequestInit event: {e:?}"),
}
}
fn request_init_path(&self) -> &str {
match self.event() {
pb::tap_event::http::Event::RequestInit(ev) => &ev.path,
e => panic!("not RequestInit event: {:?}", e),
e => panic!("not RequestInit event: {e:?}"),
}
}
fn response_init_status(&self) -> u16 {
match self.event() {
pb::tap_event::http::Event::ResponseInit(ev) => ev.http_status as u16,
e => panic!("not ResponseInit event: {:?}", e),
e => panic!("not ResponseInit event: {e:?}"),
}
}
fn response_end_bytes(&self) -> u64 {
match self.event() {
pb::tap_event::http::Event::ResponseEnd(ev) => ev.response_bytes,
e => panic!("not ResponseEnd event: {:?}", e),
e => panic!("not ResponseEnd event: {e:?}"),
}
}
@ -170,7 +170,7 @@ impl TapEventExt for pb::TapEvent {
}) => code,
_ => panic!("not Eos GrpcStatusCode: {:?}", ev.eos),
},
ev => panic!("not ResponseEnd event: {:?}", ev),
ev => panic!("not ResponseEnd event: {ev:?}"),
}
}
}

View File

@ -381,7 +381,7 @@ mod cross_version {
}
fn default_dst_name(port: u16) -> String {
format!("{}:{}", HOST, port)
format!("{HOST}:{port}")
}
fn send_default_dst(

View File

@ -63,7 +63,7 @@ async fn wait_for_profile_stage(client: &client::Client, metrics: &client::Clien
for _ in 0i32..10 {
assert_eq!(client.get("/load-profile").await, "");
let m = metrics.get("/metrics").await;
let stage_metric = format!("rt_load_profile=\"{}\"", stage);
let stage_metric = format!("rt_load_profile=\"{stage}\"");
if m.contains(stage_metric.as_str()) {
break;
}

View File

@ -88,12 +88,12 @@ impl TestBuilder {
let port = srv.addr.port();
let ctrl = controller::new();
let dst_tx = ctrl.destination_tx(format!("{}:{}", host, port));
let dst_tx = ctrl.destination_tx(format!("{host}:{port}"));
dst_tx.send_addr(srv.addr);
let ctrl = controller::new();
let dst_tx = ctrl.destination_tx(format!("{}:{}", host, port));
let dst_tx = ctrl.destination_tx(format!("{host}:{port}"));
dst_tx.send_addr(srv.addr);
let profile_tx = ctrl.profile_tx(srv.addr.to_string());

View File

@ -1318,9 +1318,9 @@ async fn metrics_compression() {
body.copy_to_bytes(body.remaining()),
));
let mut scrape = String::new();
decoder.read_to_string(&mut scrape).unwrap_or_else(|_| {
panic!("decode gzip (requested Accept-Encoding: {})", encoding)
});
decoder
.read_to_string(&mut scrape)
.unwrap_or_else(|_| panic!("decode gzip (requested Accept-Encoding: {encoding})"));
scrape
}
};

View File

@ -26,7 +26,7 @@ async fn is_valid_json() {
assert!(!json.is_empty());
for obj in json {
println!("{}\n", obj);
println!("{obj}\n");
}
}
@ -53,7 +53,7 @@ async fn query_is_valid_json() {
assert!(!json.is_empty());
for obj in json {
println!("{}\n", obj);
println!("{obj}\n");
}
}
@ -74,12 +74,9 @@ async fn valid_get_does_not_error() {
let json = logs.await.unwrap();
for obj in json {
println!("{}\n", obj);
println!("{obj}\n");
if obj.get("error").is_some() {
panic!(
"expected the log stream to contain no error responses!\njson = {}",
obj
);
panic!("expected the log stream to contain no error responses!\njson = {obj}");
}
}
}
@ -101,12 +98,9 @@ async fn valid_query_does_not_error() {
let json = logs.await.unwrap();
for obj in json {
println!("{}\n", obj);
println!("{obj}\n");
if obj.get("error").is_some() {
panic!(
"expected the log stream to contain no error responses!\njson = {}",
obj
);
panic!("expected the log stream to contain no error responses!\njson = {obj}");
}
}
}
@ -142,9 +136,7 @@ async fn multi_filter() {
level.and_then(|value| value.as_str()),
Some("DEBUG") | Some("INFO") | Some("WARN") | Some("ERROR")
),
"level must be DEBUG, INFO, WARN, or ERROR\n level: {:?}\n json: {:#?}",
level,
obj
"level must be DEBUG, INFO, WARN, or ERROR\n level: {level:?}\n json: {obj:#?}"
);
}
@ -175,7 +167,7 @@ async fn get_log_stream(
let req = client
.request_body(
client
.request_builder(&format!("{}?{}", PATH, filter))
.request_builder(&format!("{PATH}?{filter}"))
.method(http::Method::GET)
.body(http_body_util::Full::new(Bytes::from(filter)))
.unwrap(),
@ -231,7 +223,7 @@ where
}
}
Err(e) => {
println!("body failed: {}", e);
println!("body failed: {e}");
break;
}
};

View File

@ -80,10 +80,7 @@ impl Test {
.await
};
env.put(
app::env::ENV_INBOUND_DETECT_TIMEOUT,
format!("{:?}", TIMEOUT),
);
env.put(app::env::ENV_INBOUND_DETECT_TIMEOUT, format!("{TIMEOUT:?}"));
(self.set_env)(&mut env);
@ -127,26 +124,6 @@ async fn inbound_timeout() {
.await;
}
/// Tests that the detect metric is labeled and incremented on I/O error.
#[tokio::test]
async fn inbound_io_err() {
let _trace = trace_init();
let (proxy, metrics) = Test::default().run().await;
let client = crate::tcp::client(proxy.inbound);
let tcp_client = client.connect().await;
tcp_client.write(TcpFixture::HELLO_MSG).await;
drop(tcp_client);
metric(&proxy)
.label("error", "i/o")
.value(1u64)
.assert_in(&metrics)
.await;
}
/// Tests that the detect metric is not incremented when TLS is successfully
/// detected.
#[tokio::test]
@ -192,44 +169,6 @@ async fn inbound_success() {
metric.assert_in(&metrics).await;
}
/// Tests both of the above cases together.
#[tokio::test]
async fn inbound_multi() {
let _trace = trace_init();
let (proxy, metrics) = Test::default().run().await;
let client = crate::tcp::client(proxy.inbound);
let metric = metric(&proxy);
let timeout_metric = metric.clone().label("error", "tls detection timeout");
let io_metric = metric.label("error", "i/o");
let tcp_client = client.connect().await;
tokio::time::sleep(TIMEOUT + Duration::from_millis(15)) // just in case
.await;
timeout_metric.clone().value(1u64).assert_in(&metrics).await;
drop(tcp_client);
let tcp_client = client.connect().await;
tcp_client.write(TcpFixture::HELLO_MSG).await;
drop(tcp_client);
io_metric.clone().value(1u64).assert_in(&metrics).await;
timeout_metric.clone().value(1u64).assert_in(&metrics).await;
let tcp_client = client.connect().await;
tokio::time::sleep(TIMEOUT + Duration::from_millis(15)) // just in case
.await;
io_metric.clone().value(1u64).assert_in(&metrics).await;
timeout_metric.clone().value(2u64).assert_in(&metrics).await;
drop(tcp_client);
}
/// Tests that TLS detect failure metrics are collected for the direct stack.
#[tokio::test]
async fn inbound_direct_multi() {

View File

@ -13,7 +13,7 @@ Configures and runs the outbound proxy
default = []
allow-loopback = []
test-subscriber = []
test-util = ["linkerd-app-test", "linkerd-meshtls-rustls/test-util", "dep:http-body"]
test-util = ["linkerd-app-test", "linkerd-meshtls/test-util", "dep:http-body"]
prometheus-client-rust-242 = [] # TODO
@ -42,7 +42,7 @@ linkerd-http-prom = { path = "../../http/prom" }
linkerd-http-retry = { path = "../../http/retry" }
linkerd-http-route = { path = "../../http/route" }
linkerd-identity = { path = "../../identity" }
linkerd-meshtls-rustls = { path = "../../meshtls/rustls", optional = true }
linkerd-meshtls = { path = "../../meshtls", optional = true, default-features = false }
linkerd-opaq-route = { path = "../../opaq-route" }
linkerd-proxy-client-policy = { path = "../../proxy/client-policy", features = [
"proto",
@ -67,8 +67,7 @@ linkerd-app-test = { path = "../test", features = ["client-policy"] }
linkerd-http-box = { path = "../../http/box" }
linkerd-http-prom = { path = "../../http/prom", features = ["test-util"] }
linkerd-io = { path = "../../io", features = ["tokio-test"] }
linkerd-meshtls = { path = "../../meshtls", features = ["rustls"] }
linkerd-meshtls-rustls = { path = "../../meshtls/rustls", features = [
linkerd-meshtls = { path = "../../meshtls", features = [
"test-util",
] }
linkerd-mock-http-body = { path = "../../mock/http-body" }

View File

@ -134,7 +134,13 @@ impl<N> Outbound<N> {
.unwrap_or_else(|| (orig_dst, Default::default()));
// TODO(ver) We should be able to figure out resource coordinates for
// the endpoint?
synthesize_forward_policy(&META, detect_timeout, queue, addr, meta)
synthesize_forward_policy(
&META,
detect_timeout,
queue,
addr,
meta.into(),
)
},
);
return Ok((Some(profile), policy));
@ -189,7 +195,7 @@ pub fn synthesize_forward_policy(
timeout: Duration,
queue: policy::Queue,
addr: SocketAddr,
metadata: policy::EndpointMetadata,
metadata: Arc<policy::EndpointMetadata>,
) -> ClientPolicy {
policy_for_backend(
meta,

View File

@ -32,7 +32,7 @@ pub use self::balance::BalancerMetrics;
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub enum Dispatch {
Balance(NameAddr, EwmaConfig),
Forward(Remote<ServerAddr>, Metadata),
Forward(Remote<ServerAddr>, Arc<Metadata>),
/// A backend dispatcher that explicitly fails all requests.
Fail {
message: Arc<str>,
@ -49,7 +49,7 @@ pub struct DispatcherFailed(Arc<str>);
pub struct Endpoint<T> {
addr: Remote<ServerAddr>,
is_local: bool,
metadata: Metadata,
metadata: Arc<Metadata>,
parent: T,
queue: QueueConfig,
close_server_connection_on_remote_proxy_error: bool,
@ -279,6 +279,13 @@ impl<T> svc::Param<tls::ConditionalClientTls> for Endpoint<T> {
}
}
impl<T> svc::Param<tls::ConditionalClientTlsLabels> for Endpoint<T> {
fn param(&self) -> tls::ConditionalClientTlsLabels {
let tls: tls::ConditionalClientTls = self.param();
tls.as_ref().map(tls::ClientTls::labels)
}
}
impl<T> svc::Param<http::Variant> for Endpoint<T>
where
T: svc::Param<http::Variant>,

View File

@ -121,7 +121,7 @@ where
let http2 = http2.override_from(metadata.http2_client_params());
Endpoint {
addr: Remote(ServerAddr(addr)),
metadata,
metadata: metadata.into(),
is_local,
parent: target.parent,
queue: http_queue,

View File

@ -289,6 +289,12 @@ impl svc::Param<tls::ConditionalClientTls> for Endpoint {
}
}
impl svc::Param<tls::ConditionalClientTlsLabels> for Endpoint {
fn param(&self) -> tls::ConditionalClientTlsLabels {
tls::ConditionalClientTlsLabels::None(tls::NoClientTls::Disabled)
}
}
impl svc::Param<Option<tcp::tagged_transport::PortOverride>> for Endpoint {
fn param(&self) -> Option<tcp::tagged_transport::PortOverride> {
None

View File

@ -8,7 +8,7 @@ use linkerd_app_core::{
transport::addrs::*,
Addr, Error, Infallible, NameAddr, CANONICAL_DST_HEADER,
};
use std::{fmt::Debug, hash::Hash};
use std::{fmt::Debug, hash::Hash, sync::Arc};
use tokio::sync::watch;
pub mod policy;
@ -32,7 +32,7 @@ pub enum Routes {
/// Fallback endpoint forwarding.
// TODO(ver) Remove this variant when policy routes are fully wired up.
Endpoint(Remote<ServerAddr>, Metadata),
Endpoint(Remote<ServerAddr>, Arc<Metadata>),
}
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
@ -64,7 +64,7 @@ enum RouterParams<T: Clone + Debug + Eq + Hash> {
Profile(profile::Params<T>),
// TODO(ver) Remove this variant when policy routes are fully wired up.
Endpoint(Remote<ServerAddr>, Metadata, T),
Endpoint(Remote<ServerAddr>, Arc<Metadata>, T),
}
// Only applies to requests with profiles.

View File

@ -54,7 +54,7 @@ impl retry::Policy for RetryPolicy {
if let Some(codes) = self.retryable_grpc_statuses.as_ref() {
let grpc_status = Self::grpc_status(rsp);
let retryable = grpc_status.map_or(false, |c| codes.contains(c));
let retryable = grpc_status.is_some_and(|c| codes.contains(c));
tracing::debug!(retryable, grpc.status = ?grpc_status);
if retryable {
return true;

View File

@ -214,7 +214,7 @@ impl Outbound<()> {
detect_timeout,
queue,
addr,
meta,
meta.into(),
);
}

View File

@ -146,7 +146,7 @@ impl Outbound<()> {
export_hostname_labels: bool,
) -> impl policy::GetPolicy
where
C: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
C: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
C: Clone + Unpin + Send + Sync + 'static,
C::ResponseBody: proxy::http::Body<Data = tonic::codegen::Bytes, Error = Error>,
C::ResponseBody: Send + 'static,

View File

@ -130,7 +130,7 @@ impl OutboundMetrics {
}
}
impl FmtMetrics for OutboundMetrics {
impl legacy::FmtMetrics for OutboundMetrics {
fn fmt_metrics(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.http_errors.fmt_metrics(f)?;
self.tcp_errors.fmt_metrics(f)?;
@ -243,7 +243,7 @@ impl EncodeLabelSet for RouteRef {
// === impl ConcreteLabels ===
impl FmtLabels for ConcreteLabels {
impl legacy::FmtLabels for ConcreteLabels {
fn fmt_labels(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let ConcreteLabels(parent, backend) = self;

View File

@ -5,7 +5,7 @@ pub(crate) use self::{http::Http, tcp::Tcp};
use crate::http::IdentityRequired;
use linkerd_app_core::{
errors::{FailFastError, LoadShedError},
metrics::FmtLabels,
metrics::legacy::FmtLabels,
proxy::http::ResponseTimeoutError,
};
use std::fmt;

View File

@ -1,6 +1,9 @@
use super::ErrorKind;
use linkerd_app_core::{
metrics::{metrics, Counter, FmtMetrics},
metrics::{
legacy::{Counter, FmtMetrics},
metrics,
},
svc, Error,
};
use parking_lot::RwLock;

View File

@ -1,6 +1,9 @@
use super::ErrorKind;
use linkerd_app_core::{
metrics::{metrics, Counter, FmtMetrics},
metrics::{
legacy::{Counter, FmtMetrics},
metrics,
},
svc,
transport::{labels::TargetAddr, OrigDstAddr},
Error,

View File

@ -32,7 +32,7 @@ use tracing::info_span;
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub enum Dispatch {
Balance(NameAddr, balance::EwmaConfig),
Forward(Remote<ServerAddr>, Metadata),
Forward(Remote<ServerAddr>, Arc<Metadata>),
/// A backend dispatcher that explicitly fails all requests.
Fail {
message: Arc<str>,
@ -57,7 +57,7 @@ pub struct ConcreteError {
pub struct Endpoint<T> {
addr: Remote<ServerAddr>,
is_local: bool,
metadata: Metadata,
metadata: Arc<Metadata>,
parent: T,
}
@ -196,7 +196,7 @@ impl<C> Outbound<C> {
let is_local = inbound_ips.contains(&addr.ip());
Endpoint {
addr: Remote(ServerAddr(addr)),
metadata,
metadata: metadata.into(),
is_local,
parent: target.parent,
}
@ -419,3 +419,10 @@ impl<T> svc::Param<tls::ConditionalClientTls> for Endpoint<T> {
))
}
}
impl<T> svc::Param<tls::ConditionalClientTlsLabels> for Endpoint<T> {
fn param(&self) -> tls::ConditionalClientTlsLabels {
let tls: tls::ConditionalClientTls = self.param();
tls.as_ref().map(tls::ClientTls::labels)
}
}

View File

@ -160,9 +160,7 @@ async fn balances() {
}
assert!(
seen0 && seen1,
"Both endpoints must be used; ep0={} ep1={}",
seen0,
seen1
"Both endpoints must be used; ep0={seen0} ep1={seen1}"
);
// When we remove the ep0, all traffic goes to ep1:
@ -190,8 +188,7 @@ async fn balances() {
task.abort();
assert!(
errors::is_caused_by::<FailFastError>(&*err),
"unexpected error: {}",
err
"unexpected error: {err}"
);
assert!(resolved.only_configured(), "Resolution must be reused");
}

View File

@ -33,7 +33,7 @@ static INVALID_POLICY: once_cell::sync::OnceCell<ClientPolicy> = once_cell::sync
impl<S> Api<S>
where
S: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error> + Clone,
S: tonic::client::GrpcService<tonic::body::Body, Error = Error> + Clone,
S::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error> + Send + 'static,
{
pub(crate) fn new(
@ -59,7 +59,7 @@ where
impl<S> Service<Addr> for Api<S>
where
S: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
S: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
S: Clone + Send + Sync + 'static,
S::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error> + Send + 'static,
S::Future: Send + 'static,

View File

@ -60,7 +60,7 @@ pub(crate) fn runtime() -> (ProxyRuntime, drain::Signal) {
let (tap, _) = tap::new();
let (metrics, _) = metrics::Metrics::new(std::time::Duration::from_secs(10));
let runtime = ProxyRuntime {
identity: linkerd_meshtls_rustls::creds::default_for_test().1.into(),
identity: linkerd_meshtls::creds::default_for_test().1,
metrics: metrics.proxy,
tap,
span_sink: None,

View File

@ -31,7 +31,7 @@ use tracing::info_span;
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub enum Dispatch {
Balance(NameAddr, balance::EwmaConfig),
Forward(Remote<ServerAddr>, Metadata),
Forward(Remote<ServerAddr>, Arc<Metadata>),
/// A backend dispatcher that explicitly fails all requests.
Fail {
message: Arc<str>,
@ -56,7 +56,7 @@ pub struct ConcreteError {
pub struct Endpoint<T> {
addr: Remote<ServerAddr>,
is_local: bool,
metadata: Metadata,
metadata: Arc<Metadata>,
parent: T,
}
@ -174,7 +174,7 @@ impl<C> Outbound<C> {
let is_local = inbound_ips.contains(&addr.ip());
Endpoint {
addr: Remote(ServerAddr(addr)),
metadata,
metadata: metadata.into(),
is_local,
parent: target.parent,
}
@ -385,3 +385,10 @@ impl<T> svc::Param<tls::ConditionalClientTls> for Endpoint<T> {
))
}
}
impl<T> svc::Param<tls::ConditionalClientTlsLabels> for Endpoint<T> {
fn param(&self) -> tls::ConditionalClientTlsLabels {
let tls: tls::ConditionalClientTls = self.param();
tls.as_ref().map(tls::ClientTls::labels)
}
}

View File

@ -11,13 +11,18 @@ use linkerd_proxy_client_policy::{self as client_policy, tls::sni};
use parking_lot::Mutex;
use std::{
collections::HashMap,
marker::PhantomData,
net::SocketAddr,
sync::Arc,
task::{Context, Poll},
time::Duration,
};
use tokio::sync::watch;
use tokio_rustls::rustls::pki_types::DnsName;
use tokio_rustls::rustls::{
internal::msgs::codec::{Codec, Reader},
pki_types::DnsName,
InvalidMessage,
};
mod basic;
@ -142,7 +147,7 @@ fn default_backend(addr: SocketAddr) -> client_policy::Backend {
capacity: 100,
failfast_timeout: Duration::from_secs(10),
},
dispatcher: BackendDispatcher::Forward(addr, EndpointMetadata::default()),
dispatcher: BackendDispatcher::Forward(addr, EndpointMetadata::default().into()),
}
}
@ -170,44 +175,57 @@ fn sni_route(backend: client_policy::Backend, sni: sni::MatchSni) -> client_poli
// generates a sample ClientHello TLS message for testing
fn generate_client_hello(sni: &str) -> Vec<u8> {
use tokio_rustls::rustls::{
internal::msgs::{
base::Payload,
codec::{Codec, Reader},
enums::Compression,
handshake::{
ClientExtension, ClientHelloPayload, HandshakeMessagePayload, HandshakePayload,
Random, ServerName, SessionId,
},
message::{MessagePayload, PlainMessage},
},
CipherSuite, ContentType, HandshakeType, ProtocolVersion,
internal::msgs::{base::Payload, codec::Codec, message::PlainMessage},
ContentType, ProtocolVersion,
};
let sni = DnsName::try_from(sni.to_string()).unwrap();
let sni = trim_hostname_trailing_dot_for_sni(&sni);
let mut server_name_bytes = vec![];
0u8.encode(&mut server_name_bytes); // encode the type first
(sni.as_ref().len() as u16).encode(&mut server_name_bytes); // then the length as u16
server_name_bytes.extend_from_slice(sni.as_ref().as_bytes()); // then the server name itself
// rustls has internal-only types that can encode a ClientHello, but they are mostly
// inaccessible and an unstable part of the public API anyway. Manually encode one here for
// testing only instead.
let server_name =
ServerName::read(&mut Reader::init(&server_name_bytes)).expect("Server name is valid");
let mut hs_payload_bytes = vec![];
1u8.encode(&mut hs_payload_bytes); // client hello ID
let hs_payload = HandshakeMessagePayload {
typ: HandshakeType::ClientHello,
payload: HandshakePayload::ClientHello(ClientHelloPayload {
client_version: ProtocolVersion::TLSv1_2,
random: Random::from([0; 32]),
session_id: SessionId::read(&mut Reader::init(&[0])).unwrap(),
cipher_suites: vec![CipherSuite::TLS_NULL_WITH_NULL_NULL],
compression_methods: vec![Compression::Null],
extensions: vec![ClientExtension::ServerName(vec![server_name])],
}),
let client_hello_body = {
let mut payload = LengthPayload::<U24>::empty();
payload.buf.extend_from_slice(&[0x03, 0x03]); // client version, TLSv1.2
payload.buf.extend_from_slice(&[0u8; 32]); // random
0u8.encode(&mut payload.buf); // session ID
LengthPayload::<u16>::from_slice(&[0x00, 0x00] /* TLS_NULL_WITH_NULL_NULL */)
.encode(&mut payload.buf);
LengthPayload::<u8>::from_slice(&[0x00] /* no compression */).encode(&mut payload.buf);
let extensions = {
let mut payload = LengthPayload::<u16>::empty();
0u16.encode(&mut payload.buf); // server name extension ID
let server_name_extension = {
let mut payload = LengthPayload::<u16>::empty();
let server_name = {
let mut payload = LengthPayload::<u16>::empty();
0u8.encode(&mut payload.buf); // DNS hostname ID
LengthPayload::<u16>::from_slice(sni.as_ref().as_bytes())
.encode(&mut payload.buf);
payload
};
server_name.encode(&mut payload.buf);
payload
};
server_name_extension.encode(&mut payload.buf);
payload
};
extensions.encode(&mut payload.buf);
payload
};
let mut hs_payload_bytes = Vec::default();
MessagePayload::handshake(hs_payload).encode(&mut hs_payload_bytes);
client_hello_body.encode(&mut hs_payload_bytes);
let message = PlainMessage {
typ: ContentType::Handshake,
@ -218,6 +236,65 @@ fn generate_client_hello(sni: &str) -> Vec<u8> {
message.into_unencrypted_opaque().encode()
}
#[derive(Debug)]
struct LengthPayload<T> {
buf: Vec<u8>,
_boo: PhantomData<fn() -> T>,
}
impl<T> LengthPayload<T> {
fn empty() -> Self {
Self {
buf: vec![],
_boo: PhantomData,
}
}
fn from_slice(s: &[u8]) -> Self {
Self {
buf: s.to_vec(),
_boo: PhantomData,
}
}
}
impl Codec<'_> for LengthPayload<u8> {
fn encode(&self, bytes: &mut Vec<u8>) {
(self.buf.len() as u8).encode(bytes);
bytes.extend_from_slice(&self.buf);
}
fn read(_: &mut Reader<'_>) -> std::result::Result<Self, InvalidMessage> {
unimplemented!()
}
}
impl Codec<'_> for LengthPayload<u16> {
fn encode(&self, bytes: &mut Vec<u8>) {
(self.buf.len() as u16).encode(bytes);
bytes.extend_from_slice(&self.buf);
}
fn read(_: &mut Reader<'_>) -> std::result::Result<Self, InvalidMessage> {
unimplemented!()
}
}
#[derive(Debug)]
struct U24;
impl Codec<'_> for LengthPayload<U24> {
fn encode(&self, bytes: &mut Vec<u8>) {
let len = self.buf.len() as u32;
bytes.extend_from_slice(&len.to_be_bytes()[1..]);
bytes.extend_from_slice(&self.buf);
}
fn read(_: &mut Reader<'_>) -> std::result::Result<Self, InvalidMessage> {
unimplemented!()
}
}
fn trim_hostname_trailing_dot_for_sni(dns_name: &DnsName<'_>) -> DnsName<'static> {
let dns_name_str = dns_name.as_ref();

View File

@ -43,7 +43,7 @@ impl Config {
) -> Result<
Dst<
impl svc::Service<
http::Request<tonic::body::BoxBody>,
http::Request<tonic::body::Body>,
Response = http::Response<control::RspBody>,
Error = Error,
Future = impl Send,

View File

@ -294,7 +294,7 @@ const DEFAULT_INBOUND_HTTP_FAILFAST_TIMEOUT: Duration = Duration::from_secs(1);
const DEFAULT_INBOUND_DETECT_TIMEOUT: Duration = Duration::from_secs(10);
const DEFAULT_INBOUND_CONNECT_TIMEOUT: Duration = Duration::from_millis(300);
const DEFAULT_INBOUND_CONNECT_BACKOFF: ExponentialBackoff =
ExponentialBackoff::new_unchecked(Duration::from_millis(100), Duration::from_millis(500), 0.1);
ExponentialBackoff::new_unchecked(Duration::from_millis(100), Duration::from_secs(10), 0.1);
const DEFAULT_OUTBOUND_TCP_QUEUE_CAPACITY: usize = 10_000;
const DEFAULT_OUTBOUND_TCP_FAILFAST_TIMEOUT: Duration = Duration::from_secs(3);
@ -303,7 +303,7 @@ const DEFAULT_OUTBOUND_HTTP_FAILFAST_TIMEOUT: Duration = Duration::from_secs(3);
const DEFAULT_OUTBOUND_DETECT_TIMEOUT: Duration = Duration::from_secs(10);
const DEFAULT_OUTBOUND_CONNECT_TIMEOUT: Duration = Duration::from_secs(1);
const DEFAULT_OUTBOUND_CONNECT_BACKOFF: ExponentialBackoff =
ExponentialBackoff::new_unchecked(Duration::from_millis(100), Duration::from_millis(500), 0.1);
ExponentialBackoff::new_unchecked(Duration::from_millis(100), Duration::from_secs(60), 0.1);
const DEFAULT_CONTROL_QUEUE_CAPACITY: usize = 100;
const DEFAULT_CONTROL_FAILFAST_TIMEOUT: Duration = Duration::from_secs(10);
@ -343,12 +343,12 @@ const DEFAULT_INBOUND_HTTP1_CONNECTION_POOL_IDLE_TIMEOUT: Duration = Duration::f
// TODO(ver) This should be configurable at the load balancer level.
const DEFAULT_OUTBOUND_HTTP1_CONNECTION_POOL_IDLE_TIMEOUT: Duration = Duration::from_secs(3);
// By default, we don't limit the number of connections a connection pol may
// use, as doing so can severely impact CPU utilization for applications with
// many concurrent requests. It's generally preferable to use the MAX_IDLE_AGE
// limitations to quickly drop idle connections.
const DEFAULT_INBOUND_MAX_IDLE_CONNS_PER_ENDPOINT: usize = usize::MAX;
const DEFAULT_OUTBOUND_MAX_IDLE_CONNS_PER_ENDPOINT: usize = usize::MAX;
// By default, we limit the number of connections that may be opened per-host.
// We pick a high number (10k) that shouldn't interfere with most workloads, but
// will prevent issues with our outbound HTTP client from exhausting the file
// descriptors available to the process.
const DEFAULT_INBOUND_MAX_IDLE_CONNS_PER_ENDPOINT: usize = 10_000;
const DEFAULT_OUTBOUND_MAX_IDLE_CONNS_PER_ENDPOINT: usize = 10_000;
// These settings limit the number of requests that have not received responses,
// including those buffered in the proxy and dispatched to the destination
@ -1117,11 +1117,11 @@ pub fn parse_backoff<S: Strings>(
base: &str,
default: ExponentialBackoff,
) -> Result<ExponentialBackoff, EnvError> {
let min_env = format!("LINKERD2_PROXY_{}_EXP_BACKOFF_MIN", base);
let min_env = format!("LINKERD2_PROXY_{base}_EXP_BACKOFF_MIN");
let min = parse(strings, &min_env, parse_duration);
let max_env = format!("LINKERD2_PROXY_{}_EXP_BACKOFF_MAX", base);
let max_env = format!("LINKERD2_PROXY_{base}_EXP_BACKOFF_MAX");
let max = parse(strings, &max_env, parse_duration);
let jitter_env = format!("LINKERD2_PROXY_{}_EXP_BACKOFF_JITTER", base);
let jitter_env = format!("LINKERD2_PROXY_{base}_EXP_BACKOFF_JITTER");
let jitter = parse(strings, &jitter_env, parse_number::<f64>);
match (min?, max?, jitter?) {
@ -1256,7 +1256,7 @@ pub fn parse_linkerd_identity_config<S: Strings>(
Ok((control, certify))
}
(addr, end_entity_dir, token, _minr, _maxr) => {
let s = format!("{0}_ADDR and {0}_NAME", ENV_IDENTITY_SVC_BASE);
let s = format!("{ENV_IDENTITY_SVC_BASE}_ADDR and {ENV_IDENTITY_SVC_BASE}_NAME");
let svc_env: &str = s.as_str();
for (unset, name) in &[
(addr.is_none(), svc_env),

View File

@ -172,11 +172,11 @@ mod tests {
fn test_unit<F: Fn(u64) -> Duration>(unit: &str, to_duration: F) {
for v in &[0, 1, 23, 456_789] {
let d = to_duration(*v);
let text = format!("{}{}", v, unit);
assert_eq!(parse_duration(&text), Ok(d), "text=\"{}\"", text);
let text = format!("{v}{unit}");
assert_eq!(parse_duration(&text), Ok(d), "text=\"{text}\"");
let text = format!(" {}{}\t", v, unit);
assert_eq!(parse_duration(&text), Ok(d), "text=\"{}\"", text);
let text = format!(" {v}{unit}\t");
assert_eq!(parse_duration(&text), Ok(d), "text=\"{text}\"");
}
}
@ -245,7 +245,7 @@ mod tests {
fn p(s: &str) -> Result<Vec<String>, ParseError> {
let mut sfxs = parse_dns_suffixes(s)?
.into_iter()
.map(|s| format!("{}", s))
.map(|s| format!("{s}"))
.collect::<Vec<_>>();
sfxs.sort();
Ok(sfxs)

View File

@ -4,7 +4,8 @@ pub use linkerd_app_core::identity::{client, Id};
use linkerd_app_core::{
control, dns,
identity::{
client::linkerd::Certify, creds, CertMetrics, Credentials, DerX509, Mode, WithCertMetrics,
client::linkerd::Certify, creds, watch as watch_identity, CertMetrics, Credentials,
DerX509, WithCertMetrics,
},
metrics::{prom, ControlHttp as ClientMetrics},
Result,
@ -137,8 +138,7 @@ fn watch(
watch::Receiver<bool>,
)> {
let (tx, ready) = watch::channel(false);
let (store, receiver) =
Mode::default().watch(tls.id, tls.server_name, &tls.trust_anchors_pem)?;
let (store, receiver) = watch_identity(tls.id, tls.server_name, &tls.trust_anchors_pem)?;
let cred = WithCertMetrics::new(metrics, NotifyReady { store, tx });
Ok((cred, receiver, ready))
}

View File

@ -19,9 +19,10 @@ use linkerd_app_core::{
config::ServerConfig,
control::{ControlAddr, Metrics as ControlMetrics},
dns, drain,
metrics::{prom, FmtMetrics},
metrics::{legacy::FmtMetrics, prom},
serve,
svc::Param,
tls_info,
transport::{addrs::*, listen::Bind},
Error, ProxyRuntime,
};
@ -251,9 +252,6 @@ impl Config {
export_hostname_labels,
);
let dst_addr = dst.addr.clone();
// registry.sub_registry_with_prefix("gateway"),
let gateway = gateway::Gateway::new(gateway, inbound.clone(), outbound.clone()).stack(
dst.resolve.clone(),
dst.profiles.clone(),
@ -304,6 +302,7 @@ impl Config {
error!(%error, "Failed to register process metrics");
}
registry.register("proxy_build_info", "Proxy build info", BUILD_INFO.metric());
registry.register("rustls_info", "Proxy TLS info", tls_info::metric());
let admin = {
let identity = identity.receiver().server();
@ -330,7 +329,7 @@ impl Config {
Ok(App {
admin,
dst: dst_addr,
dst: dst.addr,
drain: drain_tx,
identity,
inbound_addr,
@ -398,7 +397,7 @@ impl App {
pub fn tracing_addr(&self) -> Option<&ControlAddr> {
match self.trace_collector {
trace_collector::TraceCollector::Disabled { .. } => None,
trace_collector::TraceCollector::Disabled => None,
crate::trace_collector::TraceCollector::Enabled(ref oc) => Some(&oc.addr),
}
}

View File

@ -46,7 +46,7 @@ impl Config {
) -> Result<
Policy<
impl svc::Service<
http::Request<tonic::body::BoxBody>,
http::Request<tonic::body::Body>,
Response = http::Response<control::RspBody>,
Error = Error,
Future = impl Send,

View File

@ -6,7 +6,7 @@ use linkerd_opencensus::{self as opencensus, metrics, proto};
use std::{collections::HashMap, time::SystemTime};
use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream;
use tonic::{body::BoxBody, client::GrpcService};
use tonic::{body::Body as TonicBody, client::GrpcService};
use tracing::Instrument;
pub(super) fn create_collector<S>(
@ -18,7 +18,7 @@ pub(super) fn create_collector<S>(
legacy_metrics: metrics::Registry,
) -> EnabledCollector
where
S: GrpcService<BoxBody> + Clone + Send + 'static,
S: GrpcService<TonicBody> + Clone + Send + 'static,
S::Error: Into<Error>,
S::Future: Send,
S::ResponseBody: Body<Data = tonic::codegen::Bytes> + Send + 'static,

View File

@ -15,7 +15,7 @@ use std::{
};
use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream;
use tonic::{body::BoxBody, client::GrpcService};
use tonic::{body::Body as TonicBody, client::GrpcService};
use tracing::Instrument;
pub(super) struct OtelCollectorAttributes {
@ -31,7 +31,7 @@ pub(super) fn create_collector<S>(
legacy_metrics: metrics::Registry,
) -> EnabledCollector
where
S: GrpcService<BoxBody> + Clone + Send + 'static,
S: GrpcService<TonicBody> + Clone + Send + 'static,
S::Error: Into<Error>,
S::Future: Send,
S::ResponseBody: Body<Data = tonic::codegen::Bytes> + Send + 'static,

View File

@ -70,8 +70,7 @@ impl<E: fmt::Debug> tower::Service<E> for NoRawTcp {
fn call(&mut self, endpoint: E) -> Self::Future {
panic!(
"no raw TCP connections expected in this test, but tried to connect to {:?}",
endpoint
"no raw TCP connections expected in this test, but tried to connect to {endpoint:?}"
);
}
}

View File

@ -279,8 +279,7 @@ impl<T: Param<ConcreteAddr>, E> tower::Service<T> for NoDst<E> {
fn call(&mut self, target: T) -> Self::Future {
let ConcreteAddr(addr) = target.param();
panic!(
"no destination resolutions were expected in this test, but tried to resolve {}",
addr
"no destination resolutions were expected in this test, but tried to resolve {addr}"
);
}
}
@ -296,10 +295,7 @@ impl<T: Param<profiles::LookupAddr>> tower::Service<T> for NoProfiles {
fn call(&mut self, target: T) -> Self::Future {
let profiles::LookupAddr(addr) = target.param();
panic!(
"no profile resolutions were expected in this test, but tried to resolve {}",
addr
);
panic!("no profile resolutions were expected in this test, but tried to resolve {addr}");
}
}

View File

@ -11,7 +11,6 @@ ahash = "0.8"
linkerd-stack = { path = "../stack" }
parking_lot = "0.12"
rand = { version = "0.9", features = ["small_rng"] }
tokio = { version = "1", features = ["macros"] }
tracing = { workspace = true }
[dev-dependencies]

View File

@ -7,14 +7,15 @@ edition = { workspace = true }
publish = { workspace = true }
[dependencies]
futures = { version = "0.3", default-features = false }
hickory-resolver = "0.25.2"
linkerd-dns-name = { path = "./name" }
linkerd-error = { path = "../error" }
prometheus-client = { workspace = true }
thiserror = "2"
tokio = { version = "1", features = ["rt", "sync", "time"] }
tracing = { workspace = true }
[dev-dependencies]
linkerd-error = { path = "../error" }
[lints.rust]
unexpected_cfgs = { level = "warn", check-cfg = ['cfg(fuzzing)'] }

View File

@ -312,7 +312,7 @@ mod tests {
for &(left, right, expected_result) in CASES {
let l = left.parse::<Name>().unwrap();
let r = right.parse::<Name>().unwrap();
assert_eq!(l == r, expected_result, "{:?} vs {:?}", l, r);
assert_eq!(l == r, expected_result, "{l:?} vs {r:?}");
}
}
@ -327,7 +327,7 @@ mod tests {
];
for (host, expected_result) in cases {
let dns_name = host.parse::<Name>().unwrap();
assert_eq!(dns_name.is_localhost(), *expected_result, "{:?}", dns_name)
assert_eq!(dns_name.is_localhost(), *expected_result, "{dns_name:?}")
}
}
@ -343,12 +343,11 @@ mod tests {
for (host, expected_result) in cases {
let dns_name = host
.parse::<Name>()
.unwrap_or_else(|_| panic!("'{}' was invalid", host));
.unwrap_or_else(|_| panic!("'{host}' was invalid"));
assert_eq!(
dns_name.without_trailing_dot(),
*expected_result,
"{:?}",
dns_name
"{dns_name:?}"
)
}
assert!(".".parse::<Name>().is_err());

View File

@ -338,7 +338,7 @@ mod tests {
] {
let n = Name::from_str(name).unwrap();
let s = Suffix::from_str(suffix).unwrap();
assert!(s.contains(&n), "{} should contain {}", suffix, name);
assert!(s.contains(&n), "{suffix} should contain {name}");
}
}
@ -352,7 +352,7 @@ mod tests {
] {
let n = Name::from_str(name).unwrap();
let s = Suffix::from_str(suffix).unwrap();
assert!(!s.contains(&n), "{} should not contain {}", suffix, name);
assert!(!s.contains(&n), "{suffix} should not contain {name}");
}
assert!(Suffix::from_str("").is_err(), "suffix must not be empty");

View File

@ -15,10 +15,10 @@ tokio = { version = "1", default-features = false }
tracing = { workspace = true }
linkerd-error = { path = "../../error" }
linkerd-http-box = { path = "../../http/box" }
linkerd-stack = { path = "../../stack" }
[dev-dependencies]
tokio-test = "0.4"
tower-test = { workspace = true }
linkerd-http-box = { path = "../../http/box" }
linkerd-tracing = { path = "../../tracing", features = ["ansi"] }

View File

@ -2,7 +2,7 @@ use super::{ClassifyEos, ClassifyResponse};
use futures::{prelude::*, ready};
use http_body::Frame;
use linkerd_error::Error;
use linkerd_stack::{layer, ExtractParam, NewService, Service};
use linkerd_stack::Service;
use pin_project::{pin_project, pinned_drop};
use std::{
fmt::Debug,
@ -12,18 +12,6 @@ use std::{
};
use tokio::sync::mpsc;
/// Constructs new [`BroadcastClassification`] services.
///
/// `X` is an [`ExtractParam`] implementation that extracts a [`Tx`] from each
/// target. The [`Tx`] is used to broadcast the classification of each response
/// from the constructed [`BroadcastClassification`] service.
#[derive(Debug)]
pub struct NewBroadcastClassification<C, X, N> {
inner: N,
extract: X,
_marker: PhantomData<fn() -> C>,
}
/// A HTTP `Service` that applies a [`ClassifyResponse`] to each response, and
/// broadcasts the classification over a [`mpsc`] channel.
#[derive(Debug)]
@ -33,14 +21,6 @@ pub struct BroadcastClassification<C: ClassifyResponse, S> {
_marker: PhantomData<fn() -> C>,
}
/// A handle to a [`mpsc`] channel over which response classifications are
/// broadcasted.
///
/// This is extracted from a target value by [`NewBroadcastClassification`] when
/// constructing a [`BroadcastClassification`] service.
#[derive(Clone, Debug)]
pub struct Tx<C>(pub mpsc::Sender<C>);
#[pin_project]
pub struct ResponseFuture<C: ClassifyResponse, B, F> {
#[pin]
@ -62,59 +42,6 @@ struct State<C, T> {
tx: mpsc::Sender<T>,
}
// === impl NewBroadcastClassification ===
impl<C, X: Clone, N> NewBroadcastClassification<C, X, N> {
pub fn new(extract: X, inner: N) -> Self {
Self {
inner,
extract,
_marker: PhantomData,
}
}
/// Returns a [`layer::Layer`] that constructs `NewBroadcastClassification`
/// [`NewService`]s, using the provided [`ExtractParam`] implementation to
/// extract a classification [`Tx`] from the target.
pub fn layer_via(extract: X) -> impl layer::Layer<N, Service = Self> + Clone {
layer::mk(move |inner| Self::new(extract.clone(), inner))
}
}
impl<C, N> NewBroadcastClassification<C, (), N> {
/// Returns a [`layer::Layer`] that constructs `NewBroadcastClassification`
/// [`NewService`]s when the target type implements
/// [`linkerd_stack::Param`]`<`[`Tx`]`>`.
pub fn layer() -> impl layer::Layer<N, Service = Self> + Clone {
Self::layer_via(())
}
}
impl<T, C, X, N> NewService<T> for NewBroadcastClassification<C, X, N>
where
C: ClassifyResponse,
X: ExtractParam<Tx<C::Class>, T>,
N: NewService<T>,
{
type Service = BroadcastClassification<C, N::Service>;
fn new_service(&self, target: T) -> Self::Service {
let Tx(tx) = self.extract.extract_param(&target);
let inner = self.inner.new_service(target);
BroadcastClassification::new(tx, inner)
}
}
impl<C, X: Clone, N: Clone> Clone for NewBroadcastClassification<C, X, N> {
fn clone(&self) -> Self {
Self {
inner: self.inner.clone(),
extract: self.extract.clone(),
_marker: PhantomData,
}
}
}
// === impl BroadcastClassification ===
impl<C: ClassifyResponse, S> BroadcastClassification<C, S> {

View File

@ -4,12 +4,12 @@
use linkerd_error::Error;
pub use self::{
channel::{BroadcastClassification, NewBroadcastClassification, Tx},
channel::BroadcastClassification,
gate::{NewClassifyGate, NewClassifyGateSet},
insert::{InsertClassifyResponse, NewInsertClassifyResponse},
};
pub mod channel;
mod channel;
pub mod gate;
mod insert;

View File

@ -10,11 +10,9 @@ publish = { workspace = true }
test-util = []
[dependencies]
bytes = { workspace = true }
futures = { version = "0.3", default-features = false }
http = { workspace = true }
http-body = { workspace = true }
hyper = { workspace = true, features = ["http1", "http2"] }
parking_lot = "0.12"
pin-project = "1"
tokio = { version = "1", features = ["time"] }

Some files were not shown because too many files have changed in this diff Show More