Compare commits

95 commits: release/v2 ... main

SHA1:
e8cc4ec47b, b951a6c374, 7f6ac15f13, 75e9caaeae, 02bbb3d617, 103c69ca75,
4663cc4eb6, 03374b9543, 4c9ae74450, 94572d174d, 897c7e85bc, 036ca75c00,
98e731d841, d5b12ea5e2, a64170bd61, 973dfa6f4d, 17bff6144a, d385094caa,
dce6b61191, 28ebc47a6b, 4bae7e98f2, b89c4902c6, 8a80f1ce95, edc35d6e18,
99f322a9a0, 627a5aad21, 356f80b786, 0b3bc61263, 2b0e723027, 2156c3d5e3,
c4cae21e11, 89c88caf5c, bb612d3aac, 7030cc51ed, af520dfd12, ccf91dfb3e,
69cd164da1, feb5f87713, fdd7f218a3, b4e2b7e24f, 1b07f277d7, 25cf0c7f11,
d46e7c0c82, 3305a890b0, d850fa6f73, 6b323d8457, 7f58cd56ed, 43e3f630ec,
7758436831, c0f921af33, b558ce5320, 894d3506df, 01e7ec0820, 4f563fab68,
168c4bff7d, 0df8cdbedb, 1b837b7f91, 40078c96ca, 9eaf1425a7, 842452368c,
09333dc2b2, ddc847ccc4, 8d56746c1f, db0ed46978, 1d94082d4b, 744e29e0bd,
83373d6b89, 1dcb7a7d1a, 68a6b6d1e8, cdbb55fd53, 5997453393, 10643b9525,
fd0ea24b87, 3a159be91a, b3bc6fe8cd, 5a00b70d11, b91dd3e7af, a699b1cf58,
9f3c45874e, 1de179e178, f9d7e08242, bd454b4be8, b1f8fa5419, e04947610a,
9c48e2471e, df38a5a2c9, 7c6882bb35, a6e47d7e03, 2cc8c7d80e, 21f3ffc6c1,
1b85cf93a4, ce5df7d026, 79e612c2f9, 62ed64ea05, e8de6359a5
@@ -3,7 +3,7 @@
  "build": {
    "dockerfile": "Dockerfile",
    "args": {
-     "DEV_VERSION": "v45",
+     "DEV_VERSION": "v47",
      "http_proxy": "${localEnv:http_proxy}",
      "https_proxy": "${localEnv:https_proxy}"
    }
@@ -22,13 +22,13 @@ permissions:

jobs:
  build:
-   runs-on: ubuntu-24.04
-   container: ghcr.io/linkerd/dev:v45-rust
+   runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
+   container: ghcr.io/linkerd/dev:v47-rust
    timeout-minutes: 20
    continue-on-error: true
    steps:
      - run: rustup toolchain install --profile=minimal beta
-     - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+     - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
      - run: git config --global --add safe.directory "$PWD" # actions/runner#2033
      - run: just toolchain=beta fetch
      - run: just toolchain=beta build
@@ -21,9 +21,9 @@ env:
jobs:
  meta:
    timeout-minutes: 5
-   runs-on: ubuntu-24.04
+   runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
    steps:
-     - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+     - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
      - id: changed
        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c
        with:
@@ -40,15 +40,15 @@ jobs:
  codecov:
    needs: meta
    if: (github.event_name == 'push' && github.ref == 'refs/heads/main') || needs.meta.outputs.any_changed == 'true'
-   runs-on: ubuntu-24.04
+   runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
    timeout-minutes: 30
    container:
-     image: docker://ghcr.io/linkerd/dev:v45-rust
+     image: docker://ghcr.io/linkerd/dev:v47-rust
      options: --security-opt seccomp=unconfined # 🤷
    env:
      CXX: "/usr/bin/clang++-19"
    steps:
-     - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+     - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
      - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0
      - run: cargo tarpaulin --locked --workspace --exclude=linkerd2-proxy --exclude=linkerd-transport-header --exclude=opencensus-proto --exclude=spire-proto --no-run
      - run: cargo tarpaulin --locked --workspace --exclude=linkerd2-proxy --exclude=linkerd-transport-header --exclude=opencensus-proto --exclude=spire-proto --skip-clean --ignore-tests --no-fail-fast --out=Xml
@@ -26,11 +26,11 @@ permissions:
jobs:
  list-changed:
    timeout-minutes: 3
-   runs-on: ubuntu-24.04
-   container: docker://rust:1.83.0
+   runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
+   container: docker://rust:1.88.0
    steps:
      - run: apt update && apt install -y jo
-     - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+     - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
      - run: git config --global --add safe.directory "$PWD" # actions/runner#2033
      - uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c
        id: changed-files
@@ -47,15 +47,15 @@ jobs:
  build:
    needs: [list-changed]
    timeout-minutes: 40
-   runs-on: ubuntu-24.04
-   container: docker://rust:1.83.0
+   runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
+   container: docker://rust:1.88.0
    strategy:
      matrix:
        dir: ${{ fromJson(needs.list-changed.outputs.dirs) }}
    steps:
      - run: rustup toolchain add nightly
      - run: cargo install cargo-fuzz
-     - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+     - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
      - run: git config --global --add safe.directory "$PWD" # actions/runner#2033
      - working-directory: ${{matrix.dir}}
        run: cargo +nightly fetch
@@ -12,9 +12,9 @@ on:
jobs:
  markdownlint:
    timeout-minutes: 5
-   runs-on: ubuntu-24.04
+   runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
    steps:
-     - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+     - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
      - uses: DavidAnson/markdownlint-cli2-action@992badcdf24e3b8eb7e87ff9287fe931bcb00c6e
        with:
          globs: "**/*.md"
@@ -22,13 +22,13 @@ permissions:

jobs:
  build:
-   runs-on: ubuntu-24.04
-   container: ghcr.io/linkerd/dev:v45-rust
+   runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
+   container: ghcr.io/linkerd/dev:v47-rust
    timeout-minutes: 20
    continue-on-error: true
    steps:
      - run: rustup toolchain install --profile=minimal nightly
-     - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+     - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
      - run: git config --global --add safe.directory "$PWD" # actions/runner#2033
      - run: just toolchain=nightly fetch
      - run: just toolchain=nightly profile=release build
@@ -14,9 +14,9 @@ concurrency:
jobs:
  meta:
    timeout-minutes: 5
-   runs-on: ubuntu-24.04
+   runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
    steps:
-     - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+     - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
      - id: build
        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c
        with:
@@ -57,7 +57,7 @@ jobs:
  info:
    timeout-minutes: 3
    needs: meta
-   runs-on: ubuntu-24.04
+   runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
    steps:
      - name: Info
        run: |
@@ -74,24 +74,24 @@ jobs:
  actions:
    needs: meta
    if: needs.meta.outputs.actions_changed == 'true'
-   runs-on: ubuntu-24.04
+   runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
    steps:
-     - uses: linkerd/dev/actions/setup-tools@v45
-     - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+     - uses: linkerd/dev/actions/setup-tools@v47
+     - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
      - run: just action-lint
      - run: just action-dev-check

  rust:
    needs: meta
    if: needs.meta.outputs.cargo_changed == 'true' || needs.meta.outputs.rust_changed == 'true'
-   runs-on: ubuntu-24.04
-   container: ghcr.io/linkerd/dev:v45-rust
+   runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
+   container: ghcr.io/linkerd/dev:v47-rust
    permissions:
      contents: read
    timeout-minutes: 20
    steps:
      - run: git config --global --add safe.directory "$PWD" # actions/runner#2033
-     - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+     - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
      - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0
      - run: just fetch
      - run: cargo deny --all-features check bans licenses sources
@@ -107,14 +107,14 @@ jobs:
    needs: meta
    if: needs.meta.outputs.cargo_changed == 'true'
    timeout-minutes: 20
-   runs-on: ubuntu-24.04
-   container: ghcr.io/linkerd/dev:v45-rust
+   runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
+   container: ghcr.io/linkerd/dev:v47-rust
    strategy:
      matrix:
        crate: ${{ fromJson(needs.meta.outputs.cargo_crates) }}
    steps:
      - run: git config --global --add safe.directory "$PWD" # actions/runner#2033
-     - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+     - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
      - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0
      - run: just fetch
      - run: just check-crate ${{ matrix.crate }}
@@ -123,11 +123,11 @@ jobs:
    needs: meta
    if: needs.meta.outputs.cargo_changed == 'true' || needs.meta.outputs.rust_changed == 'true'
    timeout-minutes: 20
-   runs-on: ubuntu-24.04
+   runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
    env:
      WAIT_TIMEOUT: 2m
    steps:
-     - uses: linkerd/dev/actions/setup-tools@v45
+     - uses: linkerd/dev/actions/setup-tools@v47
      - name: scurl https://run.linkerd.io/install-edge | sh
        run: |
          scurl https://run.linkerd.io/install-edge | sh
@@ -136,7 +136,7 @@ jobs:
          tag=$(linkerd version --client --short)
          echo "linkerd $tag"
          echo "LINKERD_TAG=$tag" >> "$GITHUB_ENV"
-     - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+     - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
      - run: just docker
      - run: just k3d-create
      - run: just k3d-load-linkerd
@@ -149,7 +149,7 @@
    timeout-minutes: 3
    needs: [meta, actions, rust, rust-crates, linkerd-install]
    if: always()
-   runs-on: ubuntu-24.04
+   runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}

    permissions:
      contents: write
@@ -168,7 +168,7 @@ jobs:
        if: contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled')
        run: exit 1

-     - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+     - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
        if: needs.meta.outputs.is_dependabot == 'true' && needs.meta.outputs.any_changed == 'true'
      - name: "Merge dependabot changes"
        if: needs.meta.outputs.is_dependabot == 'true' && needs.meta.outputs.any_changed == 'true'
@@ -13,7 +13,7 @@ concurrency:
jobs:
  last-release:
    if: github.repository == 'linkerd/linkerd2-proxy' # Don't run this in forks.
-   runs-on: ubuntu-24.04
+   runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
    timeout-minutes: 5
    env:
      GH_REPO: ${{ github.repository }}
@@ -41,10 +41,10 @@ jobs:

  last-commit:
    needs: last-release
-   runs-on: ubuntu-24.04
+   runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
    timeout-minutes: 5
    steps:
-     - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+     - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
      - name: Check if the most recent commit is after the last release
        id: recency
        env:
@@ -62,7 +62,7 @@ jobs:
  trigger-release:
    needs: [last-release, last-commit]
    if: needs.last-release.outputs.recent == 'false' && needs.last-commit.outputs.after-release == 'true'
-   runs-on: ubuntu-24.04
+   runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
    timeout-minutes: 5
    permissions:
      actions: write
@@ -46,6 +46,7 @@ on:
        default: true
+
env:
  CARGO: "cargo auditable"
  CARGO_INCREMENTAL: 0
  CARGO_NET_RETRY: 10
  RUSTFLAGS: "-D warnings -A deprecated --cfg tokio_unstable"
@@ -58,9 +59,25 @@ concurrency:
jobs:
  meta:
    timeout-minutes: 5
-   runs-on: ubuntu-24.04
+   runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
    steps:
-     - id: meta
+     - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
+       if: github.event_name == 'pull_request'
+     - id: workflow
+       if: github.event_name == 'pull_request'
+       uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c
+       with:
+         files: |
+           .github/workflows/release.yml
+     - id: build
+       if: github.event_name == 'pull_request'
+       uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c
+       with:
+         files: |
+           justfile
+           Cargo.toml
+
+     - id: version
        env:
          VERSION: ${{ inputs.version }}
        shell: bash
@@ -68,47 +85,45 @@ jobs:
          set -euo pipefail
          shopt -s extglob
          if [[ "$GITHUB_EVENT_NAME" == pull_request ]]; then
-           echo version="0.0.0-test.${GITHUB_SHA:0:7}"
-           echo archs='["amd64"]'
-           echo oses='["linux"]'
+           echo version="0.0.0-test.${GITHUB_SHA:0:7}" >> "$GITHUB_OUTPUT"
            exit 0
-         fi >> "$GITHUB_OUTPUT"
+         fi
          if ! [[ "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+(-[0-9A-Za-z-]+)?(\+[0-9A-Za-z-]+)?$ ]]; then
            echo "Invalid version: $VERSION" >&2
            exit 1
          fi
-         ( echo version="${VERSION#v}"
-           echo archs='["amd64", "arm64", "arm"]'
+         echo version="${VERSION#v}" >> "$GITHUB_OUTPUT"

+     - id: platform
+       shell: bash
+       env:
+         WORKFLOW_CHANGED: ${{ steps.workflow.outputs.any_changed }}
+       run: |
+         if [[ "$GITHUB_EVENT_NAME" == pull_request && "$WORKFLOW_CHANGED" != 'true' ]]; then
+           ( echo archs='["amd64"]'
+             echo oses='["linux"]' ) >> "$GITHUB_OUTPUT"
+           exit 0
+         fi
+         ( echo archs='["amd64", "arm64"]'
+           echo oses='["linux", "windows"]'
+         ) >> "$GITHUB_OUTPUT"

-     - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
-       if: github.event_name == 'pull_request'
-     - id: changed
-       if: github.event_name == 'pull_request'
-       uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c
-       with:
-         files: |
-           .github/workflows/release.yml
-           justfile
-           Cargo.toml
-
    outputs:
-     archs: ${{ steps.meta.outputs.archs }}
-     oses: ${{ steps.meta.outputs.oses }}
-     version: ${{ steps.meta.outputs.version }}
-     package: ${{ github.event_name == 'workflow_dispatch' || steps.changed.outputs.any_changed == 'true' }}
+     archs: ${{ steps.platform.outputs.archs }}
+     oses: ${{ steps.platform.outputs.oses }}
+     version: ${{ steps.version.outputs.version }}
+     package: ${{ github.event_name == 'workflow_dispatch' || steps.build.outputs.any_changed == 'true' || steps.workflow.outputs.any_changed == 'true' }}
      profile: ${{ inputs.profile || 'release' }}
      publish: ${{ inputs.publish }}
      ref: ${{ inputs.ref || github.sha }}
-     tag: "${{ inputs.tag-prefix || 'release/' }}v${{ steps.meta.outputs.version }}"
+     tag: "${{ inputs.tag-prefix || 'release/' }}v${{ steps.version.outputs.version }}"
      prerelease: ${{ inputs.prerelease }}
      draft: ${{ inputs.draft }}
      latest: ${{ inputs.latest }}

  info:
    needs: meta
-   runs-on: ubuntu-24.04
+   runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
    timeout-minutes: 3
    steps:
      - name: Inputs
@@ -134,15 +149,13 @@ jobs:
        exclude:
          - os: windows
            arch: arm64
-         - os: windows
-           arch: arm

    # If we're not actually building on a release tag, don't short-circuit on
    # errors. This helps us know whether a failure is platform-specific.
    continue-on-error: ${{ needs.meta.outputs.publish != 'true' }}
-   runs-on: ubuntu-24.04
+   runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
    timeout-minutes: 40
-   container: docker://ghcr.io/linkerd/dev:v45-rust-musl
+   container: docker://ghcr.io/linkerd/dev:v47-rust-musl
    env:
      LINKERD2_PROXY_VENDOR: ${{ github.repository_owner }}
      LINKERD2_PROXY_VERSION: ${{ needs.meta.outputs.version }}
@@ -150,15 +163,19 @@ jobs:
      # TODO: add to dev image
      - name: Install MiniGW
        if: matrix.os == 'windows'
-       run: apt-get update && apt-get install mingw-w64 -y
+       run: apt-get update && apt-get install -y mingw-w64
+     - name: Install cross compilation toolchain
+       if: matrix.arch == 'arm64'
+       run: apt-get update && apt-get install -y binutils-aarch64-linux-gnu

+     - name: Configure git
        run: git config --global --add safe.directory "$PWD" # actions/runner#2033
-     - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+     - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
        with:
          ref: ${{ needs.meta.outputs.ref }}
      - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0
        with:
-         key: ${{ matrix.arch }}
+         key: ${{ matrix.os }}-${{ matrix.arch }}
      - run: just fetch
      - run: just arch=${{ matrix.arch }} libc=${{ matrix.libc }} os=${{ matrix.os }} rustup
      - run: just arch=${{ matrix.arch }} libc=${{ matrix.libc }} os=${{ matrix.os }} profile=${{ needs.meta.outputs.profile }} build
@@ -170,7 +187,7 @@ jobs:

  publish:
    needs: [meta, package]
-   runs-on: ubuntu-24.04
+   runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
    timeout-minutes: 5
    permissions:
      actions: write
@@ -187,7 +204,7 @@ jobs:
          git config --global user.name "$GITHUB_USERNAME"
          git config --global user.email "$GITHUB_USERNAME"@users.noreply.github.com
      # Tag the release.
-     - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+     - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
        with:
          token: ${{ secrets.LINKERD2_PROXY_GITHUB_TOKEN || github.token }}
          ref: ${{ needs.meta.outputs.ref }}
@@ -225,7 +242,7 @@ jobs:
    needs: publish
    if: always()
    timeout-minutes: 3
-   runs-on: ubuntu-24.04
+   runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
    steps:
      - name: Results
        run: |
@@ -13,8 +13,8 @@ on:
jobs:
  sh-lint:
    timeout-minutes: 5
-   runs-on: ubuntu-24.04
+   runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
    steps:
-     - uses: linkerd/dev/actions/setup-tools@v45
-     - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+     - uses: linkerd/dev/actions/setup-tools@v47
+     - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
      - run: just sh-lint
@@ -13,10 +13,10 @@ permissions:

jobs:
  devcontainer:
-   runs-on: ubuntu-24.04
-   container: ghcr.io/linkerd/dev:v45-rust
+   runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
+   container: ghcr.io/linkerd/dev:v47-rust
    steps:
-     - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+     - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
      - run: git config --global --add safe.directory "$PWD" # actions/runner#2033
      - run: |
          VERSION_REGEX='channel = "([0-9]+\.[0-9]+\.[0-9]+)"'
@@ -35,10 +35,10 @@ jobs:


  workflows:
-   runs-on: ubuntu-24.04
+   runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
    steps:
-     - uses: linkerd/dev/actions/setup-tools@v45
-     - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+     - uses: linkerd/dev/actions/setup-tools@v47
+     - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
      - shell: bash
        run: |
          VERSION_REGEX='channel = "([0-9]+\.[0-9]+\.[0-9]+)"'
Cargo.lock: 740 changes (file diff suppressed because it is too large)

Cargo.toml: 10 changes
@@ -42,8 +42,6 @@ members = [
    "linkerd/idle-cache",
    "linkerd/io",
    "linkerd/meshtls",
    "linkerd/meshtls/boring",
    "linkerd/meshtls/rustls",
    "linkerd/meshtls/verifier",
    "linkerd/metrics",
    "linkerd/mock/http-body",
@@ -71,6 +69,7 @@ members = [
    "linkerd/reconnect",
    "linkerd/retry",
    "linkerd/router",
+   "linkerd/rustls",
    "linkerd/service-profiles",
    "linkerd/signal",
    "linkerd/stack",
@ -115,11 +114,10 @@ prost = { version = "0.13" }
|
|||
prost-build = { version = "0.13", default-features = false }
|
||||
prost-types = { version = "0.13" }
|
||||
tokio-rustls = { version = "0.26", default-features = false, features = [
|
||||
"ring",
|
||||
"logging",
|
||||
] }
|
||||
tonic = { version = "0.12", default-features = false }
|
||||
tonic-build = { version = "0.12", default-features = false }
|
||||
tonic = { version = "0.13", default-features = false }
|
||||
tonic-build = { version = "0.13", default-features = false }
|
||||
tower = { version = "0.5", default-features = false }
|
||||
tower-service = { version = "0.3" }
|
||||
tower-test = { version = "0.4" }
|
||||
|
@@ -136,4 +134,4 @@ default-features = false
features = ["tokio", "tracing"]

[workspace.dependencies.linkerd2-proxy-api]
-version = "0.16.0"
+version = "0.17.0"
@@ -3,7 +3,7 @@
# This is intended **DEVELOPMENT ONLY**, i.e. so that proxy developers can
# easily test the proxy in the context of the larger `linkerd2` project.

-ARG RUST_IMAGE=ghcr.io/linkerd/dev:v45-rust
+ARG RUST_IMAGE=ghcr.io/linkerd/dev:v47-rust

# Use an arbitrary ~recent edge release image to get the proxy
# identity-initializing and linkerd-await wrappers.
@@ -14,11 +14,16 @@ FROM $LINKERD2_IMAGE as linkerd2
FROM --platform=$BUILDPLATFORM $RUST_IMAGE as fetch

ARG PROXY_FEATURES=""
+ARG TARGETARCH="amd64"
RUN apt-get update && \
    apt-get install -y time && \
    if [[ "$PROXY_FEATURES" =~ .*meshtls-boring.* ]] ; then \
    apt-get install -y golang ; \
    fi && \
+   case "$TARGETARCH" in \
+   amd64) true ;; \
+   arm64) apt-get install --no-install-recommends -y binutils-aarch64-linux-gnu ;; \
+   esac && \
    rm -rf /var/lib/apt/lists/*

ENV CARGO_NET_RETRY=10
@@ -33,7 +38,6 @@ RUN --mount=type=cache,id=cargo,target=/usr/local/cargo/registry \
FROM fetch as build
ENV CARGO_INCREMENTAL=0
ENV RUSTFLAGS="-D warnings -A deprecated --cfg tokio_unstable"
-ARG TARGETARCH="amd64"
ARG PROFILE="release"
ARG LINKERD2_PROXY_VERSION=""
ARG LINKERD2_PROXY_VENDOR=""
deny.toml: 18 changes
@@ -2,7 +2,6 @@
targets = [
    { triple = "x86_64-unknown-linux-gnu" },
    { triple = "aarch64-unknown-linux-gnu" },
-   { triple = "armv7-unknown-linux-gnu" },
]

[advisories]
@@ -24,11 +23,6 @@ allow = [
private = { ignore = true }
confidence-threshold = 0.8
exceptions = [
-   { allow = [
-       "ISC",
-       "MIT",
-       "OpenSSL",
-   ], name = "ring", version = "*" },
    { allow = [
        "ISC",
        "OpenSSL",
@@ -39,14 +33,6 @@ exceptions = [
    ], name = "aws-lc-fips-sys", version = "*" },
]

-[[licenses.clarify]]
-name = "ring"
-version = "*"
-expression = "MIT AND ISC AND OpenSSL"
-license-files = [
-   { path = "LICENSE", hash = 0xbd0eed23 },
-]
-
[bans]
multiple-versions = "deny"
# Wildcard dependencies are used for all workspace-local crates.
@@ -56,6 +42,8 @@ deny = [
    { name = "rustls", wrappers = ["tokio-rustls"] },
    # rustls-webpki should be used instead.
    { name = "webpki" },
+   # aws-lc-rs should be used instead.
+   { name = "ring" }
]
skip = [
    # `linkerd-trace-context`, `rustls-pemfile` and `tonic` depend on `base64`
@@ -76,6 +64,8 @@ skip-tree = [
    { name = "pprof" },
+   # aws-lc-rs uses a slightly outdated version of bindgen
+   { name = "bindgen", version = "0.69.5" },
    # socket v0.6 is still propagating through the ecosystem
    { name = "socket2", version = "0.5" },
]

[sources]
justfile: 10 changes
@ -18,6 +18,10 @@ features := ""
|
|||
export LINKERD2_PROXY_VERSION := env_var_or_default("LINKERD2_PROXY_VERSION", "0.0.0-dev" + `git rev-parse --short HEAD`)
|
||||
export LINKERD2_PROXY_VENDOR := env_var_or_default("LINKERD2_PROXY_VENDOR", `whoami` + "@" + `hostname`)
|
||||
|
||||
# TODO: these variables will be included in dev v48
|
||||
export AWS_LC_SYS_CFLAGS_aarch64_unknown_linux_gnu := env_var_or_default("AWS_LC_SYS_CFLAGS_aarch64_unknown_linux_gnu", "-fuse-ld=/usr/aarch64-linux-gnu/bin/ld")
|
||||
export AWS_LC_SYS_CFLAGS_aarch64_unknown_linux_musl := env_var_or_default("AWS_LC_SYS_CFLAGS_aarch64_unknown_linux_musl", "-fuse-ld=/usr/aarch64-linux-gnu/bin/ld")
|
||||
|
||||
# The version name to use for packages.
|
||||
package_version := "v" + LINKERD2_PROXY_VERSION
|
||||
|
||||
|
@ -26,7 +30,7 @@ docker-repo := "localhost/linkerd/proxy"
|
|||
docker-tag := `git rev-parse --abbrev-ref HEAD | sed 's|/|.|g'` + "." + `git rev-parse --short HEAD`
|
||||
docker-image := docker-repo + ":" + docker-tag
|
||||
|
||||
# The architecture name to use for packages. Either 'amd64', 'arm64', or 'arm'.
|
||||
# The architecture name to use for packages. Either 'amd64' or 'arm64'.
|
||||
arch := "amd64"
|
||||
# The OS name to use for packages. Either 'linux' or 'windows'.
|
||||
os := "linux"
|
||||
|
@ -39,8 +43,6 @@ _target := if os + '-' + arch == "linux-amd64" {
|
|||
"x86_64-unknown-linux-" + libc
|
||||
} else if os + '-' + arch == "linux-arm64" {
|
||||
"aarch64-unknown-linux-" + libc
|
||||
} else if os + '-' + arch == "linux-arm" {
|
||||
"armv7-unknown-linux-" + libc + "eabihf"
|
||||
} else if os + '-' + arch == "windows-amd64" {
|
||||
"x86_64-pc-windows-" + libc
|
||||
} else {
|
||||
|
@@ -139,7 +141,7 @@ _strip:

_package_bin := _package_dir / "bin" / "linkerd2-proxy"

-# XXX {aarch64,arm}-musl builds do not enable PIE, so we use target-specific
+# XXX aarch64-musl builds do not enable PIE, so we use target-specific
# files to document those differences.
_expected_checksec := '.checksec' / arch + '-' + libc + '.json'
@@ -100,15 +100,11 @@ impl Addr {
                // them ourselves.
                format!("[{}]", a.ip())
            };
-           http::uri::Authority::from_str(&ip).unwrap_or_else(|err| {
-               panic!("SocketAddr ({}) must be valid authority: {}", a, err)
-           })
-       }
-       Addr::Socket(a) => {
-           http::uri::Authority::from_str(&a.to_string()).unwrap_or_else(|err| {
-               panic!("SocketAddr ({}) must be valid authority: {}", a, err)
-           })
+           http::uri::Authority::from_str(&ip)
+               .unwrap_or_else(|err| panic!("SocketAddr ({a}) must be valid authority: {err}"))
        }
+       Addr::Socket(a) => http::uri::Authority::from_str(&a.to_string())
+           .unwrap_or_else(|err| panic!("SocketAddr ({a}) must be valid authority: {err}")),
    }
}

@@ -265,14 +261,14 @@ mod tests {
        ];
        for (host, expected_result) in cases {
            let a = Addr::from_str(host).unwrap();
-           assert_eq!(a.is_loopback(), *expected_result, "{:?}", host)
+           assert_eq!(a.is_loopback(), *expected_result, "{host:?}")
        }
    }

    fn test_to_http_authority(cases: &[&str]) {
        let width = cases.iter().map(|s| s.len()).max().unwrap_or(0);
        for host in cases {
-           print!("trying {:1$} ... ", host, width);
+           print!("trying {host:width$} ... ");
            Addr::from_str(host).unwrap().to_http_authority();
            println!("ok");
        }
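Many Rust hunks in this comparison apply the same mechanical cleanup: positional format arguments are replaced with identifiers captured directly in the format string (stable since Rust 2021). A standalone sketch, not taken from the diff, showing both spellings side by side:

    fn describe(host: &str, width: usize) {
        // Positional style, as in the removed lines ({:1$} pads to the
        // width supplied as argument index 1):
        println!("trying {:1$} ...", host, width);
        // Inlined-capture style, as in the added lines:
        println!("trying {host:width$} ...");
    }

Both calls print identically; the second avoids the argument list entirely, which is what clippy's uninlined_format_args lint encourages.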
@@ -13,7 +13,7 @@
use futures::future::{self, TryFutureExt};
use http::StatusCode;
use linkerd_app_core::{
-   metrics::{self as metrics, FmtMetrics},
+   metrics::{self as metrics, legacy::FmtMetrics},
    proxy::http::{Body, BoxBody, ClientHandle, Request, Response},
    trace, Error, Result,
};
@@ -32,7 +32,7 @@ pub use self::readiness::{Latch, Readiness};

#[derive(Clone)]
pub struct Admin<M> {
-   metrics: metrics::Serve<M>,
+   metrics: metrics::legacy::Serve<M>,
    tracing: trace::Handle,
    ready: Readiness,
    shutdown_tx: mpsc::UnboundedSender<()>,
@ -52,7 +52,7 @@ impl<M> Admin<M> {
|
|||
tracing: trace::Handle,
|
||||
) -> Self {
|
||||
Self {
|
||||
metrics: metrics::Serve::new(metrics),
|
||||
metrics: metrics::legacy::Serve::new(metrics),
|
||||
ready,
|
||||
shutdown_tx,
|
||||
enable_shutdown,
|
||||
|
|
|
@@ -27,7 +27,7 @@ where
        .into_body()
        .collect()
        .await
-       .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?
+       .map_err(io::Error::other)?
        .aggregate();
    match level.set_from(body.chunk()) {
        Ok(_) => mk_rsp(StatusCode::NO_CONTENT, BoxBody::empty()),
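This substitution recurs in several files below (for example, the inbound test server and the test-identity certificate loader). `std::io::Error::other`, stable since Rust 1.74, wraps an arbitrary error with `ErrorKind::Other` in one call. A minimal illustration, not taken from the diff:

    use std::io;

    // Before: io::Error::new(io::ErrorKind::Other, e)
    // After:  io::Error::other(e)
    fn wrap<E>(e: E) -> io::Error
    where
        E: Into<Box<dyn std::error::Error + Send + Sync>>,
    {
        io::Error::other(e)
    }

Because `other` takes any `Into<Box<dyn Error + Send + Sync>>`, it can also be passed point-free, as in `.map_err(io::Error::other)?` above.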
@@ -2,7 +2,7 @@ use linkerd_app_core::{
    classify,
    config::ServerConfig,
    drain, errors, identity,
-   metrics::{self, FmtMetrics},
+   metrics::{self, legacy::FmtMetrics},
    proxy::http,
    serve,
    svc::{self, ExtractParam, InsertParam, Param},
@@ -13,31 +13,23 @@ independently of the inbound and outbound proxy logic.
"""

[dependencies]
bytes = { workspace = true }
drain = { workspace = true, features = ["retain"] }
http = { workspace = true }
http-body = { workspace = true }
http-body-util = { workspace = true }
hyper = { workspace = true, features = ["http1", "http2"] }
hyper-util = { workspace = true }
futures = { version = "0.3", default-features = false }
ipnet = "2.11"
prometheus-client = { workspace = true }
regex = "1"
serde_json = "1"
thiserror = "2"
tokio = { version = "1", features = ["macros", "sync", "parking_lot"] }
tokio-stream = { version = "0.1", features = ["time"] }
tonic = { workspace = true, default-features = false, features = ["prost"] }
tracing = { workspace = true }
parking_lot = "0.12"
pin-project = "1"

linkerd-addr = { path = "../../addr" }
linkerd-conditional = { path = "../../conditional" }
linkerd-dns = { path = "../../dns" }
linkerd-duplex = { path = "../../duplex" }
linkerd-errno = { path = "../../errno" }
linkerd-error = { path = "../../error" }
linkerd-error-respond = { path = "../../error-respond" }
linkerd-exp-backoff = { path = "../../exp-backoff" }
@ -64,6 +56,7 @@ linkerd-proxy-tcp = { path = "../../proxy/tcp" }
|
|||
linkerd-proxy-transport = { path = "../../proxy/transport" }
|
||||
linkerd-reconnect = { path = "../../reconnect" }
|
||||
linkerd-router = { path = "../../router" }
|
||||
linkerd-rustls = { path = "../../rustls" }
|
||||
linkerd-service-profiles = { path = "../../service-profiles" }
|
||||
linkerd-stack = { path = "../../stack" }
|
||||
linkerd-stack-metrics = { path = "../../stack/metrics" }
|
||||
|
@ -83,5 +76,6 @@ features = ["make", "spawn-ready", "timeout", "util", "limit"]
|
|||
semver = "1"
|
||||
|
||||
[dev-dependencies]
|
||||
bytes = { workspace = true }
|
||||
http-body-util = { workspace = true }
|
||||
linkerd-mock-http-body = { path = "../../mock/http-body" }
|
||||
quickcheck = { version = "1", default-features = false }
|
||||
|
|
|
@ -4,11 +4,11 @@ fn set_env(name: &str, cmd: &mut Command) {
|
|||
let value = match cmd.output() {
|
||||
Ok(output) => String::from_utf8(output.stdout).unwrap(),
|
||||
Err(err) => {
|
||||
println!("cargo:warning={}", err);
|
||||
println!("cargo:warning={err}");
|
||||
"".to_string()
|
||||
}
|
||||
};
|
||||
println!("cargo:rustc-env={}={}", name, value);
|
||||
println!("cargo:rustc-env={name}={value}");
|
||||
}
|
||||
|
||||
fn version() -> String {
|
||||
|
|
|
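For readers unfamiliar with the mechanism this build script uses: lines printed as `cargo:rustc-env=NAME=VALUE` become compile-time environment variables that the crate being built can read with the `env!`/`option_env!` macros. A minimal sketch, not taken from the diff; the variable name is illustrative:

    // In the crate's own source, after build.rs has emitted
    // `cargo:rustc-env=LINKERD2_PROXY_VERSION=...`:
    fn version_banner() -> String {
        // option_env! returns None if the variable was not set at build time.
        let version = option_env!("LINKERD2_PROXY_VERSION").unwrap_or("unknown");
        format!("linkerd2-proxy {version}")
    }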
@@ -1,5 +1,4 @@
-use crate::profiles;
pub use classify::gate;
use linkerd_error::Error;
use linkerd_proxy_client_policy as client_policy;
use linkerd_proxy_http::{classify, HasH2Reason, ResponseTimeoutError};
@ -214,7 +213,7 @@ fn h2_error(err: &Error) -> String {
|
|||
if let Some(reason) = err.h2_reason() {
|
||||
// This should output the error code in the same format as the spec,
|
||||
// for example: PROTOCOL_ERROR
|
||||
format!("h2({:?})", reason)
|
||||
format!("h2({reason:?})")
|
||||
} else {
|
||||
trace!("classifying found non-h2 error: {:?}", err);
|
||||
String::from("unclassified")
|
||||
|
|
|
@@ -101,7 +101,7 @@ impl Config {
        identity: identity::NewClient,
    ) -> svc::ArcNewService<
        (),
        svc::BoxCloneSyncService<http::Request<tonic::body::BoxBody>, http::Response<RspBody>>,
+       svc::BoxCloneSyncService<http::Request<tonic::body::Body>, http::Response<RspBody>>,
    > {
        let addr = self.addr;
        tracing::trace!(%addr, "Building");
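The `tonic::body::BoxBody` to `tonic::body::Body` substitutions here, and in the policy-client hunks further down, follow the workspace's tonic 0.12 to 0.13 upgrade, in which the boxed-body alias was replaced by a concrete `Body` type. A minimal sketch of the updated bound, assuming tonic 0.13; the function itself is illustrative and not part of the diff:

    // Illustrative only: a generic gRPC client constrained the way the
    // updated code is, with tonic 0.13's `Body` as the request body type.
    fn assert_grpc_client<S>(_svc: &S)
    where
        S: tonic::client::GrpcService<tonic::body::Body>,
    {
    }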
@@ -25,6 +25,7 @@ pub mod metrics;
pub mod proxy;
pub mod serve;
pub mod svc;
+pub mod tls_info;
pub mod transport;

pub use self::build_info::{BuildInfo, BUILD_INFO};
@@ -155,10 +155,10 @@ where
    I: Iterator<Item = (&'i String, &'i String)>,
{
    let (k0, v0) = labels_iter.next()?;
-   let mut out = format!("{}_{}=\"{}\"", prefix, k0, v0);
+   let mut out = format!("{prefix}_{k0}=\"{v0}\"");

    for (k, v) in labels_iter {
-       write!(out, ",{}_{}=\"{}\"", prefix, k, v).expect("label concat must succeed");
+       write!(out, ",{prefix}_{k}=\"{v}\"").expect("label concat must succeed");
    }
    Some(out)
}
@@ -166,7 +166,7 @@ where
// === impl Metrics ===

impl Metrics {
-   pub fn new(retain_idle: Duration) -> (Self, impl FmtMetrics + Clone + Send + 'static) {
+   pub fn new(retain_idle: Duration) -> (Self, impl legacy::FmtMetrics + Clone + Send + 'static) {
        let (control, control_report) = {
            let m = http_metrics::Requests::<ControlLabels, Class>::default();
            let r = m.clone().into_report(retain_idle).with_prefix("control");
@@ -223,6 +223,7 @@ impl Metrics {
            opentelemetry,
        };

+       use legacy::FmtMetrics as _;
        let report = endpoint_report
            .and_report(profile_route_report)
            .and_report(retry_report)
@ -248,11 +249,11 @@ impl svc::Param<ControlLabels> for control::ControlAddr {
|
|||
}
|
||||
}
|
||||
|
||||
impl FmtLabels for ControlLabels {
|
||||
impl legacy::FmtLabels for ControlLabels {
|
||||
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let Self { addr, server_id } = self;
|
||||
|
||||
write!(f, "addr=\"{}\",", addr)?;
|
||||
write!(f, "addr=\"{addr}\",")?;
|
||||
TlsConnect::from(server_id).fmt_labels(f)?;
|
||||
|
||||
Ok(())
|
||||
|
@@ -281,7 +282,7 @@ impl ProfileRouteLabels {
    }
}

-impl FmtLabels for ProfileRouteLabels {
+impl legacy::FmtLabels for ProfileRouteLabels {
    fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let Self {
            direction,
@@ -290,10 +291,10 @@ impl FmtLabels for ProfileRouteLabels {
        } = self;

        direction.fmt_labels(f)?;
-       write!(f, ",dst=\"{}\"", addr)?;
+       write!(f, ",dst=\"{addr}\"")?;

        if let Some(labels) = labels.as_ref() {
-           write!(f, ",{}", labels)?;
+           write!(f, ",{labels}")?;
        }

        Ok(())
@ -314,7 +315,7 @@ impl From<OutboundEndpointLabels> for EndpointLabels {
|
|||
}
|
||||
}
|
||||
|
||||
impl FmtLabels for EndpointLabels {
|
||||
impl legacy::FmtLabels for EndpointLabels {
|
||||
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
Self::Inbound(i) => (Direction::In, i).fmt_labels(f),
|
||||
|
@@ -323,7 +324,7 @@ impl FmtLabels for EndpointLabels {
    }
}

-impl FmtLabels for InboundEndpointLabels {
+impl legacy::FmtLabels for InboundEndpointLabels {
    fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let Self {
            tls,
@@ -343,7 +344,7 @@ impl FmtLabels for InboundEndpointLabels {
    }
}

-impl FmtLabels for ServerLabel {
+impl legacy::FmtLabels for ServerLabel {
    fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let Self(meta, port) = self;
        write!(
@@ -374,7 +375,7 @@ impl prom::EncodeLabelSetMut for ServerLabel {
    }
}

-impl FmtLabels for ServerAuthzLabels {
+impl legacy::FmtLabels for ServerAuthzLabels {
    fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let Self { server, authz } = self;

@@ -389,7 +390,7 @@ impl FmtLabels for ServerAuthzLabels {
    }
}

-impl FmtLabels for RouteLabels {
+impl legacy::FmtLabels for RouteLabels {
    fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let Self { server, route } = self;

@@ -404,7 +405,7 @@ impl FmtLabels for RouteLabels {
    }
}

-impl FmtLabels for RouteAuthzLabels {
+impl legacy::FmtLabels for RouteAuthzLabels {
    fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let Self { route, authz } = self;

@ -425,7 +426,7 @@ impl svc::Param<OutboundZoneLocality> for OutboundEndpointLabels {
|
|||
}
|
||||
}
|
||||
|
||||
impl FmtLabels for OutboundEndpointLabels {
|
||||
impl legacy::FmtLabels for OutboundEndpointLabels {
|
||||
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let Self {
|
||||
server_id,
|
||||
|
@@ -446,7 +447,7 @@ impl FmtLabels for OutboundEndpointLabels {
        (ta, tls).fmt_labels(f)?;

        if let Some(labels) = labels.as_ref() {
-           write!(f, ",{}", labels)?;
+           write!(f, ",{labels}")?;
        }

        Ok(())
@@ -462,20 +463,20 @@ impl fmt::Display for Direction {
    }
}

-impl FmtLabels for Direction {
+impl legacy::FmtLabels for Direction {
    fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-       write!(f, "direction=\"{}\"", self)
+       write!(f, "direction=\"{self}\"")
    }
}

-impl FmtLabels for Authority<'_> {
+impl legacy::FmtLabels for Authority<'_> {
    fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let Self(authority) = self;
-       write!(f, "authority=\"{}\"", authority)
+       write!(f, "authority=\"{authority}\"")
    }
}

-impl FmtLabels for Class {
+impl legacy::FmtLabels for Class {
    fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let class = |ok: bool| if ok { "success" } else { "failure" };

@@ -497,8 +498,7 @@ impl FmtLabels for Class {

            Class::Error(msg) => write!(
                f,
-               "classification=\"failure\",grpc_status=\"\",error=\"{}\"",
-               msg
+               "classification=\"failure\",grpc_status=\"\",error=\"{msg}\""
            ),
        }
    }
@@ -524,7 +524,7 @@ impl StackLabels {
    }
}

-impl FmtLabels for StackLabels {
+impl legacy::FmtLabels for StackLabels {
    fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let Self {
            direction,
@@ -533,6 +533,6 @@ impl FmtLabels for StackLabels {
        } = self;

        direction.fmt_labels(f)?;
-       write!(f, ",protocol=\"{}\",name=\"{}\"", protocol, name)
+       write!(f, ",protocol=\"{protocol}\",name=\"{name}\"")
    }
}
@@ -0,0 +1,70 @@ (new file)
+use linkerd_metrics::prom;
+use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue, LabelValueEncoder};
+use std::{
+   fmt::{Error, Write},
+   sync::{Arc, OnceLock},
+};
+
+static TLS_INFO: OnceLock<Arc<TlsInfo>> = OnceLock::new();
+
+#[derive(Clone, Debug, Default, Hash, PartialEq, Eq, EncodeLabelSet)]
+pub struct TlsInfo {
+   tls_suites: MetricValueList,
+   tls_kx_groups: MetricValueList,
+   tls_rand: String,
+   tls_key_provider: String,
+   tls_fips: bool,
+}
+
+#[derive(Clone, Debug, Default, Hash, PartialEq, Eq)]
+struct MetricValueList {
+   values: Vec<&'static str>,
+}
+
+impl FromIterator<&'static str> for MetricValueList {
+   fn from_iter<T: IntoIterator<Item = &'static str>>(iter: T) -> Self {
+       MetricValueList {
+           values: iter.into_iter().collect(),
+       }
+   }
+}
+
+impl EncodeLabelValue for MetricValueList {
+   fn encode(&self, encoder: &mut LabelValueEncoder<'_>) -> Result<(), Error> {
+       for value in &self.values {
+           value.encode(encoder)?;
+           encoder.write_char(',')?;
+       }
+       Ok(())
+   }
+}
+
+pub fn metric() -> prom::Family<TlsInfo, prom::ConstGauge> {
+   let fam = prom::Family::<TlsInfo, prom::ConstGauge>::new_with_constructor(|| {
+       prom::ConstGauge::new(1)
+   });
+
+   let tls_info = TLS_INFO.get_or_init(|| {
+       let provider = linkerd_rustls::get_default_provider();
+
+       let tls_suites = provider
+           .cipher_suites
+           .iter()
+           .flat_map(|cipher_suite| cipher_suite.suite().as_str())
+           .collect::<MetricValueList>();
+       let tls_kx_groups = provider
+           .kx_groups
+           .iter()
+           .flat_map(|suite| suite.name().as_str())
+           .collect::<MetricValueList>();
+       Arc::new(TlsInfo {
+           tls_suites,
+           tls_kx_groups,
+           tls_rand: format!("{:?}", provider.secure_random),
+           tls_key_provider: format!("{:?}", provider.key_provider),
+           tls_fips: provider.fips(),
+       })
+   });
+   let _ = fam.get_or_create(tls_info);
+   fam
+}
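The new `tls_info` module exposes a constant gauge family describing the process's TLS configuration. As a usage illustration only (not part of the diff, and assuming the standard prometheus-client registry and text encoder), such a family is registered and rendered like any other prometheus-client metric:

    use prometheus_client::{encoding::text::encode, registry::Registry};

    fn render_tls_info() -> String {
        let mut registry = Registry::default();
        // `metric()` is the constructor defined in the new module above.
        registry.register("tls_info", "Static TLS configuration info", metric());

        let mut out = String::new();
        encode(&mut out, &registry).expect("encoding into a String cannot fail");
        out
    }

Because the gauge's value is always 1, the interesting data rides entirely in the label set (suites, key-exchange groups, FIPS flag), a common "info metric" pattern.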
@@ -1,7 +1,7 @@
use crate::metrics::ServerLabel as PolicyServerLabel;
pub use crate::metrics::{Direction, OutboundEndpointLabels};
use linkerd_conditional::Conditional;
-use linkerd_metrics::FmtLabels;
+use linkerd_metrics::legacy::FmtLabels;
use linkerd_tls as tls;
use std::{fmt, net::SocketAddr};

@ -131,14 +131,14 @@ impl FmtLabels for TlsAccept<'_> {
|
|||
write!(f, "tls=\"disabled\"")
|
||||
}
|
||||
Conditional::None(why) => {
|
||||
write!(f, "tls=\"no_identity\",no_tls_reason=\"{}\"", why)
|
||||
write!(f, "tls=\"no_identity\",no_tls_reason=\"{why}\"")
|
||||
}
|
||||
Conditional::Some(tls::ServerTlsLabels::Established { client_id }) => match client_id {
|
||||
Some(id) => write!(f, "tls=\"true\",client_id=\"{}\"", id),
|
||||
Some(id) => write!(f, "tls=\"true\",client_id=\"{id}\""),
|
||||
None => write!(f, "tls=\"true\",client_id=\"\""),
|
||||
},
|
||||
Conditional::Some(tls::ServerTlsLabels::Passthru { sni }) => {
|
||||
write!(f, "tls=\"opaque\",sni=\"{}\"", sni)
|
||||
write!(f, "tls=\"opaque\",sni=\"{sni}\"")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -161,10 +161,10 @@ impl FmtLabels for TlsConnect<'_> {
|
|||
write!(f, "tls=\"disabled\"")
|
||||
}
|
||||
Conditional::None(why) => {
|
||||
write!(f, "tls=\"no_identity\",no_tls_reason=\"{}\"", why)
|
||||
write!(f, "tls=\"no_identity\",no_tls_reason=\"{why}\"")
|
||||
}
|
||||
Conditional::Some(tls::ClientTlsLabels { server_id }) => {
|
||||
write!(f, "tls=\"true\",server_id=\"{}\"", server_id)
|
||||
write!(f, "tls=\"true\",server_id=\"{server_id}\"")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -90,7 +90,7 @@ impl Gateway {
                detect_timeout,
                queue,
                addr,
-               meta,
+               meta.into(),
            ),
            None => {
                tracing::debug!(
@ -153,7 +153,7 @@ fn mk_routes(profile: &profiles::Profile) -> Option<outbound::http::Routes> {
|
|||
if let Some((addr, metadata)) = profile.endpoint.clone() {
|
||||
return Some(outbound::http::Routes::Endpoint(
|
||||
Remote(ServerAddr(addr)),
|
||||
metadata,
|
||||
metadata.into(),
|
||||
));
|
||||
}
|
||||
|
||||
|
|
|
@ -13,8 +13,7 @@ Configures and runs the inbound proxy
|
|||
test-util = [
|
||||
"linkerd-app-test",
|
||||
"linkerd-idle-cache/test-util",
|
||||
"linkerd-meshtls/rustls",
|
||||
"linkerd-meshtls-rustls/test-util",
|
||||
"linkerd-meshtls/test-util",
|
||||
]
|
||||
|
||||
[dependencies]
|
||||
|
@ -25,8 +24,7 @@ linkerd-app-core = { path = "../core" }
|
|||
linkerd-app-test = { path = "../test", optional = true }
|
||||
linkerd-http-access-log = { path = "../../http/access-log" }
|
||||
linkerd-idle-cache = { path = "../../idle-cache" }
|
||||
linkerd-meshtls = { path = "../../meshtls", optional = true }
|
||||
linkerd-meshtls-rustls = { path = "../../meshtls/rustls", optional = true }
|
||||
linkerd-meshtls = { path = "../../meshtls", optional = true, default-features = false }
|
||||
linkerd-proxy-client-policy = { path = "../../proxy/client-policy" }
|
||||
linkerd-tonic-stream = { path = "../../tonic-stream" }
|
||||
linkerd-tonic-watch = { path = "../../tonic-watch" }
|
||||
|
@ -49,7 +47,7 @@ hyper = { workspace = true, features = ["http1", "http2"] }
|
|||
linkerd-app-test = { path = "../test" }
|
||||
arbitrary = { version = "1", features = ["derive"] }
|
||||
libfuzzer-sys = { version = "0.4", features = ["arbitrary-derive"] }
|
||||
linkerd-meshtls-rustls = { path = "../../meshtls/rustls", features = [
|
||||
linkerd-meshtls = { path = "../../meshtls", features = [
|
||||
"test-util",
|
||||
] }
|
||||
|
||||
|
@ -62,8 +60,7 @@ linkerd-http-metrics = { path = "../../http/metrics", features = ["test-util"] }
|
|||
linkerd-http-box = { path = "../../http/box" }
|
||||
linkerd-idle-cache = { path = "../../idle-cache", features = ["test-util"] }
|
||||
linkerd-io = { path = "../../io", features = ["tokio-test"] }
|
||||
linkerd-meshtls = { path = "../../meshtls", features = ["rustls"] }
|
||||
linkerd-meshtls-rustls = { path = "../../meshtls/rustls", features = [
|
||||
linkerd-meshtls = { path = "../../meshtls", features = [
|
||||
"test-util",
|
||||
] }
|
||||
linkerd-proxy-server-policy = { path = "../../proxy/server-policy", features = [
|
||||
|
|
|
@ -18,8 +18,7 @@ linkerd-app-core = { path = "../../core" }
|
|||
linkerd-app-inbound = { path = ".." }
|
||||
linkerd-app-test = { path = "../../test" }
|
||||
linkerd-idle-cache = { path = "../../../idle-cache", features = ["test-util"] }
|
||||
linkerd-meshtls = { path = "../../../meshtls", features = ["rustls"] }
|
||||
linkerd-meshtls-rustls = { path = "../../../meshtls/rustls", features = [
|
||||
linkerd-meshtls = { path = "../../../meshtls", features = [
|
||||
"test-util",
|
||||
] }
|
||||
linkerd-tracing = { path = "../../../tracing", features = ["ansi"] }
|
||||
|
|
|
@ -117,7 +117,7 @@ impl<N> Inbound<N> {
|
|||
let identity = rt
|
||||
.identity
|
||||
.server()
|
||||
.with_alpn(vec![transport_header::PROTOCOL.into()])
|
||||
.spawn_with_alpn(vec![transport_header::PROTOCOL.into()])
|
||||
.expect("TLS credential store must be held");
|
||||
|
||||
inner
|
||||
|
|
|
@@ -861,12 +861,7 @@ fn grpc_status_server(

#[tracing::instrument]
fn connect_error() -> impl Fn(Remote<ServerAddr>) -> io::Result<io::BoxedIo> {
-   move |_| {
-       Err(io::Error::new(
-           io::ErrorKind::Other,
-           "server is not listening",
-       ))
-   }
+   move |_| Err(io::Error::other("server is not listening"))
}

#[tracing::instrument]
@ -113,10 +113,6 @@ impl<S> Inbound<S> {
|
|||
&self.runtime.identity
|
||||
}
|
||||
|
||||
pub fn proxy_metrics(&self) -> &metrics::Proxy {
|
||||
&self.runtime.metrics.proxy
|
||||
}
|
||||
|
||||
/// A helper for gateways to instrument policy checks.
|
||||
pub fn authorize_http<N>(
|
||||
&self,
|
||||
|
|
|
@@ -13,7 +13,7 @@ pub(crate) mod error;

pub use linkerd_app_core::metrics::*;

-/// Holds outbound proxy metrics.
+/// Holds LEGACY inbound proxy metrics.
#[derive(Clone, Debug)]
pub struct InboundMetrics {
    pub http_authz: authz::HttpAuthzMetrics,
@@ -50,7 +50,7 @@ impl InboundMetrics {
    }
}

-impl FmtMetrics for InboundMetrics {
+impl legacy::FmtMetrics for InboundMetrics {
    fn fmt_metrics(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        self.http_authz.fmt_metrics(f)?;
        self.http_errors.fmt_metrics(f)?;
@@ -1,8 +1,9 @@
use crate::policy::{AllowPolicy, HttpRoutePermit, Meta, ServerPermit};
use linkerd_app_core::{
    metrics::{
-       metrics, Counter, FmtLabels, FmtMetrics, RouteAuthzLabels, RouteLabels, ServerAuthzLabels,
-       ServerLabel, TargetAddr, TlsAccept,
+       legacy::{Counter, FmtLabels, FmtMetrics},
+       metrics, RouteAuthzLabels, RouteLabels, ServerAuthzLabels, ServerLabel, TargetAddr,
+       TlsAccept,
    },
    tls,
    transport::OrigDstAddr,
@@ -268,7 +269,7 @@ impl FmtLabels for HTTPLocalRateLimitLabels {
                scope,
            )
        } else {
-           write!(f, ",ratelimit_scope=\"{}\"", scope)
+           write!(f, ",ratelimit_scope=\"{scope}\"")
        }
    }
}
@@ -8,7 +8,7 @@ use crate::{
};
use linkerd_app_core::{
    errors::{FailFastError, LoadShedError},
-   metrics::FmtLabels,
+   metrics::legacy::FmtLabels,
    tls,
};
use std::fmt;
@@ -1,6 +1,9 @@
use super::ErrorKind;
use linkerd_app_core::{
-   metrics::{metrics, Counter, FmtMetrics, ServerLabel},
+   metrics::{
+       legacy::{Counter, FmtMetrics},
+       metrics, ServerLabel,
+   },
    svc::{self, stack::NewMonitor},
    transport::{labels::TargetAddr, OrigDstAddr},
    Error,
@@ -1,6 +1,9 @@
use super::ErrorKind;
use linkerd_app_core::{
-   metrics::{metrics, Counter, FmtMetrics},
+   metrics::{
+       legacy::{Counter, FmtMetrics},
+       metrics,
+   },
    svc::{self, stack::NewMonitor},
    transport::{labels::TargetAddr, OrigDstAddr},
    Error,
@ -33,7 +33,7 @@ static INVALID_POLICY: once_cell::sync::OnceCell<ServerPolicy> = once_cell::sync
|
|||
|
||||
impl<S> Api<S>
|
||||
where
|
||||
S: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error> + Clone,
|
||||
S: tonic::client::GrpcService<tonic::body::Body, Error = Error> + Clone,
|
||||
S::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error> + Send + 'static,
|
||||
{
|
||||
pub(super) fn new(
|
||||
|
@@ -57,7 +57,7 @@ where

impl<S> Service<u16> for Api<S>
where
-   S: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
+   S: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
    S: Clone + Send + Sync + 'static,
    S::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error> + Send + 'static,
    S::Future: Send + 'static,
@@ -40,7 +40,7 @@ impl Config {
        limits: ReceiveLimits,
    ) -> impl GetPolicy + Clone + Send + Sync + 'static
    where
-       C: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
+       C: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
        C: Clone + Unpin + Send + Sync + 'static,
        C::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error>,
        C::ResponseBody: Send + 'static,
@ -74,7 +74,7 @@ impl<S> Store<S> {
|
|||
opaque_ports: RangeInclusiveSet<u16>,
|
||||
) -> Self
|
||||
where
|
||||
S: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
|
||||
S: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
|
||||
S: Clone + Send + Sync + 'static,
|
||||
S::Future: Send,
|
||||
S::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error> + Send + 'static,
|
||||
|
@ -138,7 +138,7 @@ impl<S> Store<S> {
|
|||
|
||||
impl<S> GetPolicy for Store<S>
|
||||
where
|
||||
S: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
|
||||
S: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
|
||||
S: Clone + Send + Sync + 'static,
|
||||
S::Future: Send,
|
||||
S::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error> + Send + 'static,
|
||||
|
|
|
@ -263,7 +263,7 @@ fn orig_dst_addr() -> OrigDstAddr {
|
|||
OrigDstAddr(([192, 0, 2, 2], 1000).into())
|
||||
}
|
||||
|
||||
impl tonic::client::GrpcService<tonic::body::BoxBody> for MockSvc {
|
||||
impl tonic::client::GrpcService<tonic::body::Body> for MockSvc {
|
||||
type ResponseBody = linkerd_app_core::control::RspBody;
|
||||
type Error = Error;
|
||||
type Future = futures::future::Pending<Result<http::Response<Self::ResponseBody>, Self::Error>>;
|
||||
|
@ -275,7 +275,7 @@ impl tonic::client::GrpcService<tonic::body::BoxBody> for MockSvc {
|
|||
unreachable!()
|
||||
}
|
||||
|
||||
fn call(&mut self, _req: http::Request<tonic::body::BoxBody>) -> Self::Future {
|
||||
fn call(&mut self, _req: http::Request<tonic::body::Body>) -> Self::Future {
|
||||
unreachable!()
|
||||
}
|
||||
}
|
||||
|
|
|
@ -27,7 +27,7 @@ impl Inbound<()> {
|
|||
limits: ReceiveLimits,
|
||||
) -> impl policy::GetPolicy + Clone + Send + Sync + 'static
|
||||
where
|
||||
C: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
|
||||
C: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
|
||||
C: Clone + Unpin + Send + Sync + 'static,
|
||||
C::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error>,
|
||||
C::ResponseBody: Send + 'static,
|
||||
|
|
|
@@ -3,9 +3,7 @@ pub use futures::prelude::*;
use linkerd_app_core::{
    config,
    dns::Suffix,
-   drain, exp_backoff,
-   identity::rustls,
-   metrics,
+   drain, exp_backoff, identity, metrics,
    proxy::{
        http::{h1, h2},
        tap,
@ -98,7 +96,7 @@ pub fn runtime() -> (ProxyRuntime, drain::Signal) {
|
|||
let (tap, _) = tap::new();
|
||||
let (metrics, _) = metrics::Metrics::new(std::time::Duration::from_secs(10));
|
||||
let runtime = ProxyRuntime {
|
||||
identity: rustls::creds::default_for_test().1.into(),
|
||||
identity: identity::creds::default_for_test().1,
|
||||
metrics: metrics.proxy,
|
||||
tap,
|
||||
span_sink: None,
|
||||
|
|
|
@ -28,17 +28,19 @@ ipnet = "2"
|
|||
linkerd-app = { path = "..", features = ["allow-loopback"] }
|
||||
linkerd-app-core = { path = "../core" }
|
||||
linkerd-app-test = { path = "../test" }
|
||||
linkerd-meshtls = { path = "../../meshtls", features = ["test-util"] }
|
||||
linkerd-metrics = { path = "../../metrics", features = ["test_util"] }
|
||||
linkerd-rustls = { path = "../../rustls" }
|
||||
linkerd-tracing = { path = "../../tracing" }
|
||||
maplit = "1"
|
||||
parking_lot = "0.12"
|
||||
regex = "1"
|
||||
rustls-pemfile = "2.2"
|
||||
socket2 = "0.5"
|
||||
socket2 = "0.6"
|
||||
tokio = { version = "1", features = ["io-util", "net", "rt", "macros"] }
|
||||
tokio-rustls = { workspace = true }
|
||||
tokio-stream = { version = "0.1", features = ["sync"] }
|
||||
tonic = { workspace = true, features = ["transport"], default-features = false }
|
||||
tonic = { workspace = true, features = ["transport", "router"], default-features = false }
|
||||
tower = { workspace = true, default-features = false }
|
||||
tracing = { workspace = true }
|
||||
|
||||
|
@ -72,8 +74,5 @@ flate2 = { version = "1", default-features = false, features = [
|
|||
] }
|
||||
# Log streaming isn't enabled by default globally, but we want to test it.
|
||||
linkerd-app-admin = { path = "../admin", features = ["log-streaming"] }
|
||||
# No code from this crate is actually used; only necessary to enable the Rustls
|
||||
# implementation.
|
||||
linkerd-meshtls = { path = "../../meshtls", features = ["rustls"] }
|
||||
linkerd-tracing = { path = "../../tracing", features = ["ansi"] }
|
||||
serde_json = "1"
|
||||
|
|
|
@ -262,10 +262,7 @@ impl pb::destination_server::Destination for Controller {
|
|||
}
|
||||
|
||||
tracing::warn!(?dst, ?updates, "request does not match");
|
||||
let msg = format!(
|
||||
"expected get call for {:?} but got get call for {:?}",
|
||||
dst, req
|
||||
);
|
||||
let msg = format!("expected get call for {dst:?} but got get call for {req:?}");
|
||||
calls.push_front(Dst::Call(dst, updates));
|
||||
return Err(grpc::Status::new(grpc::Code::Unavailable, msg));
|
||||
}
|
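Note: many hunks in this range are the same mechanical rewrite: positional `format!`/`panic!` arguments become inline captured identifiers, available since Rust 1.58 and suggested by clippy's `uninlined_format_args` lint. A runnable illustration:

```rust
fn main() {
    let dst = "example.com:80";
    // Positional argument…
    let old = format!("expected get call for {:?}", dst);
    // …and the equivalent inline captured identifier (Rust 1.58+):
    let new = format!("expected get call for {dst:?}");
    assert_eq!(old, new);
}
```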
@@ -8,7 +8,8 @@ use std::{
 };

 use linkerd2_proxy_api::identity as pb;
-use tokio_rustls::rustls::{self, pki_types::CertificateDer, server::WebPkiClientVerifier};
+use linkerd_rustls::get_default_provider;
+use tokio_rustls::rustls::{self, server::WebPkiClientVerifier};
 use tonic as grpc;

 pub struct Identity {

@@ -34,10 +35,6 @@ type Certify = Box<
     > + Send,
 >;

-static TLS_VERSIONS: &[&rustls::SupportedProtocolVersion] = &[&rustls::version::TLS13];
-static TLS_SUPPORTED_CIPHERSUITES: &[rustls::SupportedCipherSuite] =
-    &[rustls::crypto::ring::cipher_suite::TLS13_CHACHA20_POLY1305_SHA256];
-
 struct Certificates {
     pub leaf: Vec<u8>,
     pub intermediates: Vec<Vec<u8>>,

@@ -54,13 +51,13 @@ impl Certificates {
         let leaf = certs
             .next()
             .expect("no leaf cert in pemfile")
-            .map_err(|_| io::Error::new(io::ErrorKind::Other, "rustls error reading certs"))?
+            .map_err(|_| io::Error::other("rustls error reading certs"))?
             .as_ref()
             .to_vec();
         let intermediates = certs
             .map(|cert| cert.map(|cert| cert.as_ref().to_vec()))
             .collect::<Result<Vec<_>, _>>()
-            .map_err(|_| io::Error::new(io::ErrorKind::Other, "rustls error reading certs"))?;
+            .map_err(|_| io::Error::other("rustls error reading certs"))?;

         Ok(Certificates {
             leaf,
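Note: `io::Error::other(..)`, stabilized in Rust 1.74, is shorthand for `io::Error::new(io::ErrorKind::Other, ..)`; the two constructors are equivalent:

```rust
use std::io;

fn main() {
    let a = io::Error::new(io::ErrorKind::Other, "rustls error reading certs");
    let b = io::Error::other("rustls error reading certs");
    // Same kind, same message.
    assert_eq!(a.kind(), b.kind());
    assert_eq!(a.to_string(), b.to_string());
}
```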
@@ -104,19 +101,16 @@ impl Identity {
         use std::io::Cursor;
         let mut roots = rustls::RootCertStore::empty();
         let trust_anchors = rustls_pemfile::certs(&mut Cursor::new(trust_anchors))
-            .map(|bytes| bytes.map(CertificateDer::from))
             .collect::<Result<Vec<_>, _>>()
             .expect("error parsing pemfile");
         let (added, skipped) = roots.add_parsable_certificates(trust_anchors);
         assert_ne!(added, 0, "trust anchors must include at least one cert");
         assert_eq!(skipped, 0, "no certs in pemfile should be invalid");

-        let mut provider = rustls::crypto::ring::default_provider();
-        provider.cipher_suites = TLS_SUPPORTED_CIPHERSUITES.to_vec();
-        let provider = Arc::new(provider);
+        let provider = get_default_provider();

         let client_config = rustls::ClientConfig::builder_with_provider(provider.clone())
-            .with_protocol_versions(TLS_VERSIONS)
+            .with_safe_default_protocol_versions()
             .expect("client config must be valid")
             .with_root_certificates(roots.clone())
             .with_no_client_auth();
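Note: rather than hand-assembling a ring `CryptoProvider` with one pinned cipher suite, the test now takes a shared provider from the workspace's `linkerd-rustls` crate. A hedged sketch of what such a helper plausibly does (the real body lives in `linkerd-rustls` and may differ; assumes rustls' `ring` feature):

```rust
use std::sync::Arc;
use tokio_rustls::rustls;

// Hypothetical stand-in for `linkerd_rustls::get_default_provider`: one shared
// process-wide crypto provider, so every client and server config agrees on
// the same cipher-suite set.
fn get_default_provider() -> Arc<rustls::crypto::CryptoProvider> {
    Arc::new(rustls::crypto::ring::default_provider())
}
```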
@@ -128,7 +122,7 @@ impl Identity {
             .expect("server verifier must be valid");

         let server_config = rustls::ServerConfig::builder_with_provider(provider)
-            .with_protocol_versions(TLS_VERSIONS)
+            .with_safe_default_protocol_versions()
             .expect("server config must be valid")
             .with_client_cert_verifier(client_cert_verifier)
             .with_single_cert(certs.chain(), key)

@@ -219,7 +213,7 @@ impl Controller {
             let f = f.take().expect("called twice?");
             let fut = f(req)
                 .map_ok(grpc::Response::new)
-                .map_err(|e| grpc::Status::new(grpc::Code::Internal, format!("{}", e)));
+                .map_err(|e| grpc::Status::new(grpc::Code::Internal, format!("{e}")));
             Box::pin(fut)
         });
         self.expect_calls.lock().push_back(func);

@@ -3,6 +3,7 @@
 #![warn(rust_2018_idioms, clippy::disallowed_methods, clippy::disallowed_types)]
 #![forbid(unsafe_code)]
 #![recursion_limit = "256"]
+#![allow(clippy::result_large_err)]

 mod test_env;

@@ -247,7 +248,7 @@ impl fmt::Display for HumanDuration {
         let secs = self.0.as_secs();
         let subsec_ms = self.0.subsec_nanos() as f64 / 1_000_000f64;
         if secs == 0 {
-            write!(fmt, "{}ms", subsec_ms)
+            write!(fmt, "{subsec_ms}ms")
         } else {
             write!(fmt, "{}s", secs as f64 + subsec_ms)
         }

@@ -302,7 +302,7 @@ impl Controller {
     }

     pub async fn run(self) -> controller::Listening {
-        let svc = grpc::transport::Server::builder()
+        let routes = grpc::service::Routes::default()
             .add_service(
                 inbound_server_policies_server::InboundServerPoliciesServer::new(Server(Arc::new(
                     self.inbound,

@@ -310,9 +310,9 @@ impl Controller {
             )
             .add_service(outbound_policies_server::OutboundPoliciesServer::new(
                 Server(Arc::new(self.outbound)),
-            ))
-            .into_service();
-        controller::run(RoutesSvc(svc), "support policy controller", None).await
+            ));
+
+        controller::run(RoutesSvc(routes), "support policy controller", None).await
     }
 }

@@ -525,7 +525,9 @@ impl Service<Request<hyper::body::Incoming>> for RoutesSvc {

     fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
         let Self(routes) = self;
-        routes.poll_ready(cx)
+        <grpc::service::Routes as Service<Request<UnsyncBoxBody<Bytes, grpc::Status>>>>::poll_ready(
+            routes, cx,
+        )
     }

     fn call(&mut self, req: Request<hyper::body::Incoming>) -> Self::Future {
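Note: once a type implements the same generic trait for several type parameters, a bare method call can become ambiguous; the hunk above pins the `Service<Request<UnsyncBoxBody<Bytes, grpc::Status>>>` impl with fully qualified syntax. A standalone illustration of the same technique (toy trait, not from the diff):

```rust
trait Greet<T> {
    fn greet(&self) -> String;
}

struct Both;

impl Greet<u8> for Both {
    fn greet(&self) -> String {
        "u8".to_string()
    }
}

impl Greet<u16> for Both {
    fn greet(&self) -> String {
        "u16".to_string()
    }
}

fn main() {
    let b = Both;
    // `b.greet()` would be ambiguous here; fully qualified syntax selects the
    // impl, just as the hunk above pins one `Service<…>` implementation.
    assert_eq!(<Both as Greet<u8>>::greet(&b), "u8");
    assert_eq!(<Both as Greet<u16>>::greet(&b), "u16");
}
```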
@@ -108,7 +108,7 @@ impl fmt::Debug for MockOrigDst {
         match self {
             Self::Addr(addr) => f
                 .debug_tuple("MockOrigDst::Addr")
-                .field(&format_args!("{}", addr))
+                .field(&format_args!("{addr}"))
                 .finish(),
             Self::Direct => f.debug_tuple("MockOrigDst::Direct").finish(),
             Self::None => f.debug_tuple("MockOrigDst::None").finish(),

@@ -416,9 +416,9 @@ async fn run(proxy: Proxy, mut env: TestEnv, random_ports: bool) -> Listening {
         use std::fmt::Write;
         let mut ports = inbound_default_ports.iter();
         if let Some(port) = ports.next() {
-            let mut var = format!("{}", port);
+            let mut var = format!("{port}");
             for port in ports {
-                write!(&mut var, ",{}", port).expect("writing to String should never fail");
+                write!(&mut var, ",{port}").expect("writing to String should never fail");
             }
             info!("{}={:?}", app::env::ENV_INBOUND_PORTS, var);
             env.put(app::env::ENV_INBOUND_PORTS, var);

@@ -137,28 +137,28 @@ impl TapEventExt for pb::TapEvent {
     fn request_init_authority(&self) -> &str {
         match self.event() {
             pb::tap_event::http::Event::RequestInit(ev) => &ev.authority,
-            e => panic!("not RequestInit event: {:?}", e),
+            e => panic!("not RequestInit event: {e:?}"),
         }
     }

     fn request_init_path(&self) -> &str {
         match self.event() {
             pb::tap_event::http::Event::RequestInit(ev) => &ev.path,
-            e => panic!("not RequestInit event: {:?}", e),
+            e => panic!("not RequestInit event: {e:?}"),
         }
     }

     fn response_init_status(&self) -> u16 {
         match self.event() {
             pb::tap_event::http::Event::ResponseInit(ev) => ev.http_status as u16,
-            e => panic!("not ResponseInit event: {:?}", e),
+            e => panic!("not ResponseInit event: {e:?}"),
         }
     }

     fn response_end_bytes(&self) -> u64 {
         match self.event() {
             pb::tap_event::http::Event::ResponseEnd(ev) => ev.response_bytes,
-            e => panic!("not ResponseEnd event: {:?}", e),
+            e => panic!("not ResponseEnd event: {e:?}"),
         }
     }

@@ -170,7 +170,7 @@ impl TapEventExt for pb::TapEvent {
                 }) => code,
                 _ => panic!("not Eos GrpcStatusCode: {:?}", ev.eos),
             },
-            ev => panic!("not ResponseEnd event: {:?}", ev),
+            ev => panic!("not ResponseEnd event: {ev:?}"),
         }
     }
 }

@@ -381,7 +381,7 @@ mod cross_version {
     }

     fn default_dst_name(port: u16) -> String {
-        format!("{}:{}", HOST, port)
+        format!("{HOST}:{port}")
     }

     fn send_default_dst(

@@ -63,7 +63,7 @@ async fn wait_for_profile_stage(client: &client::Client, metrics: &client::Clien
     for _ in 0i32..10 {
         assert_eq!(client.get("/load-profile").await, "");
         let m = metrics.get("/metrics").await;
-        let stage_metric = format!("rt_load_profile=\"{}\"", stage);
+        let stage_metric = format!("rt_load_profile=\"{stage}\"");
         if m.contains(stage_metric.as_str()) {
             break;
         }

@@ -88,12 +88,12 @@ impl TestBuilder {
         let port = srv.addr.port();
         let ctrl = controller::new();

-        let dst_tx = ctrl.destination_tx(format!("{}:{}", host, port));
+        let dst_tx = ctrl.destination_tx(format!("{host}:{port}"));
         dst_tx.send_addr(srv.addr);

         let ctrl = controller::new();

-        let dst_tx = ctrl.destination_tx(format!("{}:{}", host, port));
+        let dst_tx = ctrl.destination_tx(format!("{host}:{port}"));
         dst_tx.send_addr(srv.addr);

         let profile_tx = ctrl.profile_tx(srv.addr.to_string());

@@ -1318,9 +1318,9 @@ async fn metrics_compression() {
                 body.copy_to_bytes(body.remaining()),
             ));
             let mut scrape = String::new();
-            decoder.read_to_string(&mut scrape).unwrap_or_else(|_| {
-                panic!("decode gzip (requested Accept-Encoding: {})", encoding)
-            });
+            decoder
+                .read_to_string(&mut scrape)
+                .unwrap_or_else(|_| panic!("decode gzip (requested Accept-Encoding: {encoding})"));
             scrape
         }
     };

@@ -26,7 +26,7 @@ async fn is_valid_json() {
     assert!(!json.is_empty());

     for obj in json {
-        println!("{}\n", obj);
+        println!("{obj}\n");
     }
 }

@@ -53,7 +53,7 @@ async fn query_is_valid_json() {
     assert!(!json.is_empty());

     for obj in json {
-        println!("{}\n", obj);
+        println!("{obj}\n");
     }
 }

@@ -74,12 +74,9 @@ async fn valid_get_does_not_error() {

     let json = logs.await.unwrap();
     for obj in json {
-        println!("{}\n", obj);
+        println!("{obj}\n");
         if obj.get("error").is_some() {
-            panic!(
-                "expected the log stream to contain no error responses!\njson = {}",
-                obj
-            );
+            panic!("expected the log stream to contain no error responses!\njson = {obj}");
         }
     }
 }

@@ -101,12 +98,9 @@ async fn valid_query_does_not_error() {

     let json = logs.await.unwrap();
     for obj in json {
-        println!("{}\n", obj);
+        println!("{obj}\n");
         if obj.get("error").is_some() {
-            panic!(
-                "expected the log stream to contain no error responses!\njson = {}",
-                obj
-            );
+            panic!("expected the log stream to contain no error responses!\njson = {obj}");
         }
     }
 }

@@ -142,9 +136,7 @@ async fn multi_filter() {
                 level.and_then(|value| value.as_str()),
                 Some("DEBUG") | Some("INFO") | Some("WARN") | Some("ERROR")
             ),
-            "level must be DEBUG, INFO, WARN, or ERROR\n level: {:?}\n json: {:#?}",
-            level,
-            obj
+            "level must be DEBUG, INFO, WARN, or ERROR\n level: {level:?}\n json: {obj:#?}"
         );
     }

@@ -175,7 +167,7 @@ async fn get_log_stream(
     let req = client
         .request_body(
             client
-                .request_builder(&format!("{}?{}", PATH, filter))
+                .request_builder(&format!("{PATH}?{filter}"))
                .method(http::Method::GET)
                 .body(http_body_util::Full::new(Bytes::from(filter)))
                 .unwrap(),

@@ -231,7 +223,7 @@ where
             }
         }
         Err(e) => {
-            println!("body failed: {}", e);
+            println!("body failed: {e}");
             break;
         }
     };
@@ -80,10 +80,7 @@ impl Test {
             .await
         };

-        env.put(
-            app::env::ENV_INBOUND_DETECT_TIMEOUT,
-            format!("{:?}", TIMEOUT),
-        );
+        env.put(app::env::ENV_INBOUND_DETECT_TIMEOUT, format!("{TIMEOUT:?}"));

         (self.set_env)(&mut env);

@@ -127,26 +124,6 @@ async fn inbound_timeout() {
         .await;
 }

-/// Tests that the detect metric is labeled and incremented on I/O error.
-#[tokio::test]
-async fn inbound_io_err() {
-    let _trace = trace_init();
-
-    let (proxy, metrics) = Test::default().run().await;
-    let client = crate::tcp::client(proxy.inbound);
-
-    let tcp_client = client.connect().await;
-
-    tcp_client.write(TcpFixture::HELLO_MSG).await;
-    drop(tcp_client);
-
-    metric(&proxy)
-        .label("error", "i/o")
-        .value(1u64)
-        .assert_in(&metrics)
-        .await;
-}
-
 /// Tests that the detect metric is not incremented when TLS is successfully
 /// detected.
 #[tokio::test]

@@ -192,44 +169,6 @@ async fn inbound_success() {
     metric.assert_in(&metrics).await;
 }

-/// Tests both of the above cases together.
-#[tokio::test]
-async fn inbound_multi() {
-    let _trace = trace_init();
-
-    let (proxy, metrics) = Test::default().run().await;
-    let client = crate::tcp::client(proxy.inbound);
-
-    let metric = metric(&proxy);
-    let timeout_metric = metric.clone().label("error", "tls detection timeout");
-    let io_metric = metric.label("error", "i/o");
-
-    let tcp_client = client.connect().await;
-
-    tokio::time::sleep(TIMEOUT + Duration::from_millis(15)) // just in case
-        .await;
-
-    timeout_metric.clone().value(1u64).assert_in(&metrics).await;
-    drop(tcp_client);
-
-    let tcp_client = client.connect().await;
-
-    tcp_client.write(TcpFixture::HELLO_MSG).await;
-    drop(tcp_client);
-
-    io_metric.clone().value(1u64).assert_in(&metrics).await;
-    timeout_metric.clone().value(1u64).assert_in(&metrics).await;
-
-    let tcp_client = client.connect().await;
-
-    tokio::time::sleep(TIMEOUT + Duration::from_millis(15)) // just in case
-        .await;
-
-    io_metric.clone().value(1u64).assert_in(&metrics).await;
-    timeout_metric.clone().value(2u64).assert_in(&metrics).await;
-    drop(tcp_client);
-}
-
 /// Tests that TLS detect failure metrics are collected for the direct stack.
 #[tokio::test]
 async fn inbound_direct_multi() {

@@ -13,7 +13,7 @@ Configures and runs the outbound proxy
 default = []
 allow-loopback = []
 test-subscriber = []
-test-util = ["linkerd-app-test", "linkerd-meshtls-rustls/test-util", "dep:http-body"]
+test-util = ["linkerd-app-test", "linkerd-meshtls/test-util", "dep:http-body"]

 prometheus-client-rust-242 = [] # TODO

@@ -42,7 +42,7 @@ linkerd-http-prom = { path = "../../http/prom" }
 linkerd-http-retry = { path = "../../http/retry" }
 linkerd-http-route = { path = "../../http/route" }
 linkerd-identity = { path = "../../identity" }
-linkerd-meshtls-rustls = { path = "../../meshtls/rustls", optional = true }
+linkerd-meshtls = { path = "../../meshtls", optional = true, default-features = false }
 linkerd-opaq-route = { path = "../../opaq-route" }
 linkerd-proxy-client-policy = { path = "../../proxy/client-policy", features = [
     "proto",

@@ -67,8 +67,7 @@ linkerd-app-test = { path = "../test", features = ["client-policy"] }
 linkerd-http-box = { path = "../../http/box" }
 linkerd-http-prom = { path = "../../http/prom", features = ["test-util"] }
 linkerd-io = { path = "../../io", features = ["tokio-test"] }
-linkerd-meshtls = { path = "../../meshtls", features = ["rustls"] }
-linkerd-meshtls-rustls = { path = "../../meshtls/rustls", features = [
+linkerd-meshtls = { path = "../../meshtls", features = [
     "test-util",
 ] }
 linkerd-mock-http-body = { path = "../../mock/http-body" }

@@ -134,7 +134,13 @@ impl<N> Outbound<N> {
                     .unwrap_or_else(|| (orig_dst, Default::default()));
                 // TODO(ver) We should be able to figure out resource coordinates for
                 // the endpoint?
-                synthesize_forward_policy(&META, detect_timeout, queue, addr, meta)
+                synthesize_forward_policy(
+                    &META,
+                    detect_timeout,
+                    queue,
+                    addr,
+                    meta.into(),
+                )
             },
         );
         return Ok((Some(profile), policy));

@@ -189,7 +195,7 @@ pub fn synthesize_forward_policy(
     timeout: Duration,
     queue: policy::Queue,
     addr: SocketAddr,
-    metadata: policy::EndpointMetadata,
+    metadata: Arc<policy::EndpointMetadata>,
 ) -> ClientPolicy {
     policy_for_backend(
         meta,

@@ -32,7 +32,7 @@ pub use self::balance::BalancerMetrics;
 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
 pub enum Dispatch {
     Balance(NameAddr, EwmaConfig),
-    Forward(Remote<ServerAddr>, Metadata),
+    Forward(Remote<ServerAddr>, Arc<Metadata>),
     /// A backend dispatcher that explicitly fails all requests.
     Fail {
         message: Arc<str>,

@@ -49,7 +49,7 @@ pub struct DispatcherFailed(Arc<str>);
 pub struct Endpoint<T> {
     addr: Remote<ServerAddr>,
     is_local: bool,
-    metadata: Metadata,
+    metadata: Arc<Metadata>,
     parent: T,
     queue: QueueConfig,
     close_server_connection_on_remote_proxy_error: bool,
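Note: the recurring `Metadata` → `Arc<Metadata>` change in the outbound stack means cloning a `Dispatch` or `Endpoint` bumps a reference count instead of deep-copying endpoint metadata. A minimal sketch of the pattern (the types here are stand-ins, not the proxy's own):

```rust
use std::sync::Arc;

#[derive(Debug)]
struct Metadata {
    // Imagine several heap-allocated fields here (labels, identities, …).
    labels: Vec<(String, String)>,
}

#[derive(Clone, Debug)]
struct Endpoint {
    metadata: Arc<Metadata>,
}

fn main() {
    let metadata = Metadata {
        labels: vec![("zone".into(), "east".into())],
    };
    // `.into()` converts `Metadata` to `Arc<Metadata>`, as in the hunks above.
    let ep = Endpoint {
        metadata: metadata.into(),
    };
    let copy = ep.clone(); // refcount bump, no deep copy
    assert!(Arc::ptr_eq(&ep.metadata, &copy.metadata));
    assert_eq!(ep.metadata.labels.len(), 1);
}
```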
@@ -121,7 +121,7 @@ where
         let http2 = http2.override_from(metadata.http2_client_params());
         Endpoint {
             addr: Remote(ServerAddr(addr)),
-            metadata,
+            metadata: metadata.into(),
             is_local,
             parent: target.parent,
             queue: http_queue,

@@ -8,7 +8,7 @@ use linkerd_app_core::{
     transport::addrs::*,
     Addr, Error, Infallible, NameAddr, CANONICAL_DST_HEADER,
 };
-use std::{fmt::Debug, hash::Hash};
+use std::{fmt::Debug, hash::Hash, sync::Arc};
 use tokio::sync::watch;

 pub mod policy;

@@ -32,7 +32,7 @@ pub enum Routes {

     /// Fallback endpoint forwarding.
     // TODO(ver) Remove this variant when policy routes are fully wired up.
-    Endpoint(Remote<ServerAddr>, Metadata),
+    Endpoint(Remote<ServerAddr>, Arc<Metadata>),
 }

 #[derive(Clone, Debug, PartialEq, Eq, Hash)]

@@ -64,7 +64,7 @@ enum RouterParams<T: Clone + Debug + Eq + Hash> {
     Profile(profile::Params<T>),

     // TODO(ver) Remove this variant when policy routes are fully wired up.
-    Endpoint(Remote<ServerAddr>, Metadata, T),
+    Endpoint(Remote<ServerAddr>, Arc<Metadata>, T),
 }

 // Only applies to requests with profiles.

@@ -54,7 +54,7 @@ impl retry::Policy for RetryPolicy {

         if let Some(codes) = self.retryable_grpc_statuses.as_ref() {
             let grpc_status = Self::grpc_status(rsp);
-            let retryable = grpc_status.map_or(false, |c| codes.contains(c));
+            let retryable = grpc_status.is_some_and(|c| codes.contains(c));
             tracing::debug!(retryable, grpc.status = ?grpc_status);
             if retryable {
                 return true;
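Note: `Option::is_some_and`, stable since Rust 1.70, replaces the `map_or(false, …)` idiom (clippy now flags the latter). The two expressions are equivalent:

```rust
fn main() {
    let grpc_status: Option<i32> = Some(14);
    let codes = [4, 8, 14];
    // Old idiom and its `is_some_and` replacement agree for every input.
    let old = grpc_status.map_or(false, |c| codes.contains(&c));
    let new = grpc_status.is_some_and(|c| codes.contains(&c));
    assert!(old && new);
}
```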
@@ -214,7 +214,7 @@ impl Outbound<()> {
                 detect_timeout,
                 queue,
                 addr,
-                meta,
+                meta.into(),
             );
         }

@@ -146,7 +146,7 @@ impl Outbound<()> {
         export_hostname_labels: bool,
     ) -> impl policy::GetPolicy
     where
-        C: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
+        C: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
         C: Clone + Unpin + Send + Sync + 'static,
         C::ResponseBody: proxy::http::Body<Data = tonic::codegen::Bytes, Error = Error>,
         C::ResponseBody: Send + 'static,

@@ -130,7 +130,7 @@ impl OutboundMetrics {
     }
 }

-impl FmtMetrics for OutboundMetrics {
+impl legacy::FmtMetrics for OutboundMetrics {
     fn fmt_metrics(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         self.http_errors.fmt_metrics(f)?;
         self.tcp_errors.fmt_metrics(f)?;

@@ -243,7 +243,7 @@ impl EncodeLabelSet for RouteRef {

 // === impl ConcreteLabels ===

-impl FmtLabels for ConcreteLabels {
+impl legacy::FmtLabels for ConcreteLabels {
     fn fmt_labels(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         let ConcreteLabels(parent, backend) = self;

@@ -5,7 +5,7 @@ pub(crate) use self::{http::Http, tcp::Tcp};
 use crate::http::IdentityRequired;
 use linkerd_app_core::{
     errors::{FailFastError, LoadShedError},
-    metrics::FmtLabels,
+    metrics::legacy::FmtLabels,
     proxy::http::ResponseTimeoutError,
 };
 use std::fmt;

@@ -1,6 +1,9 @@
 use super::ErrorKind;
 use linkerd_app_core::{
-    metrics::{metrics, Counter, FmtMetrics},
+    metrics::{
+        legacy::{Counter, FmtMetrics},
+        metrics,
+    },
     svc, Error,
 };
 use parking_lot::RwLock;
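Note: the long tail of `metrics::…` → `metrics::legacy::…` rewrites in the remaining hunks tracks a reorganization of `linkerd-metrics`: the hand-rolled formatting primitives move under a `legacy` submodule, leaving the top level to the `prometheus-client`-based API. A toy sketch of the shape of that split (not the real crate layout):

```rust
// Hypothetical module layout: older metrics types live under `legacy`.
mod metrics {
    pub mod legacy {
        #[derive(Default)]
        pub struct Counter(pub u64);
    }
    // The top level is left free for the newer `prometheus-client`-style API.
}

fn main() {
    // Callers update their paths but the types themselves are unchanged.
    let c = metrics::legacy::Counter::default();
    assert_eq!(c.0, 0);
}
```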
@@ -1,6 +1,9 @@
 use super::ErrorKind;
 use linkerd_app_core::{
-    metrics::{metrics, Counter, FmtMetrics},
+    metrics::{
+        legacy::{Counter, FmtMetrics},
+        metrics,
+    },
     svc,
     transport::{labels::TargetAddr, OrigDstAddr},
     Error,

@@ -32,7 +32,7 @@ use tracing::info_span;
 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
 pub enum Dispatch {
     Balance(NameAddr, balance::EwmaConfig),
-    Forward(Remote<ServerAddr>, Metadata),
+    Forward(Remote<ServerAddr>, Arc<Metadata>),
     /// A backend dispatcher that explicitly fails all requests.
     Fail {
         message: Arc<str>,

@@ -57,7 +57,7 @@ pub struct ConcreteError {
 pub struct Endpoint<T> {
     addr: Remote<ServerAddr>,
     is_local: bool,
-    metadata: Metadata,
+    metadata: Arc<Metadata>,
     parent: T,
 }

@@ -196,7 +196,7 @@ impl<C> Outbound<C> {
         let is_local = inbound_ips.contains(&addr.ip());
         Endpoint {
             addr: Remote(ServerAddr(addr)),
-            metadata,
+            metadata: metadata.into(),
             is_local,
             parent: target.parent,
         }

@@ -160,9 +160,7 @@ async fn balances() {
     }
     assert!(
         seen0 && seen1,
-        "Both endpoints must be used; ep0={} ep1={}",
-        seen0,
-        seen1
+        "Both endpoints must be used; ep0={seen0} ep1={seen1}"
     );

     // When we remove the ep0, all traffic goes to ep1:

@@ -190,8 +188,7 @@ async fn balances() {
     task.abort();
     assert!(
         errors::is_caused_by::<FailFastError>(&*err),
-        "unexpected error: {}",
-        err
+        "unexpected error: {err}"
     );
     assert!(resolved.only_configured(), "Resolution must be reused");
 }

@@ -33,7 +33,7 @@ static INVALID_POLICY: once_cell::sync::OnceCell<ClientPolicy> = once_cell::sync

 impl<S> Api<S>
 where
-    S: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error> + Clone,
+    S: tonic::client::GrpcService<tonic::body::Body, Error = Error> + Clone,
     S::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error> + Send + 'static,
 {
     pub(crate) fn new(

@@ -59,7 +59,7 @@ where

 impl<S> Service<Addr> for Api<S>
 where
-    S: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
+    S: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
     S: Clone + Send + Sync + 'static,
     S::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error> + Send + 'static,
     S::Future: Send + 'static,

@@ -60,7 +60,7 @@ pub(crate) fn runtime() -> (ProxyRuntime, drain::Signal) {
     let (tap, _) = tap::new();
     let (metrics, _) = metrics::Metrics::new(std::time::Duration::from_secs(10));
     let runtime = ProxyRuntime {
-        identity: linkerd_meshtls_rustls::creds::default_for_test().1.into(),
+        identity: linkerd_meshtls::creds::default_for_test().1,
         metrics: metrics.proxy,
         tap,
         span_sink: None,

@@ -31,7 +31,7 @@ use tracing::info_span;
 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
 pub enum Dispatch {
     Balance(NameAddr, balance::EwmaConfig),
-    Forward(Remote<ServerAddr>, Metadata),
+    Forward(Remote<ServerAddr>, Arc<Metadata>),
     /// A backend dispatcher that explicitly fails all requests.
     Fail {
         message: Arc<str>,

@@ -56,7 +56,7 @@ pub struct ConcreteError {
 pub struct Endpoint<T> {
     addr: Remote<ServerAddr>,
     is_local: bool,
-    metadata: Metadata,
+    metadata: Arc<Metadata>,
     parent: T,
 }

@@ -174,7 +174,7 @@ impl<C> Outbound<C> {
         let is_local = inbound_ips.contains(&addr.ip());
         Endpoint {
             addr: Remote(ServerAddr(addr)),
-            metadata,
+            metadata: metadata.into(),
             is_local,
             parent: target.parent,
         }

@@ -11,13 +11,18 @@ use linkerd_proxy_client_policy::{self as client_policy, tls::sni};
 use parking_lot::Mutex;
 use std::{
     collections::HashMap,
+    marker::PhantomData,
     net::SocketAddr,
     sync::Arc,
     task::{Context, Poll},
     time::Duration,
 };
 use tokio::sync::watch;
-use tokio_rustls::rustls::pki_types::DnsName;
+use tokio_rustls::rustls::{
+    internal::msgs::codec::{Codec, Reader},
+    pki_types::DnsName,
+    InvalidMessage,
+};

 mod basic;
@@ -142,7 +147,7 @@ fn default_backend(addr: SocketAddr) -> client_policy::Backend {
             capacity: 100,
             failfast_timeout: Duration::from_secs(10),
         },
-        dispatcher: BackendDispatcher::Forward(addr, EndpointMetadata::default()),
+        dispatcher: BackendDispatcher::Forward(addr, EndpointMetadata::default().into()),
     }
 }

@@ -170,44 +175,57 @@ fn sni_route(backend: client_policy::Backend, sni: sni::MatchSni) -> client_poli
 // generates a sample ClientHello TLS message for testing
 fn generate_client_hello(sni: &str) -> Vec<u8> {
     use tokio_rustls::rustls::{
-        internal::msgs::{
-            base::Payload,
-            codec::{Codec, Reader},
-            enums::Compression,
-            handshake::{
-                ClientExtension, ClientHelloPayload, HandshakeMessagePayload, HandshakePayload,
-                Random, ServerName, SessionId,
-            },
-            message::{MessagePayload, PlainMessage},
-        },
-        CipherSuite, ContentType, HandshakeType, ProtocolVersion,
+        internal::msgs::{base::Payload, codec::Codec, message::PlainMessage},
+        ContentType, ProtocolVersion,
     };

     let sni = DnsName::try_from(sni.to_string()).unwrap();
     let sni = trim_hostname_trailing_dot_for_sni(&sni);

-    let mut server_name_bytes = vec![];
-    0u8.encode(&mut server_name_bytes); // encode the type first
-    (sni.as_ref().len() as u16).encode(&mut server_name_bytes); // then the length as u16
-    server_name_bytes.extend_from_slice(sni.as_ref().as_bytes()); // then the server name itself
+    // rustls has internal-only types that can encode a ClientHello, but they are mostly
+    // inaccessible and an unstable part of the public API anyway. Manually encode one here for
+    // testing only instead.

-    let server_name =
-        ServerName::read(&mut Reader::init(&server_name_bytes)).expect("Server name is valid");
+    let mut hs_payload_bytes = vec![];
+    1u8.encode(&mut hs_payload_bytes); // client hello ID

-    let hs_payload = HandshakeMessagePayload {
-        typ: HandshakeType::ClientHello,
-        payload: HandshakePayload::ClientHello(ClientHelloPayload {
-            client_version: ProtocolVersion::TLSv1_2,
-            random: Random::from([0; 32]),
-            session_id: SessionId::read(&mut Reader::init(&[0])).unwrap(),
-            cipher_suites: vec![CipherSuite::TLS_NULL_WITH_NULL_NULL],
-            compression_methods: vec![Compression::Null],
-            extensions: vec![ClientExtension::ServerName(vec![server_name])],
-        }),
-    };
+    let client_hello_body = {
+        let mut payload = LengthPayload::<U24>::empty();
+
+        payload.buf.extend_from_slice(&[0x03, 0x03]); // client version, TLSv1.2
+
+        payload.buf.extend_from_slice(&[0u8; 32]); // random
+
+        0u8.encode(&mut payload.buf); // session ID
+
+        LengthPayload::<u16>::from_slice(&[0x00, 0x00] /* TLS_NULL_WITH_NULL_NULL */)
+            .encode(&mut payload.buf);
+
+        LengthPayload::<u8>::from_slice(&[0x00] /* no compression */).encode(&mut payload.buf);
+
+        let extensions = {
+            let mut payload = LengthPayload::<u16>::empty();
+            0u16.encode(&mut payload.buf); // server name extension ID
+
+            let server_name_extension = {
+                let mut payload = LengthPayload::<u16>::empty();
+                let server_name = {
+                    let mut payload = LengthPayload::<u16>::empty();
+                    0u8.encode(&mut payload.buf); // DNS hostname ID
+                    LengthPayload::<u16>::from_slice(sni.as_ref().as_bytes())
+                        .encode(&mut payload.buf);
+                    payload
+                };
+                server_name.encode(&mut payload.buf);
+                payload
+            };
+            server_name_extension.encode(&mut payload.buf);
+            payload
+        };
+        extensions.encode(&mut payload.buf);
+        payload
+    };

-    let mut hs_payload_bytes = Vec::default();
-    MessagePayload::handshake(hs_payload).encode(&mut hs_payload_bytes);
+    client_hello_body.encode(&mut hs_payload_bytes);

     let message = PlainMessage {
         typ: ContentType::Handshake,

@@ -218,6 +236,65 @@ fn generate_client_hello(sni: &str) -> Vec<u8> {
     message.into_unencrypted_opaque().encode()
 }

+#[derive(Debug)]
+struct LengthPayload<T> {
+    buf: Vec<u8>,
+    _boo: PhantomData<fn() -> T>,
+}
+
+impl<T> LengthPayload<T> {
+    fn empty() -> Self {
+        Self {
+            buf: vec![],
+            _boo: PhantomData,
+        }
+    }
+
+    fn from_slice(s: &[u8]) -> Self {
+        Self {
+            buf: s.to_vec(),
+            _boo: PhantomData,
+        }
+    }
+}
+
+impl Codec<'_> for LengthPayload<u8> {
+    fn encode(&self, bytes: &mut Vec<u8>) {
+        (self.buf.len() as u8).encode(bytes);
+        bytes.extend_from_slice(&self.buf);
+    }
+
+    fn read(_: &mut Reader<'_>) -> std::result::Result<Self, InvalidMessage> {
+        unimplemented!()
+    }
+}
+
+impl Codec<'_> for LengthPayload<u16> {
+    fn encode(&self, bytes: &mut Vec<u8>) {
+        (self.buf.len() as u16).encode(bytes);
+        bytes.extend_from_slice(&self.buf);
+    }
+
+    fn read(_: &mut Reader<'_>) -> std::result::Result<Self, InvalidMessage> {
+        unimplemented!()
+    }
+}
+
+#[derive(Debug)]
+struct U24;
+
+impl Codec<'_> for LengthPayload<U24> {
+    fn encode(&self, bytes: &mut Vec<u8>) {
+        let len = self.buf.len() as u32;
+        bytes.extend_from_slice(&len.to_be_bytes()[1..]);
+        bytes.extend_from_slice(&self.buf);
+    }
+
+    fn read(_: &mut Reader<'_>) -> std::result::Result<Self, InvalidMessage> {
+        unimplemented!()
+    }
+}
+
 fn trim_hostname_trailing_dot_for_sni(dns_name: &DnsName<'_>) -> DnsName<'static> {
     let dns_name_str = dns_name.as_ref();

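Note: `LengthPayload<T>` reproduces TLS's length-prefixed vector encoding, where the length prefix is one, two, or three bytes (`u8`, `u16`, `u24`) depending on the field, per RFC 8446's presentation language. A standalone worked example of the three-byte (`u24`) framing used for handshake bodies:

```rust
fn main() {
    // TLS handshake messages carry a 3-byte (u24) big-endian length prefix.
    let body = [0x03, 0x03]; // e.g. a two-byte payload
    let len = body.len() as u32;

    let mut framed = Vec::new();
    framed.extend_from_slice(&len.to_be_bytes()[1..]); // drop the high byte: u32 -> u24
    framed.extend_from_slice(&body);

    assert_eq!(framed, vec![0x00, 0x00, 0x02, 0x03, 0x03]);
}
```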
@@ -43,7 +43,7 @@ impl Config {
     ) -> Result<
         Dst<
             impl svc::Service<
-                http::Request<tonic::body::BoxBody>,
+                http::Request<tonic::body::Body>,
                 Response = http::Response<control::RspBody>,
                 Error = Error,
                 Future = impl Send,

@@ -343,12 +343,12 @@ const DEFAULT_INBOUND_HTTP1_CONNECTION_POOL_IDLE_TIMEOUT: Duration = Duration::f
 // TODO(ver) This should be configurable at the load balancer level.
 const DEFAULT_OUTBOUND_HTTP1_CONNECTION_POOL_IDLE_TIMEOUT: Duration = Duration::from_secs(3);

-// By default, we don't limit the number of connections a connection pol may
-// use, as doing so can severely impact CPU utilization for applications with
-// many concurrent requests. It's generally preferable to use the MAX_IDLE_AGE
-// limitations to quickly drop idle connections.
-const DEFAULT_INBOUND_MAX_IDLE_CONNS_PER_ENDPOINT: usize = usize::MAX;
-const DEFAULT_OUTBOUND_MAX_IDLE_CONNS_PER_ENDPOINT: usize = usize::MAX;
+// By default, we limit the number of connections that may be opened per-host.
+// We pick a high number (10k) that shouldn't interfere with most workloads, but
+// will prevent issues with our outbound HTTP client from exhausting the file
+// descriptors available to the process.
+const DEFAULT_INBOUND_MAX_IDLE_CONNS_PER_ENDPOINT: usize = 10_000;
+const DEFAULT_OUTBOUND_MAX_IDLE_CONNS_PER_ENDPOINT: usize = 10_000;

 // These settings limit the number of requests that have not received responses,
 // including those buffered in the proxy and dispatched to the destination

@@ -1117,11 +1117,11 @@ pub fn parse_backoff<S: Strings>(
     base: &str,
     default: ExponentialBackoff,
 ) -> Result<ExponentialBackoff, EnvError> {
-    let min_env = format!("LINKERD2_PROXY_{}_EXP_BACKOFF_MIN", base);
+    let min_env = format!("LINKERD2_PROXY_{base}_EXP_BACKOFF_MIN");
     let min = parse(strings, &min_env, parse_duration);
-    let max_env = format!("LINKERD2_PROXY_{}_EXP_BACKOFF_MAX", base);
+    let max_env = format!("LINKERD2_PROXY_{base}_EXP_BACKOFF_MAX");
     let max = parse(strings, &max_env, parse_duration);
-    let jitter_env = format!("LINKERD2_PROXY_{}_EXP_BACKOFF_JITTER", base);
+    let jitter_env = format!("LINKERD2_PROXY_{base}_EXP_BACKOFF_JITTER");
     let jitter = parse(strings, &jitter_env, parse_number::<f64>);

     match (min?, max?, jitter?) {

@@ -1256,7 +1256,7 @@ pub fn parse_linkerd_identity_config<S: Strings>(
             Ok((control, certify))
         }
         (addr, end_entity_dir, token, _minr, _maxr) => {
-            let s = format!("{0}_ADDR and {0}_NAME", ENV_IDENTITY_SVC_BASE);
+            let s = format!("{ENV_IDENTITY_SVC_BASE}_ADDR and {ENV_IDENTITY_SVC_BASE}_NAME");
             let svc_env: &str = s.as_str();
             for (unset, name) in &[
                 (addr.is_none(), svc_env),

@@ -172,11 +172,11 @@ mod tests {
     fn test_unit<F: Fn(u64) -> Duration>(unit: &str, to_duration: F) {
         for v in &[0, 1, 23, 456_789] {
             let d = to_duration(*v);
-            let text = format!("{}{}", v, unit);
-            assert_eq!(parse_duration(&text), Ok(d), "text=\"{}\"", text);
+            let text = format!("{v}{unit}");
+            assert_eq!(parse_duration(&text), Ok(d), "text=\"{text}\"");

-            let text = format!(" {}{}\t", v, unit);
-            assert_eq!(parse_duration(&text), Ok(d), "text=\"{}\"", text);
+            let text = format!(" {v}{unit}\t");
+            assert_eq!(parse_duration(&text), Ok(d), "text=\"{text}\"");
         }
     }

@@ -245,7 +245,7 @@ mod tests {
     fn p(s: &str) -> Result<Vec<String>, ParseError> {
         let mut sfxs = parse_dns_suffixes(s)?
             .into_iter()
-            .map(|s| format!("{}", s))
+            .map(|s| format!("{s}"))
            .collect::<Vec<_>>();
         sfxs.sort();
         Ok(sfxs)

@@ -4,7 +4,8 @@ pub use linkerd_app_core::identity::{client, Id};
 use linkerd_app_core::{
     control, dns,
     identity::{
-        client::linkerd::Certify, creds, CertMetrics, Credentials, DerX509, Mode, WithCertMetrics,
+        client::linkerd::Certify, creds, watch as watch_identity, CertMetrics, Credentials,
+        DerX509, WithCertMetrics,
     },
     metrics::{prom, ControlHttp as ClientMetrics},
     Result,

@@ -137,8 +138,7 @@ fn watch(
     watch::Receiver<bool>,
 )> {
     let (tx, ready) = watch::channel(false);
-    let (store, receiver) =
-        Mode::default().watch(tls.id, tls.server_name, &tls.trust_anchors_pem)?;
+    let (store, receiver) = watch_identity(tls.id, tls.server_name, &tls.trust_anchors_pem)?;
     let cred = WithCertMetrics::new(metrics, NotifyReady { store, tx });
     Ok((cred, receiver, ready))
 }

@@ -19,9 +19,10 @@ use linkerd_app_core::{
     config::ServerConfig,
     control::{ControlAddr, Metrics as ControlMetrics},
     dns, drain,
-    metrics::{prom, FmtMetrics},
+    metrics::{legacy::FmtMetrics, prom},
     serve,
     svc::Param,
+    tls_info,
     transport::{addrs::*, listen::Bind},
     Error, ProxyRuntime,
 };

@@ -251,9 +252,6 @@ impl Config {
             export_hostname_labels,
         );

-        let dst_addr = dst.addr.clone();
-        // registry.sub_registry_with_prefix("gateway"),
-
         let gateway = gateway::Gateway::new(gateway, inbound.clone(), outbound.clone()).stack(
             dst.resolve.clone(),
             dst.profiles.clone(),

@@ -304,6 +302,7 @@ impl Config {
             error!(%error, "Failed to register process metrics");
         }
         registry.register("proxy_build_info", "Proxy build info", BUILD_INFO.metric());
+        registry.register("rustls_info", "Proxy TLS info", tls_info::metric());

         let admin = {
             let identity = identity.receiver().server();

@@ -330,7 +329,7 @@ impl Config {

         Ok(App {
             admin,
-            dst: dst_addr,
+            dst: dst.addr,
             drain: drain_tx,
             identity,
             inbound_addr,

@@ -398,7 +397,7 @@ impl App {

     pub fn tracing_addr(&self) -> Option<&ControlAddr> {
         match self.trace_collector {
-            trace_collector::TraceCollector::Disabled { .. } => None,
+            trace_collector::TraceCollector::Disabled => None,
             crate::trace_collector::TraceCollector::Enabled(ref oc) => Some(&oc.addr),
         }
     }

@@ -46,7 +46,7 @@ impl Config {
     ) -> Result<
         Policy<
             impl svc::Service<
-                http::Request<tonic::body::BoxBody>,
+                http::Request<tonic::body::Body>,
                 Response = http::Response<control::RspBody>,
                 Error = Error,
                 Future = impl Send,

@@ -6,7 +6,7 @@ use linkerd_opencensus::{self as opencensus, metrics, proto};
 use std::{collections::HashMap, time::SystemTime};
 use tokio::sync::mpsc;
 use tokio_stream::wrappers::ReceiverStream;
-use tonic::{body::BoxBody, client::GrpcService};
+use tonic::{body::Body as TonicBody, client::GrpcService};
 use tracing::Instrument;

 pub(super) fn create_collector<S>(

@@ -18,7 +18,7 @@ pub(super) fn create_collector<S>(
     legacy_metrics: metrics::Registry,
 ) -> EnabledCollector
 where
-    S: GrpcService<BoxBody> + Clone + Send + 'static,
+    S: GrpcService<TonicBody> + Clone + Send + 'static,
     S::Error: Into<Error>,
     S::Future: Send,
     S::ResponseBody: Body<Data = tonic::codegen::Bytes> + Send + 'static,

@@ -15,7 +15,7 @@ use std::{
 };
 use tokio::sync::mpsc;
 use tokio_stream::wrappers::ReceiverStream;
-use tonic::{body::BoxBody, client::GrpcService};
+use tonic::{body::Body as TonicBody, client::GrpcService};
 use tracing::Instrument;

 pub(super) struct OtelCollectorAttributes {

@@ -31,7 +31,7 @@ pub(super) fn create_collector<S>(
     legacy_metrics: metrics::Registry,
 ) -> EnabledCollector
 where
-    S: GrpcService<BoxBody> + Clone + Send + 'static,
+    S: GrpcService<TonicBody> + Clone + Send + 'static,
     S::Error: Into<Error>,
     S::Future: Send,
     S::ResponseBody: Body<Data = tonic::codegen::Bytes> + Send + 'static,

@@ -70,8 +70,7 @@ impl<E: fmt::Debug> tower::Service<E> for NoRawTcp {

     fn call(&mut self, endpoint: E) -> Self::Future {
         panic!(
-            "no raw TCP connections expected in this test, but tried to connect to {:?}",
-            endpoint
+            "no raw TCP connections expected in this test, but tried to connect to {endpoint:?}"
         );
     }
 }

@@ -279,8 +279,7 @@ impl<T: Param<ConcreteAddr>, E> tower::Service<T> for NoDst<E> {
     fn call(&mut self, target: T) -> Self::Future {
         let ConcreteAddr(addr) = target.param();
         panic!(
-            "no destination resolutions were expected in this test, but tried to resolve {}",
-            addr
+            "no destination resolutions were expected in this test, but tried to resolve {addr}"
         );
     }
 }

@@ -296,10 +295,7 @@ impl<T: Param<profiles::LookupAddr>> tower::Service<T> for NoProfiles {

     fn call(&mut self, target: T) -> Self::Future {
         let profiles::LookupAddr(addr) = target.param();
-        panic!(
-            "no profile resolutions were expected in this test, but tried to resolve {}",
-            addr
-        );
+        panic!("no profile resolutions were expected in this test, but tried to resolve {addr}");
     }
 }

@ -11,7 +11,6 @@ ahash = "0.8"
|
|||
linkerd-stack = { path = "../stack" }
|
||||
parking_lot = "0.12"
|
||||
rand = { version = "0.9", features = ["small_rng"] }
|
||||
tokio = { version = "1", features = ["macros"] }
|
||||
tracing = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
|
|
|
@ -7,14 +7,15 @@ edition = { workspace = true }
|
|||
publish = { workspace = true }
|
||||
|
||||
[dependencies]
|
||||
futures = { version = "0.3", default-features = false }
|
||||
hickory-resolver = "0.25.2"
|
||||
linkerd-dns-name = { path = "./name" }
|
||||
linkerd-error = { path = "../error" }
|
||||
prometheus-client = { workspace = true }
|
||||
thiserror = "2"
|
||||
tokio = { version = "1", features = ["rt", "sync", "time"] }
|
||||
tracing = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
linkerd-error = { path = "../error" }
|
||||
|
||||
[lints.rust]
|
||||
unexpected_cfgs = { level = "warn", check-cfg = ['cfg(fuzzing)'] }
|
||||
|
@@ -312,7 +312,7 @@ mod tests {
         for &(left, right, expected_result) in CASES {
             let l = left.parse::<Name>().unwrap();
             let r = right.parse::<Name>().unwrap();
-            assert_eq!(l == r, expected_result, "{:?} vs {:?}", l, r);
+            assert_eq!(l == r, expected_result, "{l:?} vs {r:?}");
         }
     }

@@ -327,7 +327,7 @@ mod tests {
         ];
         for (host, expected_result) in cases {
             let dns_name = host.parse::<Name>().unwrap();
-            assert_eq!(dns_name.is_localhost(), *expected_result, "{:?}", dns_name)
+            assert_eq!(dns_name.is_localhost(), *expected_result, "{dns_name:?}")
         }
     }

@@ -343,12 +343,11 @@ mod tests {
         for (host, expected_result) in cases {
             let dns_name = host
                 .parse::<Name>()
-                .unwrap_or_else(|_| panic!("'{}' was invalid", host));
+                .unwrap_or_else(|_| panic!("'{host}' was invalid"));
             assert_eq!(
                 dns_name.without_trailing_dot(),
                 *expected_result,
-                "{:?}",
-                dns_name
+                "{dns_name:?}"
             )
         }
         assert!(".".parse::<Name>().is_err());

@@ -338,7 +338,7 @@ mod tests {
         ] {
             let n = Name::from_str(name).unwrap();
             let s = Suffix::from_str(suffix).unwrap();
-            assert!(s.contains(&n), "{} should contain {}", suffix, name);
+            assert!(s.contains(&n), "{suffix} should contain {name}");
         }
     }

@@ -352,7 +352,7 @@ mod tests {
         ] {
             let n = Name::from_str(name).unwrap();
             let s = Suffix::from_str(suffix).unwrap();
-            assert!(!s.contains(&n), "{} should not contain {}", suffix, name);
+            assert!(!s.contains(&n), "{suffix} should not contain {name}");
         }

         assert!(Suffix::from_str("").is_err(), "suffix must not be empty");

@@ -15,10 +15,10 @@ tokio = { version = "1", default-features = false }
 tracing = { workspace = true }

 linkerd-error = { path = "../../error" }
-linkerd-http-box = { path = "../../http/box" }
 linkerd-stack = { path = "../../stack" }

 [dev-dependencies]
 tokio-test = "0.4"
 tower-test = { workspace = true }
+linkerd-http-box = { path = "../../http/box" }
 linkerd-tracing = { path = "../../tracing", features = ["ansi"] }
@@ -2,7 +2,7 @@ use super::{ClassifyEos, ClassifyResponse};
 use futures::{prelude::*, ready};
 use http_body::Frame;
 use linkerd_error::Error;
-use linkerd_stack::{layer, ExtractParam, NewService, Service};
+use linkerd_stack::Service;
 use pin_project::{pin_project, pinned_drop};
 use std::{
     fmt::Debug,

@@ -12,18 +12,6 @@ use std::{
 };
 use tokio::sync::mpsc;

-/// Constructs new [`BroadcastClassification`] services.
-///
-/// `X` is an [`ExtractParam`] implementation that extracts a [`Tx`] from each
-/// target. The [`Tx`] is used to broadcast the classification of each response
-/// from the constructed [`BroadcastClassification`] service.
-#[derive(Debug)]
-pub struct NewBroadcastClassification<C, X, N> {
-    inner: N,
-    extract: X,
-    _marker: PhantomData<fn() -> C>,
-}
-
 /// A HTTP `Service` that applies a [`ClassifyResponse`] to each response, and
 /// broadcasts the classification over a [`mpsc`] channel.
 #[derive(Debug)]

@@ -33,14 +21,6 @@ pub struct BroadcastClassification<C: ClassifyResponse, S> {
     _marker: PhantomData<fn() -> C>,
 }

-/// A handle to a [`mpsc`] channel over which response classifications are
-/// broadcasted.
-///
-/// This is extracted from a target value by [`NewBroadcastClassification`] when
-/// constructing a [`BroadcastClassification`] service.
-#[derive(Clone, Debug)]
-pub struct Tx<C>(pub mpsc::Sender<C>);
-
 #[pin_project]
 pub struct ResponseFuture<C: ClassifyResponse, B, F> {
     #[pin]

@@ -62,59 +42,6 @@ struct State<C, T> {
     tx: mpsc::Sender<T>,
 }

-// === impl NewBroadcastClassification ===
-
-impl<C, X: Clone, N> NewBroadcastClassification<C, X, N> {
-    pub fn new(extract: X, inner: N) -> Self {
-        Self {
-            inner,
-            extract,
-            _marker: PhantomData,
-        }
-    }
-
-    /// Returns a [`layer::Layer`] that constructs `NewBroadcastClassification`
-    /// [`NewService`]s, using the provided [`ExtractParam`] implementation to
-    /// extract a classification [`Tx`] from the target.
-    pub fn layer_via(extract: X) -> impl layer::Layer<N, Service = Self> + Clone {
-        layer::mk(move |inner| Self::new(extract.clone(), inner))
-    }
-}
-
-impl<C, N> NewBroadcastClassification<C, (), N> {
-    /// Returns a [`layer::Layer`] that constructs `NewBroadcastClassification`
-    /// [`NewService`]s when the target type implements
-    /// [`linkerd_stack::Param`]`<`[`Tx`]`>`.
-    pub fn layer() -> impl layer::Layer<N, Service = Self> + Clone {
-        Self::layer_via(())
-    }
-}
-
-impl<T, C, X, N> NewService<T> for NewBroadcastClassification<C, X, N>
-where
-    C: ClassifyResponse,
-    X: ExtractParam<Tx<C::Class>, T>,
-    N: NewService<T>,
-{
-    type Service = BroadcastClassification<C, N::Service>;
-
-    fn new_service(&self, target: T) -> Self::Service {
-        let Tx(tx) = self.extract.extract_param(&target);
-        let inner = self.inner.new_service(target);
-        BroadcastClassification::new(tx, inner)
-    }
-}
-
-impl<C, X: Clone, N: Clone> Clone for NewBroadcastClassification<C, X, N> {
-    fn clone(&self) -> Self {
-        Self {
-            inner: self.inner.clone(),
-            extract: self.extract.clone(),
-            _marker: PhantomData,
-        }
-    }
-}
-
 // === impl BroadcastClassification ===

 impl<C: ClassifyResponse, S> BroadcastClassification<C, S> {
@@ -4,12 +4,12 @@
 use linkerd_error::Error;

 pub use self::{
-    channel::{BroadcastClassification, NewBroadcastClassification, Tx},
+    channel::BroadcastClassification,
     gate::{NewClassifyGate, NewClassifyGateSet},
     insert::{InsertClassifyResponse, NewInsertClassifyResponse},
 };

-pub mod channel;
+mod channel;
 pub mod gate;
 mod insert;

@@ -10,11 +10,9 @@ publish = { workspace = true }
 test-util = []

 [dependencies]
-bytes = { workspace = true }
-futures = { version = "0.3", default-features = false }
 http = { workspace = true }
 http-body = { workspace = true }
 hyper = { workspace = true, features = ["http1", "http2"] }
 parking_lot = "0.12"
 pin-project = "1"
 tokio = { version = "1", features = ["time"] }

@@ -2,7 +2,7 @@
 #![forbid(unsafe_code)]

 pub use self::{requests::Requests, retries::Retries};
-use linkerd_metrics::SharedStore;
+use linkerd_metrics::legacy::SharedStore;
 use parking_lot::Mutex;
 use std::{fmt, hash::Hash, time::Duration};

@@ -4,7 +4,10 @@ mod service;
 pub use self::service::{NewHttpMetrics, ResponseBody};
 use super::Report;
 use linkerd_http_classify::ClassifyResponse;
-use linkerd_metrics::{latency, Counter, FmtMetrics, Histogram, LastUpdate, NewMetrics};
+use linkerd_metrics::{
+    latency,
+    legacy::{Counter, FmtMetrics, Histogram, LastUpdate, NewMetrics},
+};
 use linkerd_stack::{self as svc, layer};
 use std::{collections::HashMap, fmt::Debug, hash::Hash};
 use tokio::time::{Duration, Instant};

@@ -146,7 +149,7 @@ impl ClassMetrics {
 mod tests {
     #[test]
     fn expiry() {
-        use linkerd_metrics::FmtLabels;
+        use linkerd_metrics::legacy::FmtLabels;
         use std::fmt;
         use tokio::time::{Duration, Instant};

@@ -1,7 +1,8 @@
 use super::{ClassMetrics, Metrics, StatusMetrics};
 use crate::{Prefixed, Report};
 use linkerd_metrics::{
-    latency, Counter, FmtLabels, FmtMetric, FmtMetrics, Histogram, Metric, Store,
+    latency,
+    legacy::{Counter, FmtLabels, FmtMetric, FmtMetrics, Histogram, Metric, Store},
 };
 use parking_lot::Mutex;
 use std::{fmt, hash::Hash};

@@ -3,7 +3,7 @@ use futures::{ready, TryFuture};
 use http_body::{Body, Frame};
 use linkerd_error::Error;
 use linkerd_http_classify::{ClassifyEos, ClassifyResponse};
-use linkerd_metrics::NewMetrics;
+use linkerd_metrics::legacy::NewMetrics;
 use linkerd_stack::Proxy;
 use parking_lot::Mutex;
 use pin_project::{pin_project, pinned_drop};

@@ -1,5 +1,5 @@
 use super::{Prefixed, Registry, Report};
-use linkerd_metrics::{Counter, FmtLabels, FmtMetric, FmtMetrics, LastUpdate, Metric};
+use linkerd_metrics::legacy::{Counter, FmtLabels, FmtMetric, FmtMetrics, LastUpdate, Metric};
 use parking_lot::Mutex;
 use std::{fmt, hash::Hash, sync::Arc};
 use tokio::time::{Duration, Instant};
Some files were not shown because too many files have changed in this diff.