Compare commits


No commits in common. "main" and "release/v2.308.0" have entirely different histories.

155 changed files with 3295 additions and 1399 deletions

View File

@@ -3,7 +3,7 @@
   "build": {
     "dockerfile": "Dockerfile",
     "args": {
-      "DEV_VERSION": "v47",
+      "DEV_VERSION": "v46",
       "http_proxy": "${localEnv:http_proxy}",
       "https_proxy": "${localEnv:https_proxy}"
     }

View File

@@ -22,13 +22,13 @@ permissions:
 jobs:
   build:
-    runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
-    container: ghcr.io/linkerd/dev:v47-rust
+    runs-on: ubuntu-24.04
+    container: ghcr.io/linkerd/dev:v46-rust
     timeout-minutes: 20
     continue-on-error: true
     steps:
       - run: rustup toolchain install --profile=minimal beta
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
       - run: git config --global --add safe.directory "$PWD" # actions/runner#2033
       - run: just toolchain=beta fetch
       - run: just toolchain=beta build

View File

@@ -21,9 +21,9 @@ env:
 jobs:
   meta:
     timeout-minutes: 5
-    runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
+    runs-on: ubuntu-24.04
     steps:
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
       - id: changed
         uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c
         with:
@@ -40,15 +40,15 @@ jobs:
   codecov:
     needs: meta
     if: (github.event_name == 'push' && github.ref == 'refs/heads/main') || needs.meta.outputs.any_changed == 'true'
-    runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
+    runs-on: ubuntu-24.04
     timeout-minutes: 30
     container:
-      image: docker://ghcr.io/linkerd/dev:v47-rust
+      image: docker://ghcr.io/linkerd/dev:v46-rust
       options: --security-opt seccomp=unconfined # 🤷
     env:
       CXX: "/usr/bin/clang++-19"
     steps:
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
       - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0
       - run: cargo tarpaulin --locked --workspace --exclude=linkerd2-proxy --exclude=linkerd-transport-header --exclude=opencensus-proto --exclude=spire-proto --no-run
       - run: cargo tarpaulin --locked --workspace --exclude=linkerd2-proxy --exclude=linkerd-transport-header --exclude=opencensus-proto --exclude=spire-proto --skip-clean --ignore-tests --no-fail-fast --out=Xml

View File

@@ -26,11 +26,11 @@ permissions:
 jobs:
   list-changed:
     timeout-minutes: 3
-    runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
+    runs-on: ubuntu-24.04
     container: docker://rust:1.88.0
     steps:
       - run: apt update && apt install -y jo
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
       - run: git config --global --add safe.directory "$PWD" # actions/runner#2033
       - uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c
         id: changed-files
@@ -47,7 +47,7 @@ jobs:
   build:
     needs: [list-changed]
     timeout-minutes: 40
-    runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
+    runs-on: ubuntu-24.04
     container: docker://rust:1.88.0
     strategy:
       matrix:
@@ -55,7 +55,7 @@ jobs:
     steps:
       - run: rustup toolchain add nightly
      - run: cargo install cargo-fuzz
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
       - run: git config --global --add safe.directory "$PWD" # actions/runner#2033
       - working-directory: ${{matrix.dir}}
         run: cargo +nightly fetch

View File

@@ -12,9 +12,9 @@ on:
 jobs:
   markdownlint:
     timeout-minutes: 5
-    runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
+    runs-on: ubuntu-24.04
     steps:
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
       - uses: DavidAnson/markdownlint-cli2-action@992badcdf24e3b8eb7e87ff9287fe931bcb00c6e
         with:
           globs: "**/*.md"

View File

@@ -22,13 +22,13 @@ permissions:
 jobs:
   build:
-    runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
-    container: ghcr.io/linkerd/dev:v47-rust
+    runs-on: ubuntu-24.04
+    container: ghcr.io/linkerd/dev:v46-rust
     timeout-minutes: 20
     continue-on-error: true
     steps:
       - run: rustup toolchain install --profile=minimal nightly
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
       - run: git config --global --add safe.directory "$PWD" # actions/runner#2033
       - run: just toolchain=nightly fetch
       - run: just toolchain=nightly profile=release build

View File

@@ -14,9 +14,9 @@ concurrency:
 jobs:
   meta:
     timeout-minutes: 5
-    runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
+    runs-on: ubuntu-24.04
     steps:
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
       - id: build
         uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c
         with:
@@ -57,7 +57,7 @@ jobs:
   info:
     timeout-minutes: 3
     needs: meta
-    runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
+    runs-on: ubuntu-24.04
     steps:
       - name: Info
         run: |
@@ -74,24 +74,24 @@ jobs:
   actions:
     needs: meta
     if: needs.meta.outputs.actions_changed == 'true'
-    runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
+    runs-on: ubuntu-24.04
     steps:
-      - uses: linkerd/dev/actions/setup-tools@v47
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
+      - uses: linkerd/dev/actions/setup-tools@v46
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
       - run: just action-lint
       - run: just action-dev-check
   rust:
     needs: meta
     if: needs.meta.outputs.cargo_changed == 'true' || needs.meta.outputs.rust_changed == 'true'
-    runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
-    container: ghcr.io/linkerd/dev:v47-rust
+    runs-on: ubuntu-24.04
+    container: ghcr.io/linkerd/dev:v46-rust
     permissions:
       contents: read
     timeout-minutes: 20
     steps:
       - run: git config --global --add safe.directory "$PWD" # actions/runner#2033
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
       - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0
       - run: just fetch
       - run: cargo deny --all-features check bans licenses sources
@@ -107,14 +107,14 @@ jobs:
     needs: meta
     if: needs.meta.outputs.cargo_changed == 'true'
     timeout-minutes: 20
-    runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
-    container: ghcr.io/linkerd/dev:v47-rust
+    runs-on: ubuntu-24.04
+    container: ghcr.io/linkerd/dev:v46-rust
     strategy:
       matrix:
         crate: ${{ fromJson(needs.meta.outputs.cargo_crates) }}
     steps:
       - run: git config --global --add safe.directory "$PWD" # actions/runner#2033
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
       - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0
       - run: just fetch
       - run: just check-crate ${{ matrix.crate }}
@@ -123,11 +123,11 @@ jobs:
     needs: meta
     if: needs.meta.outputs.cargo_changed == 'true' || needs.meta.outputs.rust_changed == 'true'
     timeout-minutes: 20
-    runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
+    runs-on: ubuntu-24.04
     env:
       WAIT_TIMEOUT: 2m
     steps:
-      - uses: linkerd/dev/actions/setup-tools@v47
+      - uses: linkerd/dev/actions/setup-tools@v46
       - name: scurl https://run.linkerd.io/install-edge | sh
         run: |
           scurl https://run.linkerd.io/install-edge | sh
@@ -136,7 +136,7 @@ jobs:
           tag=$(linkerd version --client --short)
           echo "linkerd $tag"
           echo "LINKERD_TAG=$tag" >> "$GITHUB_ENV"
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
       - run: just docker
       - run: just k3d-create
       - run: just k3d-load-linkerd
@@ -149,7 +149,7 @@ jobs:
     timeout-minutes: 3
     needs: [meta, actions, rust, rust-crates, linkerd-install]
     if: always()
-    runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
+    runs-on: ubuntu-24.04
     permissions:
       contents: write
@@ -168,7 +168,7 @@ jobs:
         if: contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled')
         run: exit 1
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
         if: needs.meta.outputs.is_dependabot == 'true' && needs.meta.outputs.any_changed == 'true'
       - name: "Merge dependabot changes"
         if: needs.meta.outputs.is_dependabot == 'true' && needs.meta.outputs.any_changed == 'true'

View File

@@ -13,7 +13,7 @@ concurrency:
 jobs:
   last-release:
     if: github.repository == 'linkerd/linkerd2-proxy' # Don't run this in forks.
-    runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
+    runs-on: ubuntu-24.04
     timeout-minutes: 5
     env:
       GH_REPO: ${{ github.repository }}
@@ -41,10 +41,10 @@ jobs:
   last-commit:
     needs: last-release
-    runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
+    runs-on: ubuntu-24.04
     timeout-minutes: 5
     steps:
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
       - name: Check if the most recent commit is after the last release
         id: recency
         env:
@@ -62,7 +62,7 @@ jobs:
   trigger-release:
     needs: [last-release, last-commit]
     if: needs.last-release.outputs.recent == 'false' && needs.last-commit.outputs.after-release == 'true'
-    runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
+    runs-on: ubuntu-24.04
     timeout-minutes: 5
     permissions:
       actions: write

View File

@@ -46,7 +46,6 @@ on:
         default: true
 env:
-  CARGO: "cargo auditable"
   CARGO_INCREMENTAL: 0
   CARGO_NET_RETRY: 10
   RUSTFLAGS: "-D warnings -A deprecated --cfg tokio_unstable"
@@ -59,25 +58,9 @@ concurrency:
 jobs:
   meta:
     timeout-minutes: 5
-    runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
+    runs-on: ubuntu-24.04
     steps:
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
-        if: github.event_name == 'pull_request'
-      - id: workflow
-        if: github.event_name == 'pull_request'
-        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c
-        with:
-          files: |
-            .github/workflows/release.yml
-      - id: build
-        if: github.event_name == 'pull_request'
-        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c
-        with:
-          files: |
-            justfile
-            Cargo.toml
-      - id: version
+      - id: meta
         env:
           VERSION: ${{ inputs.version }}
         shell: bash
@@ -85,45 +68,47 @@ jobs:
           set -euo pipefail
           shopt -s extglob
           if [[ "$GITHUB_EVENT_NAME" == pull_request ]]; then
-            echo version="0.0.0-test.${GITHUB_SHA:0:7}" >> "$GITHUB_OUTPUT"
+            echo version="0.0.0-test.${GITHUB_SHA:0:7}"
+            echo archs='["amd64"]'
+            echo oses='["linux"]'
             exit 0
-          fi
+          fi >> "$GITHUB_OUTPUT"
           if ! [[ "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+(-[0-9A-Za-z-]+)?(\+[0-9A-Za-z-]+)?$ ]]; then
            echo "Invalid version: $VERSION" >&2
            exit 1
          fi
-          echo version="${VERSION#v}" >> "$GITHUB_OUTPUT"
-      - id: platform
-        shell: bash
-        env:
-          WORKFLOW_CHANGED: ${{ steps.workflow.outputs.any_changed }}
-        run: |
-          if [[ "$GITHUB_EVENT_NAME" == pull_request && "$WORKFLOW_CHANGED" != 'true' ]]; then
-            ( echo archs='["amd64"]'
-              echo oses='["linux"]' ) >> "$GITHUB_OUTPUT"
-            exit 0
-          fi
-          ( echo archs='["amd64", "arm64"]'
+          ( echo version="${VERSION#v}"
+            echo archs='["amd64", "arm64", "arm"]'
             echo oses='["linux", "windows"]'
           ) >> "$GITHUB_OUTPUT"
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+        if: github.event_name == 'pull_request'
+      - id: changed
+        if: github.event_name == 'pull_request'
+        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c
+        with:
+          files: |
+            .github/workflows/release.yml
+            justfile
+            Cargo.toml
     outputs:
-      archs: ${{ steps.platform.outputs.archs }}
-      oses: ${{ steps.platform.outputs.oses }}
-      version: ${{ steps.version.outputs.version }}
-      package: ${{ github.event_name == 'workflow_dispatch' || steps.build.outputs.any_changed == 'true' || steps.workflow.outputs.any_changed == 'true' }}
+      archs: ${{ steps.meta.outputs.archs }}
+      oses: ${{ steps.meta.outputs.oses }}
+      version: ${{ steps.meta.outputs.version }}
+      package: ${{ github.event_name == 'workflow_dispatch' || steps.changed.outputs.any_changed == 'true' }}
       profile: ${{ inputs.profile || 'release' }}
       publish: ${{ inputs.publish }}
       ref: ${{ inputs.ref || github.sha }}
-      tag: "${{ inputs.tag-prefix || 'release/' }}v${{ steps.version.outputs.version }}"
+      tag: "${{ inputs.tag-prefix || 'release/' }}v${{ steps.meta.outputs.version }}"
       prerelease: ${{ inputs.prerelease }}
       draft: ${{ inputs.draft }}
       latest: ${{ inputs.latest }}
   info:
     needs: meta
-    runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
+    runs-on: ubuntu-24.04
     timeout-minutes: 3
     steps:
       - name: Inputs
@@ -149,13 +134,15 @@ jobs:
       exclude:
         - os: windows
          arch: arm64
+        - os: windows
+          arch: arm
     # If we're not actually building on a release tag, don't short-circuit on
     # errors. This helps us know whether a failure is platform-specific.
     continue-on-error: ${{ needs.meta.outputs.publish != 'true' }}
-    runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
+    runs-on: ubuntu-24.04
     timeout-minutes: 40
-    container: docker://ghcr.io/linkerd/dev:v47-rust-musl
+    container: docker://ghcr.io/linkerd/dev:v46-rust-musl
     env:
       LINKERD2_PROXY_VENDOR: ${{ github.repository_owner }}
       LINKERD2_PROXY_VERSION: ${{ needs.meta.outputs.version }}
@@ -163,19 +150,15 @@ jobs:
       # TODO: add to dev image
       - name: Install MiniGW
         if: matrix.os == 'windows'
-        run: apt-get update && apt-get install -y mingw-w64
-      - name: Install cross compilation toolchain
-        if: matrix.arch == 'arm64'
-        run: apt-get update && apt-get install -y binutils-aarch64-linux-gnu
+        run: apt-get update && apt-get install mingw-w64 -y
       - name: Configure git
         run: git config --global --add safe.directory "$PWD" # actions/runner#2033
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
         with:
           ref: ${{ needs.meta.outputs.ref }}
       - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0
         with:
-          key: ${{ matrix.os }}-${{ matrix.arch }}
+          key: ${{ matrix.arch }}
       - run: just fetch
       - run: just arch=${{ matrix.arch }} libc=${{ matrix.libc }} os=${{ matrix.os }} rustup
       - run: just arch=${{ matrix.arch }} libc=${{ matrix.libc }} os=${{ matrix.os }} profile=${{ needs.meta.outputs.profile }} build
@@ -187,7 +170,7 @@ jobs:
   publish:
     needs: [meta, package]
-    runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
+    runs-on: ubuntu-24.04
     timeout-minutes: 5
     permissions:
       actions: write
@@ -204,7 +187,7 @@ jobs:
          git config --global user.name "$GITHUB_USERNAME"
          git config --global user.email "$GITHUB_USERNAME"@users.noreply.github.com
       # Tag the release.
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
         with:
          token: ${{ secrets.LINKERD2_PROXY_GITHUB_TOKEN || github.token }}
          ref: ${{ needs.meta.outputs.ref }}
@@ -242,7 +225,7 @@ jobs:
     needs: publish
     if: always()
     timeout-minutes: 3
-    runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
+    runs-on: ubuntu-24.04
     steps:
       - name: Results
         run: |
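
Note: the `meta` job above accepts only SemVer-style tags with a leading `v` and exports the stripped value. A standalone sketch of that check, runnable in any shell (the sample tags are invented for illustration):

#!/usr/bin/env bash
# Sketch of the release workflow's version validation; sample tags are hypothetical.
set -euo pipefail
for VERSION in v2.308.0 v2.308.0-rc.1 2.308.0; do
  if [[ "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+(-[0-9A-Za-z-]+)?(\+[0-9A-Za-z-]+)?$ ]]; then
    echo "ok:      $VERSION -> ${VERSION#v}"   # the workflow writes the stripped value to $GITHUB_OUTPUT
  else
    echo "invalid: $VERSION" >&2
  fi
done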

View File

@@ -13,8 +13,8 @@ on:
 jobs:
   sh-lint:
     timeout-minutes: 5
-    runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
+    runs-on: ubuntu-24.04
     steps:
-      - uses: linkerd/dev/actions/setup-tools@v47
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
+      - uses: linkerd/dev/actions/setup-tools@v46
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
       - run: just sh-lint

View File

@@ -13,10 +13,10 @@ permissions:
 jobs:
   devcontainer:
-    runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
-    container: ghcr.io/linkerd/dev:v47-rust
+    runs-on: ubuntu-24.04
+    container: ghcr.io/linkerd/dev:v46-rust
     steps:
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
       - run: git config --global --add safe.directory "$PWD" # actions/runner#2033
       - run: |
           VERSION_REGEX='channel = "([0-9]+\.[0-9]+\.[0-9]+)"'
@@ -35,10 +35,10 @@ jobs:
   workflows:
-    runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
+    runs-on: ubuntu-24.04
     steps:
-      - uses: linkerd/dev/actions/setup-tools@v47
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
+      - uses: linkerd/dev/actions/setup-tools@v46
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
       - shell: bash
         run: |
           VERSION_REGEX='channel = "([0-9]+\.[0-9]+\.[0-9]+)"'
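
Note: both jobs above extract a pinned toolchain channel with the same `VERSION_REGEX`. A minimal sketch of that extraction (the `rust-toolchain.toml` path is an assumption; the workflow's actual input file is not shown in this diff):

#!/usr/bin/env bash
# Sketch: pull the pinned Rust version out of the toolchain file, as the verify jobs do.
# Assumes the pin lives in rust-toolchain.toml; adjust the path as needed.
set -euo pipefail
VERSION_REGEX='channel = "([0-9]+\.[0-9]+\.[0-9]+)"'
if [[ "$(cat rust-toolchain.toml)" =~ $VERSION_REGEX ]]; then
  echo "pinned toolchain: ${BASH_REMATCH[1]}"
fi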

File diff suppressed because it is too large.

View File

@@ -42,6 +42,8 @@ members = [
     "linkerd/idle-cache",
     "linkerd/io",
     "linkerd/meshtls",
+    "linkerd/meshtls/boring",
+    "linkerd/meshtls/rustls",
     "linkerd/meshtls/verifier",
     "linkerd/metrics",
     "linkerd/mock/http-body",
@@ -69,7 +71,6 @@ members = [
     "linkerd/reconnect",
     "linkerd/retry",
     "linkerd/router",
-    "linkerd/rustls",
     "linkerd/service-profiles",
     "linkerd/signal",
     "linkerd/stack",
@@ -114,10 +115,11 @@ prost = { version = "0.13" }
 prost-build = { version = "0.13", default-features = false }
 prost-types = { version = "0.13" }
 tokio-rustls = { version = "0.26", default-features = false, features = [
+    "ring",
     "logging",
 ] }
-tonic = { version = "0.13", default-features = false }
-tonic-build = { version = "0.13", default-features = false }
+tonic = { version = "0.12", default-features = false }
+tonic-build = { version = "0.12", default-features = false }
 tower = { version = "0.5", default-features = false }
 tower-service = { version = "0.3" }
 tower-test = { version = "0.4" }
@@ -134,4 +136,4 @@ default-features = false
 features = ["tokio", "tracing"]
 [workspace.dependencies.linkerd2-proxy-api]
-version = "0.17.0"
+version = "0.16.0"

View File

@@ -3,7 +3,7 @@
 # This is intended **DEVELOPMENT ONLY**, i.e. so that proxy developers can
 # easily test the proxy in the context of the larger `linkerd2` project.
-ARG RUST_IMAGE=ghcr.io/linkerd/dev:v47-rust
+ARG RUST_IMAGE=ghcr.io/linkerd/dev:v46-rust
 
 # Use an arbitrary ~recent edge release image to get the proxy
 # identity-initializing and linkerd-await wrappers.
@@ -14,16 +14,11 @@ FROM $LINKERD2_IMAGE as linkerd2
 FROM --platform=$BUILDPLATFORM $RUST_IMAGE as fetch
 
 ARG PROXY_FEATURES=""
-ARG TARGETARCH="amd64"
 RUN apt-get update && \
     apt-get install -y time && \
     if [[ "$PROXY_FEATURES" =~ .*meshtls-boring.* ]] ; then \
       apt-get install -y golang ; \
     fi && \
-    case "$TARGETARCH" in \
-      amd64) true ;; \
-      arm64) apt-get install --no-install-recommends -y binutils-aarch64-linux-gnu ;; \
-    esac && \
     rm -rf /var/lib/apt/lists/*
 
 ENV CARGO_NET_RETRY=10
@@ -38,6 +33,7 @@ RUN --mount=type=cache,id=cargo,target=/usr/local/cargo/registry \
 FROM fetch as build
 ENV CARGO_INCREMENTAL=0
 ENV RUSTFLAGS="-D warnings -A deprecated --cfg tokio_unstable"
+ARG TARGETARCH="amd64"
 ARG PROFILE="release"
 ARG LINKERD2_PROXY_VERSION=""
 ARG LINKERD2_PROXY_VENDOR=""

View File

@@ -2,6 +2,7 @@
 targets = [
     { triple = "x86_64-unknown-linux-gnu" },
     { triple = "aarch64-unknown-linux-gnu" },
+    { triple = "armv7-unknown-linux-gnu" },
 ]
 
 [advisories]
@@ -23,6 +24,11 @@ allow = [
 private = { ignore = true }
 confidence-threshold = 0.8
 exceptions = [
+    { allow = [
+        "ISC",
+        "MIT",
+        "OpenSSL",
+    ], name = "ring", version = "*" },
     { allow = [
         "ISC",
         "OpenSSL",
@@ -33,6 +39,14 @@ exceptions = [
     ], name = "aws-lc-fips-sys", version = "*" },
 ]
 
+[[licenses.clarify]]
+name = "ring"
+version = "*"
+expression = "MIT AND ISC AND OpenSSL"
+license-files = [
+    { path = "LICENSE", hash = 0xbd0eed23 },
+]
+
 [bans]
 multiple-versions = "deny"
 # Wildcard dependencies are used for all workspace-local crates.
@@ -42,8 +56,6 @@ deny = [
     { name = "rustls", wrappers = ["tokio-rustls"] },
     # rustls-webpki should be used instead.
     { name = "webpki" },
-    # aws-lc-rs should be used instead.
-    { name = "ring" }
 ]
 skip = [
     # `linkerd-trace-context`, `rustls-pemfile` and `tonic` depend on `base64`
@@ -64,8 +76,6 @@ skip-tree = [
     { name = "pprof" },
     # aws-lc-rs uses a slightly outdated version of bindgen
     { name = "bindgen", version = "0.69.5" },
-    # socket v0.6 is still propagating through the ecosystem
-    { name = "socket2", version = "0.5" },
 ]
 
 [sources]
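
Note: these license exceptions and bans feed cargo-deny; the CI job shown earlier runs the same checks, which can be reproduced locally:

# Run the dependency checks exactly as CI does (assumes cargo-deny is installed).
cargo install cargo-deny --locked   # one-time setup
cargo deny --all-features check bans licenses sources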

View File

@ -18,10 +18,6 @@ features := ""
export LINKERD2_PROXY_VERSION := env_var_or_default("LINKERD2_PROXY_VERSION", "0.0.0-dev" + `git rev-parse --short HEAD`) export LINKERD2_PROXY_VERSION := env_var_or_default("LINKERD2_PROXY_VERSION", "0.0.0-dev" + `git rev-parse --short HEAD`)
export LINKERD2_PROXY_VENDOR := env_var_or_default("LINKERD2_PROXY_VENDOR", `whoami` + "@" + `hostname`) export LINKERD2_PROXY_VENDOR := env_var_or_default("LINKERD2_PROXY_VENDOR", `whoami` + "@" + `hostname`)
# TODO: these variables will be included in dev v48
export AWS_LC_SYS_CFLAGS_aarch64_unknown_linux_gnu := env_var_or_default("AWS_LC_SYS_CFLAGS_aarch64_unknown_linux_gnu", "-fuse-ld=/usr/aarch64-linux-gnu/bin/ld")
export AWS_LC_SYS_CFLAGS_aarch64_unknown_linux_musl := env_var_or_default("AWS_LC_SYS_CFLAGS_aarch64_unknown_linux_musl", "-fuse-ld=/usr/aarch64-linux-gnu/bin/ld")
# The version name to use for packages. # The version name to use for packages.
package_version := "v" + LINKERD2_PROXY_VERSION package_version := "v" + LINKERD2_PROXY_VERSION
@ -30,7 +26,7 @@ docker-repo := "localhost/linkerd/proxy"
docker-tag := `git rev-parse --abbrev-ref HEAD | sed 's|/|.|g'` + "." + `git rev-parse --short HEAD` docker-tag := `git rev-parse --abbrev-ref HEAD | sed 's|/|.|g'` + "." + `git rev-parse --short HEAD`
docker-image := docker-repo + ":" + docker-tag docker-image := docker-repo + ":" + docker-tag
# The architecture name to use for packages. Either 'amd64' or 'arm64'. # The architecture name to use for packages. Either 'amd64', 'arm64', or 'arm'.
arch := "amd64" arch := "amd64"
# The OS name to use for packages. Either 'linux' or 'windows'. # The OS name to use for packages. Either 'linux' or 'windows'.
os := "linux" os := "linux"
@ -43,6 +39,8 @@ _target := if os + '-' + arch == "linux-amd64" {
"x86_64-unknown-linux-" + libc "x86_64-unknown-linux-" + libc
} else if os + '-' + arch == "linux-arm64" { } else if os + '-' + arch == "linux-arm64" {
"aarch64-unknown-linux-" + libc "aarch64-unknown-linux-" + libc
} else if os + '-' + arch == "linux-arm" {
"armv7-unknown-linux-" + libc + "eabihf"
} else if os + '-' + arch == "windows-amd64" { } else if os + '-' + arch == "windows-amd64" {
"x86_64-pc-windows-" + libc "x86_64-pc-windows-" + libc
} else { } else {
@ -141,7 +139,7 @@ _strip:
_package_bin := _package_dir / "bin" / "linkerd2-proxy" _package_bin := _package_dir / "bin" / "linkerd2-proxy"
# XXX aarch64-musl builds do not enable PIE, so we use target-specific # XXX {aarch64,arm}-musl builds do not enable PIE, so we use target-specific
# files to document those differences. # files to document those differences.
_expected_checksec := '.checksec' / arch + '-' + libc + '.json' _expected_checksec := '.checksec' / arch + '-' + libc + '.json'
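
Note: the `_target` expression above resolves os/arch/libc to a Rust target triple, and the release side adds the `linux-arm` branch. A standalone sketch of the same mapping (shell; the helper name is hypothetical):

#!/usr/bin/env bash
# Sketch of the justfile's os/arch/libc -> Rust target triple mapping.
target_for() {
  local os="$1" arch="$2" libc="$3"  # libc is 'gnu' or 'musl'
  case "$os-$arch" in
    linux-amd64)   echo "x86_64-unknown-linux-$libc" ;;
    linux-arm64)   echo "aarch64-unknown-linux-$libc" ;;
    linux-arm)     echo "armv7-unknown-linux-${libc}eabihf" ;;  # release branch only
    windows-amd64) echo "x86_64-pc-windows-$libc" ;;
    *) echo "unsupported: $os-$arch" >&2; return 1 ;;
  esac
}
target_for linux arm64 musl   # prints: aarch64-unknown-linux-musl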

View File

@@ -13,7 +13,7 @@
 use futures::future::{self, TryFutureExt};
 use http::StatusCode;
 use linkerd_app_core::{
-    metrics::{self as metrics, legacy::FmtMetrics},
+    metrics::{self as metrics, FmtMetrics},
     proxy::http::{Body, BoxBody, ClientHandle, Request, Response},
     trace, Error, Result,
 };
@@ -32,7 +32,7 @@ pub use self::readiness::{Latch, Readiness};
 #[derive(Clone)]
 pub struct Admin<M> {
-    metrics: metrics::legacy::Serve<M>,
+    metrics: metrics::Serve<M>,
     tracing: trace::Handle,
     ready: Readiness,
     shutdown_tx: mpsc::UnboundedSender<()>,
@@ -52,7 +52,7 @@ impl<M> Admin<M> {
         tracing: trace::Handle,
     ) -> Self {
         Self {
-            metrics: metrics::legacy::Serve::new(metrics),
+            metrics: metrics::Serve::new(metrics),
             ready,
             shutdown_tx,
             enable_shutdown,

View File

@@ -2,7 +2,7 @@ use linkerd_app_core::{
     classify,
     config::ServerConfig,
     drain, errors, identity,
-    metrics::{self, legacy::FmtMetrics},
+    metrics::{self, FmtMetrics},
     proxy::http,
     serve,
     svc::{self, ExtractParam, InsertParam, Param},

View File

@@ -13,23 +13,31 @@ independently of the inbound and outbound proxy logic.
 """
 
 [dependencies]
+bytes = { workspace = true }
 drain = { workspace = true, features = ["retain"] }
 http = { workspace = true }
 http-body = { workspace = true }
+http-body-util = { workspace = true }
 hyper = { workspace = true, features = ["http1", "http2"] }
+hyper-util = { workspace = true }
 futures = { version = "0.3", default-features = false }
 ipnet = "2.11"
 prometheus-client = { workspace = true }
+regex = "1"
+serde_json = "1"
 thiserror = "2"
 tokio = { version = "1", features = ["macros", "sync", "parking_lot"] }
 tokio-stream = { version = "0.1", features = ["time"] }
 tonic = { workspace = true, default-features = false, features = ["prost"] }
 tracing = { workspace = true }
+parking_lot = "0.12"
 pin-project = "1"
 
 linkerd-addr = { path = "../../addr" }
 linkerd-conditional = { path = "../../conditional" }
 linkerd-dns = { path = "../../dns" }
+linkerd-duplex = { path = "../../duplex" }
+linkerd-errno = { path = "../../errno" }
 linkerd-error = { path = "../../error" }
 linkerd-error-respond = { path = "../../error-respond" }
 linkerd-exp-backoff = { path = "../../exp-backoff" }
@@ -56,7 +64,6 @@ linkerd-proxy-tcp = { path = "../../proxy/tcp" }
 linkerd-proxy-transport = { path = "../../proxy/transport" }
 linkerd-reconnect = { path = "../../reconnect" }
 linkerd-router = { path = "../../router" }
-linkerd-rustls = { path = "../../rustls" }
 linkerd-service-profiles = { path = "../../service-profiles" }
 linkerd-stack = { path = "../../stack" }
 linkerd-stack-metrics = { path = "../../stack/metrics" }
@@ -76,6 +83,5 @@ features = ["make", "spawn-ready", "timeout", "util", "limit"]
 semver = "1"
 
 [dev-dependencies]
-bytes = { workspace = true }
-http-body-util = { workspace = true }
 linkerd-mock-http-body = { path = "../../mock/http-body" }
+quickcheck = { version = "1", default-features = false }

View File

@@ -1,4 +1,5 @@
 use crate::profiles;
+pub use classify::gate;
 use linkerd_error::Error;
 use linkerd_proxy_client_policy as client_policy;
 use linkerd_proxy_http::{classify, HasH2Reason, ResponseTimeoutError};

View File

@@ -101,7 +101,7 @@ impl Config {
         identity: identity::NewClient,
     ) -> svc::ArcNewService<
         (),
-        svc::BoxCloneSyncService<http::Request<tonic::body::Body>, http::Response<RspBody>>,
+        svc::BoxCloneSyncService<http::Request<tonic::body::BoxBody>, http::Response<RspBody>>,
     > {
         let addr = self.addr;
         tracing::trace!(%addr, "Building");

View File

@@ -25,7 +25,6 @@ pub mod metrics;
 pub mod proxy;
 pub mod serve;
 pub mod svc;
-pub mod tls_info;
 pub mod transport;
 
 pub use self::build_info::{BuildInfo, BUILD_INFO};

View File

@@ -166,7 +166,7 @@ where
 // === impl Metrics ===
 
 impl Metrics {
-    pub fn new(retain_idle: Duration) -> (Self, impl legacy::FmtMetrics + Clone + Send + 'static) {
+    pub fn new(retain_idle: Duration) -> (Self, impl FmtMetrics + Clone + Send + 'static) {
         let (control, control_report) = {
             let m = http_metrics::Requests::<ControlLabels, Class>::default();
             let r = m.clone().into_report(retain_idle).with_prefix("control");
@@ -223,7 +223,6 @@ impl Metrics {
             opentelemetry,
         };
 
-        use legacy::FmtMetrics as _;
         let report = endpoint_report
             .and_report(profile_route_report)
             .and_report(retry_report)
@@ -249,7 +248,7 @@ impl svc::Param<ControlLabels> for control::ControlAddr {
     }
 }
 
-impl legacy::FmtLabels for ControlLabels {
+impl FmtLabels for ControlLabels {
     fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         let Self { addr, server_id } = self;
@@ -282,7 +281,7 @@ impl ProfileRouteLabels {
     }
 }
 
-impl legacy::FmtLabels for ProfileRouteLabels {
+impl FmtLabels for ProfileRouteLabels {
     fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         let Self {
             direction,
@@ -315,7 +314,7 @@ impl From<OutboundEndpointLabels> for EndpointLabels {
     }
 }
 
-impl legacy::FmtLabels for EndpointLabels {
+impl FmtLabels for EndpointLabels {
     fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         match self {
             Self::Inbound(i) => (Direction::In, i).fmt_labels(f),
@@ -324,7 +323,7 @@ impl legacy::FmtLabels for EndpointLabels {
     }
 }
 
-impl legacy::FmtLabels for InboundEndpointLabels {
+impl FmtLabels for InboundEndpointLabels {
     fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         let Self {
             tls,
@@ -344,7 +343,7 @@ impl legacy::FmtLabels for InboundEndpointLabels {
     }
 }
 
-impl legacy::FmtLabels for ServerLabel {
+impl FmtLabels for ServerLabel {
     fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         let Self(meta, port) = self;
         write!(
@@ -375,7 +374,7 @@ impl prom::EncodeLabelSetMut for ServerLabel {
     }
 }
 
-impl legacy::FmtLabels for ServerAuthzLabels {
+impl FmtLabels for ServerAuthzLabels {
     fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         let Self { server, authz } = self;
@@ -390,7 +389,7 @@ impl legacy::FmtLabels for ServerAuthzLabels {
     }
 }
 
-impl legacy::FmtLabels for RouteLabels {
+impl FmtLabels for RouteLabels {
     fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         let Self { server, route } = self;
@@ -405,7 +404,7 @@ impl legacy::FmtLabels for RouteLabels {
     }
 }
 
-impl legacy::FmtLabels for RouteAuthzLabels {
+impl FmtLabels for RouteAuthzLabels {
     fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         let Self { route, authz } = self;
@@ -426,7 +425,7 @@ impl svc::Param<OutboundZoneLocality> for OutboundEndpointLabels {
     }
 }
 
-impl legacy::FmtLabels for OutboundEndpointLabels {
+impl FmtLabels for OutboundEndpointLabels {
     fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         let Self {
             server_id,
@@ -463,20 +462,20 @@ impl fmt::Display for Direction {
     }
 }
 
-impl legacy::FmtLabels for Direction {
+impl FmtLabels for Direction {
     fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         write!(f, "direction=\"{self}\"")
     }
 }
 
-impl legacy::FmtLabels for Authority<'_> {
+impl FmtLabels for Authority<'_> {
     fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         let Self(authority) = self;
         write!(f, "authority=\"{authority}\"")
     }
 }
 
-impl legacy::FmtLabels for Class {
+impl FmtLabels for Class {
     fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         let class = |ok: bool| if ok { "success" } else { "failure" };
@@ -524,7 +523,7 @@ impl StackLabels {
     }
 }
 
-impl legacy::FmtLabels for StackLabels {
+impl FmtLabels for StackLabels {
     fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         let Self {
             direction,

View File

@ -1,70 +0,0 @@
use linkerd_metrics::prom;
use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue, LabelValueEncoder};
use std::{
fmt::{Error, Write},
sync::{Arc, OnceLock},
};
static TLS_INFO: OnceLock<Arc<TlsInfo>> = OnceLock::new();
#[derive(Clone, Debug, Default, Hash, PartialEq, Eq, EncodeLabelSet)]
pub struct TlsInfo {
tls_suites: MetricValueList,
tls_kx_groups: MetricValueList,
tls_rand: String,
tls_key_provider: String,
tls_fips: bool,
}
#[derive(Clone, Debug, Default, Hash, PartialEq, Eq)]
struct MetricValueList {
values: Vec<&'static str>,
}
impl FromIterator<&'static str> for MetricValueList {
fn from_iter<T: IntoIterator<Item = &'static str>>(iter: T) -> Self {
MetricValueList {
values: iter.into_iter().collect(),
}
}
}
impl EncodeLabelValue for MetricValueList {
fn encode(&self, encoder: &mut LabelValueEncoder<'_>) -> Result<(), Error> {
for value in &self.values {
value.encode(encoder)?;
encoder.write_char(',')?;
}
Ok(())
}
}
pub fn metric() -> prom::Family<TlsInfo, prom::ConstGauge> {
let fam = prom::Family::<TlsInfo, prom::ConstGauge>::new_with_constructor(|| {
prom::ConstGauge::new(1)
});
let tls_info = TLS_INFO.get_or_init(|| {
let provider = linkerd_rustls::get_default_provider();
let tls_suites = provider
.cipher_suites
.iter()
.flat_map(|cipher_suite| cipher_suite.suite().as_str())
.collect::<MetricValueList>();
let tls_kx_groups = provider
.kx_groups
.iter()
.flat_map(|suite| suite.name().as_str())
.collect::<MetricValueList>();
Arc::new(TlsInfo {
tls_suites,
tls_kx_groups,
tls_rand: format!("{:?}", provider.secure_random),
tls_key_provider: format!("{:?}", provider.key_provider),
tls_fips: provider.fips(),
})
});
let _ = fam.get_or_create(tls_info);
fam
}

View File

@@ -1,7 +1,7 @@
 use crate::metrics::ServerLabel as PolicyServerLabel;
 pub use crate::metrics::{Direction, OutboundEndpointLabels};
 use linkerd_conditional::Conditional;
-use linkerd_metrics::legacy::FmtLabels;
+use linkerd_metrics::FmtLabels;
 use linkerd_tls as tls;
 use std::{fmt, net::SocketAddr};

View File

@@ -13,7 +13,8 @@ Configures and runs the inbound proxy
 test-util = [
     "linkerd-app-test",
     "linkerd-idle-cache/test-util",
-    "linkerd-meshtls/test-util",
+    "linkerd-meshtls/rustls",
+    "linkerd-meshtls-rustls/test-util",
 ]
 
 [dependencies]
@@ -24,7 +25,8 @@ linkerd-app-core = { path = "../core" }
 linkerd-app-test = { path = "../test", optional = true }
 linkerd-http-access-log = { path = "../../http/access-log" }
 linkerd-idle-cache = { path = "../../idle-cache" }
-linkerd-meshtls = { path = "../../meshtls", optional = true, default-features = false }
+linkerd-meshtls = { path = "../../meshtls", optional = true }
+linkerd-meshtls-rustls = { path = "../../meshtls/rustls", optional = true }
 linkerd-proxy-client-policy = { path = "../../proxy/client-policy" }
 linkerd-tonic-stream = { path = "../../tonic-stream" }
 linkerd-tonic-watch = { path = "../../tonic-watch" }
@@ -47,7 +49,7 @@ hyper = { workspace = true, features = ["http1", "http2"] }
 linkerd-app-test = { path = "../test" }
 arbitrary = { version = "1", features = ["derive"] }
 libfuzzer-sys = { version = "0.4", features = ["arbitrary-derive"] }
-linkerd-meshtls = { path = "../../meshtls", features = [
+linkerd-meshtls-rustls = { path = "../../meshtls/rustls", features = [
     "test-util",
 ] }
@@ -60,7 +62,8 @@ linkerd-http-metrics = { path = "../../http/metrics", features = ["test-util"] }
 linkerd-http-box = { path = "../../http/box" }
 linkerd-idle-cache = { path = "../../idle-cache", features = ["test-util"] }
 linkerd-io = { path = "../../io", features = ["tokio-test"] }
-linkerd-meshtls = { path = "../../meshtls", features = [
+linkerd-meshtls = { path = "../../meshtls", features = ["rustls"] }
+linkerd-meshtls-rustls = { path = "../../meshtls/rustls", features = [
     "test-util",
 ] }
 linkerd-proxy-server-policy = { path = "../../proxy/server-policy", features = [

View File

@@ -18,7 +18,8 @@ linkerd-app-core = { path = "../../core" }
 linkerd-app-inbound = { path = ".." }
 linkerd-app-test = { path = "../../test" }
 linkerd-idle-cache = { path = "../../../idle-cache", features = ["test-util"] }
-linkerd-meshtls = { path = "../../../meshtls", features = [
+linkerd-meshtls = { path = "../../../meshtls", features = ["rustls"] }
+linkerd-meshtls-rustls = { path = "../../../meshtls/rustls", features = [
     "test-util",
 ] }
 linkerd-tracing = { path = "../../../tracing", features = ["ansi"] }

View File

@@ -117,7 +117,7 @@ impl<N> Inbound<N> {
         let identity = rt
             .identity
             .server()
-            .spawn_with_alpn(vec![transport_header::PROTOCOL.into()])
+            .with_alpn(vec![transport_header::PROTOCOL.into()])
             .expect("TLS credential store must be held");
 
         inner

View File

@@ -113,6 +113,10 @@ impl<S> Inbound<S> {
         &self.runtime.identity
     }
 
+    pub fn proxy_metrics(&self) -> &metrics::Proxy {
+        &self.runtime.metrics.proxy
+    }
+
     /// A helper for gateways to instrument policy checks.
     pub fn authorize_http<N>(
         &self,

View File

@@ -13,7 +13,7 @@ pub(crate) mod error;
 pub use linkerd_app_core::metrics::*;
 
-/// Holds LEGACY inbound proxy metrics.
+/// Holds outbound proxy metrics.
 #[derive(Clone, Debug)]
 pub struct InboundMetrics {
     pub http_authz: authz::HttpAuthzMetrics,
@@ -50,7 +50,7 @@ impl InboundMetrics {
     }
 }
 
-impl legacy::FmtMetrics for InboundMetrics {
+impl FmtMetrics for InboundMetrics {
     fn fmt_metrics(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         self.http_authz.fmt_metrics(f)?;
         self.http_errors.fmt_metrics(f)?;

View File

@@ -1,9 +1,8 @@
 use crate::policy::{AllowPolicy, HttpRoutePermit, Meta, ServerPermit};
 use linkerd_app_core::{
     metrics::{
-        legacy::{Counter, FmtLabels, FmtMetrics},
-        metrics, RouteAuthzLabels, RouteLabels, ServerAuthzLabels, ServerLabel, TargetAddr,
-        TlsAccept,
+        metrics, Counter, FmtLabels, FmtMetrics, RouteAuthzLabels, RouteLabels, ServerAuthzLabels,
+        ServerLabel, TargetAddr, TlsAccept,
     },
     tls,
     transport::OrigDstAddr,

View File

@@ -8,7 +8,7 @@ use crate::{
 };
 use linkerd_app_core::{
     errors::{FailFastError, LoadShedError},
-    metrics::legacy::FmtLabels,
+    metrics::FmtLabels,
     tls,
 };
 use std::fmt;

View File

@@ -1,9 +1,6 @@
 use super::ErrorKind;
 use linkerd_app_core::{
-    metrics::{
-        legacy::{Counter, FmtMetrics},
-        metrics, ServerLabel,
-    },
+    metrics::{metrics, Counter, FmtMetrics, ServerLabel},
     svc::{self, stack::NewMonitor},
     transport::{labels::TargetAddr, OrigDstAddr},
     Error,

View File

@@ -1,9 +1,6 @@
 use super::ErrorKind;
 use linkerd_app_core::{
-    metrics::{
-        legacy::{Counter, FmtMetrics},
-        metrics,
-    },
+    metrics::{metrics, Counter, FmtMetrics},
     svc::{self, stack::NewMonitor},
     transport::{labels::TargetAddr, OrigDstAddr},
     Error,

View File

@@ -33,7 +33,7 @@ static INVALID_POLICY: once_cell::sync::OnceCell<ServerPolicy> = once_cell::sync
 impl<S> Api<S>
 where
-    S: tonic::client::GrpcService<tonic::body::Body, Error = Error> + Clone,
+    S: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error> + Clone,
     S::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error> + Send + 'static,
 {
     pub(super) fn new(
@@ -57,7 +57,7 @@ where
 impl<S> Service<u16> for Api<S>
 where
-    S: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
+    S: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
     S: Clone + Send + Sync + 'static,
     S::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error> + Send + 'static,
     S::Future: Send + 'static,

View File

@@ -40,7 +40,7 @@ impl Config {
         limits: ReceiveLimits,
     ) -> impl GetPolicy + Clone + Send + Sync + 'static
     where
-        C: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
+        C: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
         C: Clone + Unpin + Send + Sync + 'static,
         C::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error>,
         C::ResponseBody: Send + 'static,

View File

@@ -74,7 +74,7 @@ impl<S> Store<S> {
         opaque_ports: RangeInclusiveSet<u16>,
     ) -> Self
     where
-        S: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
+        S: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
         S: Clone + Send + Sync + 'static,
         S::Future: Send,
         S::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error> + Send + 'static,
@@ -138,7 +138,7 @@ impl<S> Store<S> {
 impl<S> GetPolicy for Store<S>
 where
-    S: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
+    S: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
     S: Clone + Send + Sync + 'static,
     S::Future: Send,
     S::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error> + Send + 'static,

View File

@@ -263,7 +263,7 @@ fn orig_dst_addr() -> OrigDstAddr {
     OrigDstAddr(([192, 0, 2, 2], 1000).into())
 }
 
-impl tonic::client::GrpcService<tonic::body::Body> for MockSvc {
+impl tonic::client::GrpcService<tonic::body::BoxBody> for MockSvc {
     type ResponseBody = linkerd_app_core::control::RspBody;
     type Error = Error;
     type Future = futures::future::Pending<Result<http::Response<Self::ResponseBody>, Self::Error>>;
@@ -275,7 +275,7 @@ impl tonic::client::GrpcService<tonic::body::Body> for MockSvc {
         unreachable!()
     }
 
-    fn call(&mut self, _req: http::Request<tonic::body::Body>) -> Self::Future {
+    fn call(&mut self, _req: http::Request<tonic::body::BoxBody>) -> Self::Future {
         unreachable!()
     }
 }

View File

@@ -27,7 +27,7 @@ impl Inbound<()> {
         limits: ReceiveLimits,
     ) -> impl policy::GetPolicy + Clone + Send + Sync + 'static
     where
-        C: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
+        C: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
         C: Clone + Unpin + Send + Sync + 'static,
         C::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error>,
         C::ResponseBody: Send + 'static,

View File

@@ -3,7 +3,9 @@ pub use futures::prelude::*;
 use linkerd_app_core::{
     config,
     dns::Suffix,
-    drain, exp_backoff, identity, metrics,
+    drain, exp_backoff,
+    identity::rustls,
+    metrics,
     proxy::{
         http::{h1, h2},
         tap,
@@ -96,7 +98,7 @@ pub fn runtime() -> (ProxyRuntime, drain::Signal) {
     let (tap, _) = tap::new();
     let (metrics, _) = metrics::Metrics::new(std::time::Duration::from_secs(10));
     let runtime = ProxyRuntime {
-        identity: identity::creds::default_for_test().1,
+        identity: rustls::creds::default_for_test().1.into(),
         metrics: metrics.proxy,
         tap,
         span_sink: None,


@ -28,19 +28,17 @@ ipnet = "2"
linkerd-app = { path = "..", features = ["allow-loopback"] } linkerd-app = { path = "..", features = ["allow-loopback"] }
linkerd-app-core = { path = "../core" } linkerd-app-core = { path = "../core" }
linkerd-app-test = { path = "../test" } linkerd-app-test = { path = "../test" }
linkerd-meshtls = { path = "../../meshtls", features = ["test-util"] }
linkerd-metrics = { path = "../../metrics", features = ["test_util"] } linkerd-metrics = { path = "../../metrics", features = ["test_util"] }
linkerd-rustls = { path = "../../rustls" }
linkerd-tracing = { path = "../../tracing" } linkerd-tracing = { path = "../../tracing" }
maplit = "1" maplit = "1"
parking_lot = "0.12" parking_lot = "0.12"
regex = "1" regex = "1"
rustls-pemfile = "2.2" rustls-pemfile = "2.2"
socket2 = "0.6" socket2 = "0.5"
tokio = { version = "1", features = ["io-util", "net", "rt", "macros"] } tokio = { version = "1", features = ["io-util", "net", "rt", "macros"] }
tokio-rustls = { workspace = true } tokio-rustls = { workspace = true }
tokio-stream = { version = "0.1", features = ["sync"] } tokio-stream = { version = "0.1", features = ["sync"] }
tonic = { workspace = true, features = ["transport", "router"], default-features = false } tonic = { workspace = true, features = ["transport"], default-features = false }
tower = { workspace = true, default-features = false } tower = { workspace = true, default-features = false }
tracing = { workspace = true } tracing = { workspace = true }
@ -74,5 +72,8 @@ flate2 = { version = "1", default-features = false, features = [
] } ] }
# Log streaming isn't enabled by default globally, but we want to test it. # Log streaming isn't enabled by default globally, but we want to test it.
linkerd-app-admin = { path = "../admin", features = ["log-streaming"] } linkerd-app-admin = { path = "../admin", features = ["log-streaming"] }
# No code from this crate is actually used; only necessary to enable the Rustls
# implementation.
linkerd-meshtls = { path = "../../meshtls", features = ["rustls"] }
linkerd-tracing = { path = "../../tracing", features = ["ansi"] } linkerd-tracing = { path = "../../tracing", features = ["ansi"] }
serde_json = "1" serde_json = "1"


@ -8,7 +8,6 @@ use std::{
}; };
use linkerd2_proxy_api::identity as pb; use linkerd2_proxy_api::identity as pb;
use linkerd_rustls::get_default_provider;
use tokio_rustls::rustls::{self, server::WebPkiClientVerifier}; use tokio_rustls::rustls::{self, server::WebPkiClientVerifier};
use tonic as grpc; use tonic as grpc;
@ -35,6 +34,10 @@ type Certify = Box<
> + Send, > + Send,
>; >;
static TLS_VERSIONS: &[&rustls::SupportedProtocolVersion] = &[&rustls::version::TLS13];
static TLS_SUPPORTED_CIPHERSUITES: &[rustls::SupportedCipherSuite] =
&[rustls::crypto::ring::cipher_suite::TLS13_CHACHA20_POLY1305_SHA256];
struct Certificates { struct Certificates {
pub leaf: Vec<u8>, pub leaf: Vec<u8>,
pub intermediates: Vec<Vec<u8>>, pub intermediates: Vec<Vec<u8>>,
@ -107,10 +110,12 @@ impl Identity {
assert_ne!(added, 0, "trust anchors must include at least one cert"); assert_ne!(added, 0, "trust anchors must include at least one cert");
assert_eq!(skipped, 0, "no certs in pemfile should be invalid"); assert_eq!(skipped, 0, "no certs in pemfile should be invalid");
-        let provider = get_default_provider();
+        let mut provider = rustls::crypto::ring::default_provider();
+        provider.cipher_suites = TLS_SUPPORTED_CIPHERSUITES.to_vec();
+        let provider = Arc::new(provider);
let client_config = rustls::ClientConfig::builder_with_provider(provider.clone()) let client_config = rustls::ClientConfig::builder_with_provider(provider.clone())
.with_safe_default_protocol_versions() .with_protocol_versions(TLS_VERSIONS)
.expect("client config must be valid") .expect("client config must be valid")
.with_root_certificates(roots.clone()) .with_root_certificates(roots.clone())
.with_no_client_auth(); .with_no_client_auth();
@ -122,7 +127,7 @@ impl Identity {
.expect("server verifier must be valid"); .expect("server verifier must be valid");
let server_config = rustls::ServerConfig::builder_with_provider(provider) let server_config = rustls::ServerConfig::builder_with_provider(provider)
.with_safe_default_protocol_versions() .with_protocol_versions(TLS_VERSIONS)
.expect("server config must be valid") .expect("server config must be valid")
.with_client_cert_verifier(client_cert_verifier) .with_client_cert_verifier(client_cert_verifier)
.with_single_cert(certs.chain(), key) .with_single_cert(certs.chain(), key)


@ -302,7 +302,7 @@ impl Controller {
} }
pub async fn run(self) -> controller::Listening { pub async fn run(self) -> controller::Listening {
-        let routes = grpc::service::Routes::default()
+        let svc = grpc::transport::Server::builder()
            .add_service(
                inbound_server_policies_server::InboundServerPoliciesServer::new(Server(Arc::new(
                    self.inbound,
@ -310,9 +310,9 @@ impl Controller {
) )
            .add_service(outbound_policies_server::OutboundPoliciesServer::new(
                Server(Arc::new(self.outbound)),
-            ));
+            ))
+            .into_service();

-        controller::run(RoutesSvc(routes), "support policy controller", None).await
+        controller::run(RoutesSvc(svc), "support policy controller", None).await
    }
}
@ -525,9 +525,7 @@ impl Service<Request<hyper::body::Incoming>> for RoutesSvc {
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
let Self(routes) = self; let Self(routes) = self;
-        <grpc::service::Routes as Service<Request<UnsyncBoxBody<Bytes, grpc::Status>>>>::poll_ready(
-            routes, cx,
-        )
+        routes.poll_ready(cx)
} }
fn call(&mut self, req: Request<hyper::body::Incoming>) -> Self::Future { fn call(&mut self, req: Request<hyper::body::Incoming>) -> Self::Future {


@ -124,6 +124,26 @@ async fn inbound_timeout() {
.await; .await;
} }
/// Tests that the detect metric is labeled and incremented on I/O error.
#[tokio::test]
async fn inbound_io_err() {
let _trace = trace_init();
let (proxy, metrics) = Test::default().run().await;
let client = crate::tcp::client(proxy.inbound);
let tcp_client = client.connect().await;
tcp_client.write(TcpFixture::HELLO_MSG).await;
drop(tcp_client);
metric(&proxy)
.label("error", "i/o")
.value(1u64)
.assert_in(&metrics)
.await;
}
/// Tests that the detect metric is not incremented when TLS is successfully /// Tests that the detect metric is not incremented when TLS is successfully
/// detected. /// detected.
#[tokio::test] #[tokio::test]
@ -169,6 +189,44 @@ async fn inbound_success() {
metric.assert_in(&metrics).await; metric.assert_in(&metrics).await;
} }
/// Tests both of the above cases together.
#[tokio::test]
async fn inbound_multi() {
let _trace = trace_init();
let (proxy, metrics) = Test::default().run().await;
let client = crate::tcp::client(proxy.inbound);
let metric = metric(&proxy);
let timeout_metric = metric.clone().label("error", "tls detection timeout");
let io_metric = metric.label("error", "i/o");
let tcp_client = client.connect().await;
tokio::time::sleep(TIMEOUT + Duration::from_millis(15)) // just in case
.await;
timeout_metric.clone().value(1u64).assert_in(&metrics).await;
drop(tcp_client);
let tcp_client = client.connect().await;
tcp_client.write(TcpFixture::HELLO_MSG).await;
drop(tcp_client);
io_metric.clone().value(1u64).assert_in(&metrics).await;
timeout_metric.clone().value(1u64).assert_in(&metrics).await;
let tcp_client = client.connect().await;
tokio::time::sleep(TIMEOUT + Duration::from_millis(15)) // just in case
.await;
io_metric.clone().value(1u64).assert_in(&metrics).await;
timeout_metric.clone().value(2u64).assert_in(&metrics).await;
drop(tcp_client);
}
/// Tests that TLS detect failure metrics are collected for the direct stack. /// Tests that TLS detect failure metrics are collected for the direct stack.
#[tokio::test] #[tokio::test]
async fn inbound_direct_multi() { async fn inbound_direct_multi() {


@ -13,7 +13,7 @@ Configures and runs the outbound proxy
default = [] default = []
allow-loopback = [] allow-loopback = []
test-subscriber = [] test-subscriber = []
test-util = ["linkerd-app-test", "linkerd-meshtls/test-util", "dep:http-body"] test-util = ["linkerd-app-test", "linkerd-meshtls-rustls/test-util", "dep:http-body"]
prometheus-client-rust-242 = [] # TODO prometheus-client-rust-242 = [] # TODO
@ -42,7 +42,7 @@ linkerd-http-prom = { path = "../../http/prom" }
linkerd-http-retry = { path = "../../http/retry" } linkerd-http-retry = { path = "../../http/retry" }
linkerd-http-route = { path = "../../http/route" } linkerd-http-route = { path = "../../http/route" }
linkerd-identity = { path = "../../identity" } linkerd-identity = { path = "../../identity" }
linkerd-meshtls = { path = "../../meshtls", optional = true, default-features = false } linkerd-meshtls-rustls = { path = "../../meshtls/rustls", optional = true }
linkerd-opaq-route = { path = "../../opaq-route" } linkerd-opaq-route = { path = "../../opaq-route" }
linkerd-proxy-client-policy = { path = "../../proxy/client-policy", features = [ linkerd-proxy-client-policy = { path = "../../proxy/client-policy", features = [
"proto", "proto",
@ -67,7 +67,8 @@ linkerd-app-test = { path = "../test", features = ["client-policy"] }
linkerd-http-box = { path = "../../http/box" } linkerd-http-box = { path = "../../http/box" }
linkerd-http-prom = { path = "../../http/prom", features = ["test-util"] } linkerd-http-prom = { path = "../../http/prom", features = ["test-util"] }
linkerd-io = { path = "../../io", features = ["tokio-test"] } linkerd-io = { path = "../../io", features = ["tokio-test"] }
-linkerd-meshtls = { path = "../../meshtls", features = [
-    "test-util",
-] }
+linkerd-meshtls = { path = "../../meshtls", features = ["rustls"] }
+linkerd-meshtls-rustls = { path = "../../meshtls/rustls", features = [
+    "test-util",
+] }
linkerd-mock-http-body = { path = "../../mock/http-body" } linkerd-mock-http-body = { path = "../../mock/http-body" }


@ -146,7 +146,7 @@ impl Outbound<()> {
export_hostname_labels: bool, export_hostname_labels: bool,
) -> impl policy::GetPolicy ) -> impl policy::GetPolicy
where where
C: tonic::client::GrpcService<tonic::body::Body, Error = Error>, C: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
C: Clone + Unpin + Send + Sync + 'static, C: Clone + Unpin + Send + Sync + 'static,
C::ResponseBody: proxy::http::Body<Data = tonic::codegen::Bytes, Error = Error>, C::ResponseBody: proxy::http::Body<Data = tonic::codegen::Bytes, Error = Error>,
C::ResponseBody: Send + 'static, C::ResponseBody: Send + 'static,


@ -130,7 +130,7 @@ impl OutboundMetrics {
} }
} }
impl legacy::FmtMetrics for OutboundMetrics { impl FmtMetrics for OutboundMetrics {
fn fmt_metrics(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt_metrics(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.http_errors.fmt_metrics(f)?; self.http_errors.fmt_metrics(f)?;
self.tcp_errors.fmt_metrics(f)?; self.tcp_errors.fmt_metrics(f)?;
@ -243,7 +243,7 @@ impl EncodeLabelSet for RouteRef {
// === impl ConcreteLabels === // === impl ConcreteLabels ===
impl legacy::FmtLabels for ConcreteLabels { impl FmtLabels for ConcreteLabels {
fn fmt_labels(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt_labels(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let ConcreteLabels(parent, backend) = self; let ConcreteLabels(parent, backend) = self;


@ -5,7 +5,7 @@ pub(crate) use self::{http::Http, tcp::Tcp};
use crate::http::IdentityRequired; use crate::http::IdentityRequired;
use linkerd_app_core::{ use linkerd_app_core::{
errors::{FailFastError, LoadShedError}, errors::{FailFastError, LoadShedError},
metrics::legacy::FmtLabels, metrics::FmtLabels,
proxy::http::ResponseTimeoutError, proxy::http::ResponseTimeoutError,
}; };
use std::fmt; use std::fmt;


@ -1,9 +1,6 @@
use super::ErrorKind; use super::ErrorKind;
use linkerd_app_core::{ use linkerd_app_core::{
-    metrics::{
-        legacy::{Counter, FmtMetrics},
-        metrics,
-    },
+    metrics::{metrics, Counter, FmtMetrics},
svc, Error, svc, Error,
}; };
use parking_lot::RwLock; use parking_lot::RwLock;


@ -1,9 +1,6 @@
use super::ErrorKind; use super::ErrorKind;
use linkerd_app_core::{ use linkerd_app_core::{
-    metrics::{
-        legacy::{Counter, FmtMetrics},
-        metrics,
-    },
+    metrics::{metrics, Counter, FmtMetrics},
svc, svc,
transport::{labels::TargetAddr, OrigDstAddr}, transport::{labels::TargetAddr, OrigDstAddr},
Error, Error,


@ -33,7 +33,7 @@ static INVALID_POLICY: once_cell::sync::OnceCell<ClientPolicy> = once_cell::sync
impl<S> Api<S> impl<S> Api<S>
where where
S: tonic::client::GrpcService<tonic::body::Body, Error = Error> + Clone, S: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error> + Clone,
S::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error> + Send + 'static, S::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error> + Send + 'static,
{ {
pub(crate) fn new( pub(crate) fn new(
@ -59,7 +59,7 @@ where
impl<S> Service<Addr> for Api<S> impl<S> Service<Addr> for Api<S>
where where
S: tonic::client::GrpcService<tonic::body::Body, Error = Error>, S: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
S: Clone + Send + Sync + 'static, S: Clone + Send + Sync + 'static,
S::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error> + Send + 'static, S::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error> + Send + 'static,
S::Future: Send + 'static, S::Future: Send + 'static,


@ -60,7 +60,7 @@ pub(crate) fn runtime() -> (ProxyRuntime, drain::Signal) {
let (tap, _) = tap::new(); let (tap, _) = tap::new();
let (metrics, _) = metrics::Metrics::new(std::time::Duration::from_secs(10)); let (metrics, _) = metrics::Metrics::new(std::time::Duration::from_secs(10));
let runtime = ProxyRuntime { let runtime = ProxyRuntime {
identity: linkerd_meshtls::creds::default_for_test().1, identity: linkerd_meshtls_rustls::creds::default_for_test().1.into(),
metrics: metrics.proxy, metrics: metrics.proxy,
tap, tap,
span_sink: None, span_sink: None,


@ -11,18 +11,13 @@ use linkerd_proxy_client_policy::{self as client_policy, tls::sni};
use parking_lot::Mutex; use parking_lot::Mutex;
use std::{ use std::{
collections::HashMap, collections::HashMap,
marker::PhantomData,
net::SocketAddr, net::SocketAddr,
sync::Arc, sync::Arc,
task::{Context, Poll}, task::{Context, Poll},
time::Duration, time::Duration,
}; };
use tokio::sync::watch; use tokio::sync::watch;
-use tokio_rustls::rustls::{
-    internal::msgs::codec::{Codec, Reader},
-    pki_types::DnsName,
-    InvalidMessage,
-};
+use tokio_rustls::rustls::pki_types::DnsName;
mod basic; mod basic;
@ -175,57 +170,44 @@ fn sni_route(backend: client_policy::Backend, sni: sni::MatchSni) -> client_poli
// generates a sample ClientHello TLS message for testing // generates a sample ClientHello TLS message for testing
fn generate_client_hello(sni: &str) -> Vec<u8> { fn generate_client_hello(sni: &str) -> Vec<u8> {
    use tokio_rustls::rustls::{
-        internal::msgs::{base::Payload, codec::Codec, message::PlainMessage},
-        ContentType, ProtocolVersion,
+        internal::msgs::{
+            base::Payload,
+            codec::{Codec, Reader},
+            enums::Compression,
+            handshake::{
+                ClientExtension, ClientHelloPayload, HandshakeMessagePayload, HandshakePayload,
+                Random, ServerName, SessionId,
+            },
+            message::{MessagePayload, PlainMessage},
+        },
+        CipherSuite, ContentType, HandshakeType, ProtocolVersion,
    };
let sni = DnsName::try_from(sni.to_string()).unwrap(); let sni = DnsName::try_from(sni.to_string()).unwrap();
let sni = trim_hostname_trailing_dot_for_sni(&sni); let sni = trim_hostname_trailing_dot_for_sni(&sni);
-    // rustls has internal-only types that can encode a ClientHello, but they are mostly
-    // inaccessible and an unstable part of the public API anyway. Manually encode one here for
-    // testing only instead.
-
-    let mut hs_payload_bytes = vec![];
-    1u8.encode(&mut hs_payload_bytes); // client hello ID
-
-    let client_hello_body = {
-        let mut payload = LengthPayload::<U24>::empty();
-
-        payload.buf.extend_from_slice(&[0x03, 0x03]); // client version, TLSv1.2
-
-        payload.buf.extend_from_slice(&[0u8; 32]); // random
-
-        0u8.encode(&mut payload.buf); // session ID
-
-        LengthPayload::<u16>::from_slice(&[0x00, 0x00] /* TLS_NULL_WITH_NULL_NULL */)
-            .encode(&mut payload.buf);
-
-        LengthPayload::<u8>::from_slice(&[0x00] /* no compression */).encode(&mut payload.buf);
-
-        let extensions = {
-            let mut payload = LengthPayload::<u16>::empty();
-            0u16.encode(&mut payload.buf); // server name extension ID
-
-            let server_name_extension = {
-                let mut payload = LengthPayload::<u16>::empty();
-                let server_name = {
-                    let mut payload = LengthPayload::<u16>::empty();
-                    0u8.encode(&mut payload.buf); // DNS hostname ID
-                    LengthPayload::<u16>::from_slice(sni.as_ref().as_bytes())
-                        .encode(&mut payload.buf);
-                    payload
-                };
-                server_name.encode(&mut payload.buf);
-                payload
-            };
-            server_name_extension.encode(&mut payload.buf);
-            payload
-        };
-        extensions.encode(&mut payload.buf);
-        payload
-    };
-    client_hello_body.encode(&mut hs_payload_bytes);
+    let mut server_name_bytes = vec![];
+    0u8.encode(&mut server_name_bytes); // encode the type first
+    (sni.as_ref().len() as u16).encode(&mut server_name_bytes); // then the length as u16
+    server_name_bytes.extend_from_slice(sni.as_ref().as_bytes()); // then the server name itself
+
+    let server_name =
+        ServerName::read(&mut Reader::init(&server_name_bytes)).expect("Server name is valid");
+
+    let hs_payload = HandshakeMessagePayload {
+        typ: HandshakeType::ClientHello,
+        payload: HandshakePayload::ClientHello(ClientHelloPayload {
+            client_version: ProtocolVersion::TLSv1_2,
+            random: Random::from([0; 32]),
+            session_id: SessionId::read(&mut Reader::init(&[0])).unwrap(),
+            cipher_suites: vec![CipherSuite::TLS_NULL_WITH_NULL_NULL],
+            compression_methods: vec![Compression::Null],
+            extensions: vec![ClientExtension::ServerName(vec![server_name])],
+        }),
+    };
+
+    let mut hs_payload_bytes = Vec::default();
+    MessagePayload::handshake(hs_payload).encode(&mut hs_payload_bytes);
let message = PlainMessage { let message = PlainMessage {
typ: ContentType::Handshake, typ: ContentType::Handshake,
@ -236,65 +218,6 @@ fn generate_client_hello(sni: &str) -> Vec<u8> {
message.into_unencrypted_opaque().encode() message.into_unencrypted_opaque().encode()
} }
#[derive(Debug)]
struct LengthPayload<T> {
buf: Vec<u8>,
_boo: PhantomData<fn() -> T>,
}
impl<T> LengthPayload<T> {
fn empty() -> Self {
Self {
buf: vec![],
_boo: PhantomData,
}
}
fn from_slice(s: &[u8]) -> Self {
Self {
buf: s.to_vec(),
_boo: PhantomData,
}
}
}
impl Codec<'_> for LengthPayload<u8> {
fn encode(&self, bytes: &mut Vec<u8>) {
(self.buf.len() as u8).encode(bytes);
bytes.extend_from_slice(&self.buf);
}
fn read(_: &mut Reader<'_>) -> std::result::Result<Self, InvalidMessage> {
unimplemented!()
}
}
impl Codec<'_> for LengthPayload<u16> {
fn encode(&self, bytes: &mut Vec<u8>) {
(self.buf.len() as u16).encode(bytes);
bytes.extend_from_slice(&self.buf);
}
fn read(_: &mut Reader<'_>) -> std::result::Result<Self, InvalidMessage> {
unimplemented!()
}
}
#[derive(Debug)]
struct U24;
impl Codec<'_> for LengthPayload<U24> {
fn encode(&self, bytes: &mut Vec<u8>) {
let len = self.buf.len() as u32;
bytes.extend_from_slice(&len.to_be_bytes()[1..]);
bytes.extend_from_slice(&self.buf);
}
fn read(_: &mut Reader<'_>) -> std::result::Result<Self, InvalidMessage> {
unimplemented!()
}
}
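For reference, a hand-worked example (not in the source) of what the helpers above emit for the server_name extension when the SNI is "example.com" (11 bytes), per RFC 6066:

// 00 00                              extension type: server_name (0)
// 00 10                              extension data length: 16
// 00 0e                              server_name_list length: 14
// 00                                 name_type: host_name (0)
// 00 0b                              host_name length: 11
// 65 78 61 6d 70 6c 65 2e 63 6f 6d   "example.com"
//
// Each nested `LengthPayload` contributes exactly one of the length fields.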
fn trim_hostname_trailing_dot_for_sni(dns_name: &DnsName<'_>) -> DnsName<'static> { fn trim_hostname_trailing_dot_for_sni(dns_name: &DnsName<'_>) -> DnsName<'static> {
let dns_name_str = dns_name.as_ref(); let dns_name_str = dns_name.as_ref();


@ -43,7 +43,7 @@ impl Config {
) -> Result< ) -> Result<
Dst< Dst<
impl svc::Service< impl svc::Service<
http::Request<tonic::body::Body>, http::Request<tonic::body::BoxBody>,
Response = http::Response<control::RspBody>, Response = http::Response<control::RspBody>,
Error = Error, Error = Error,
Future = impl Send, Future = impl Send,


@ -343,11 +343,15 @@ const DEFAULT_INBOUND_HTTP1_CONNECTION_POOL_IDLE_TIMEOUT: Duration = Duration::f
// TODO(ver) This should be configurable at the load balancer level. // TODO(ver) This should be configurable at the load balancer level.
const DEFAULT_OUTBOUND_HTTP1_CONNECTION_POOL_IDLE_TIMEOUT: Duration = Duration::from_secs(3); const DEFAULT_OUTBOUND_HTTP1_CONNECTION_POOL_IDLE_TIMEOUT: Duration = Duration::from_secs(3);
-// By default, we limit the number of connections that may be opened per-host.
-// We pick a high number (10k) that shouldn't interfere with most workloads, but
-// will prevent issues with our outbound HTTP client from exhausting the file
-// descriptors available to the process.
-const DEFAULT_INBOUND_MAX_IDLE_CONNS_PER_ENDPOINT: usize = 10_000;
+// By default, we don't limit the number of connections a connection pool may
+// use, as doing so can severely impact CPU utilization for applications with
+// many concurrent requests. It's generally preferable to use the MAX_IDLE_AGE
+// limitations to quickly drop idle connections.
+const DEFAULT_INBOUND_MAX_IDLE_CONNS_PER_ENDPOINT: usize = usize::MAX;

+// By default, we limit the number of outbound connections that may be opened
+// per-host. We pick a high number (10k) that shouldn't interfere with most
+// workloads, but will prevent issues with our outbound HTTP client from
+// exhausting the file descriptors available to the process.
 const DEFAULT_OUTBOUND_MAX_IDLE_CONNS_PER_ENDPOINT: usize = 10_000;
// These settings limit the number of requests that have not received responses, // These settings limit the number of requests that have not received responses,
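The comments above capture the tradeoff: the release branch prefers CPU-friendly unbounded pooling plus idle-age eviction, while main bounds the pool to protect file descriptors. A back-of-the-envelope sketch (illustrative only, not from the source):

// Every idle pooled connection pins one socket, i.e. one file descriptor.
fn worst_case_idle_fds(endpoints: usize, max_idle_per_endpoint: usize) -> usize {
    endpoints.saturating_mul(max_idle_per_endpoint)
}

// main: worst_case_idle_fds(4, 10_000) == 40_000, a predictable ceiling.
// release/v2.308.0 (inbound): with usize::MAX, the effective ceiling is the
// process's fd limit, which is exactly the exhaustion main's comment warns about.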


@ -4,8 +4,7 @@ pub use linkerd_app_core::identity::{client, Id};
use linkerd_app_core::{ use linkerd_app_core::{
control, dns, control, dns,
identity::{ identity::{
-        client::linkerd::Certify, creds, watch as watch_identity, CertMetrics, Credentials,
-        DerX509, WithCertMetrics,
+        client::linkerd::Certify, creds, CertMetrics, Credentials, DerX509, Mode, WithCertMetrics,
}, },
metrics::{prom, ControlHttp as ClientMetrics}, metrics::{prom, ControlHttp as ClientMetrics},
Result, Result,
@ -138,7 +137,8 @@ fn watch(
watch::Receiver<bool>, watch::Receiver<bool>,
)> { )> {
let (tx, ready) = watch::channel(false); let (tx, ready) = watch::channel(false);
-    let (store, receiver) = watch_identity(tls.id, tls.server_name, &tls.trust_anchors_pem)?;
+    let (store, receiver) =
+        Mode::default().watch(tls.id, tls.server_name, &tls.trust_anchors_pem)?;
let cred = WithCertMetrics::new(metrics, NotifyReady { store, tx }); let cred = WithCertMetrics::new(metrics, NotifyReady { store, tx });
Ok((cred, receiver, ready)) Ok((cred, receiver, ready))
} }


@ -19,10 +19,9 @@ use linkerd_app_core::{
config::ServerConfig, config::ServerConfig,
control::{ControlAddr, Metrics as ControlMetrics}, control::{ControlAddr, Metrics as ControlMetrics},
dns, drain, dns, drain,
-    metrics::{legacy::FmtMetrics, prom},
+    metrics::{prom, FmtMetrics},
    serve,
    svc::Param,
-    tls_info,
transport::{addrs::*, listen::Bind}, transport::{addrs::*, listen::Bind},
Error, ProxyRuntime, Error, ProxyRuntime,
}; };
@ -252,6 +251,9 @@ impl Config {
export_hostname_labels, export_hostname_labels,
); );
let dst_addr = dst.addr.clone();
// registry.sub_registry_with_prefix("gateway"),
let gateway = gateway::Gateway::new(gateway, inbound.clone(), outbound.clone()).stack( let gateway = gateway::Gateway::new(gateway, inbound.clone(), outbound.clone()).stack(
dst.resolve.clone(), dst.resolve.clone(),
dst.profiles.clone(), dst.profiles.clone(),
@ -302,7 +304,6 @@ impl Config {
error!(%error, "Failed to register process metrics"); error!(%error, "Failed to register process metrics");
} }
registry.register("proxy_build_info", "Proxy build info", BUILD_INFO.metric()); registry.register("proxy_build_info", "Proxy build info", BUILD_INFO.metric());
registry.register("rustls_info", "Proxy TLS info", tls_info::metric());
let admin = { let admin = {
let identity = identity.receiver().server(); let identity = identity.receiver().server();
@ -329,7 +330,7 @@ impl Config {
Ok(App { Ok(App {
admin, admin,
dst: dst.addr, dst: dst_addr,
drain: drain_tx, drain: drain_tx,
identity, identity,
inbound_addr, inbound_addr,


@ -46,7 +46,7 @@ impl Config {
) -> Result< ) -> Result<
Policy< Policy<
impl svc::Service< impl svc::Service<
http::Request<tonic::body::Body>, http::Request<tonic::body::BoxBody>,
Response = http::Response<control::RspBody>, Response = http::Response<control::RspBody>,
Error = Error, Error = Error,
Future = impl Send, Future = impl Send,


@ -6,7 +6,7 @@ use linkerd_opencensus::{self as opencensus, metrics, proto};
use std::{collections::HashMap, time::SystemTime}; use std::{collections::HashMap, time::SystemTime};
use tokio::sync::mpsc; use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream; use tokio_stream::wrappers::ReceiverStream;
use tonic::{body::Body as TonicBody, client::GrpcService}; use tonic::{body::BoxBody, client::GrpcService};
use tracing::Instrument; use tracing::Instrument;
pub(super) fn create_collector<S>( pub(super) fn create_collector<S>(
@ -18,7 +18,7 @@ pub(super) fn create_collector<S>(
legacy_metrics: metrics::Registry, legacy_metrics: metrics::Registry,
) -> EnabledCollector ) -> EnabledCollector
where where
S: GrpcService<TonicBody> + Clone + Send + 'static, S: GrpcService<BoxBody> + Clone + Send + 'static,
S::Error: Into<Error>, S::Error: Into<Error>,
S::Future: Send, S::Future: Send,
S::ResponseBody: Body<Data = tonic::codegen::Bytes> + Send + 'static, S::ResponseBody: Body<Data = tonic::codegen::Bytes> + Send + 'static,


@ -15,7 +15,7 @@ use std::{
}; };
use tokio::sync::mpsc; use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream; use tokio_stream::wrappers::ReceiverStream;
use tonic::{body::Body as TonicBody, client::GrpcService}; use tonic::{body::BoxBody, client::GrpcService};
use tracing::Instrument; use tracing::Instrument;
pub(super) struct OtelCollectorAttributes { pub(super) struct OtelCollectorAttributes {
@ -31,7 +31,7 @@ pub(super) fn create_collector<S>(
legacy_metrics: metrics::Registry, legacy_metrics: metrics::Registry,
) -> EnabledCollector ) -> EnabledCollector
where where
S: GrpcService<TonicBody> + Clone + Send + 'static, S: GrpcService<BoxBody> + Clone + Send + 'static,
S::Error: Into<Error>, S::Error: Into<Error>,
S::Future: Send, S::Future: Send,
S::ResponseBody: Body<Data = tonic::codegen::Bytes> + Send + 'static, S::ResponseBody: Body<Data = tonic::codegen::Bytes> + Send + 'static,


@ -11,6 +11,7 @@ ahash = "0.8"
linkerd-stack = { path = "../stack" } linkerd-stack = { path = "../stack" }
parking_lot = "0.12" parking_lot = "0.12"
rand = { version = "0.9", features = ["small_rng"] } rand = { version = "0.9", features = ["small_rng"] }
tokio = { version = "1", features = ["macros"] }
tracing = { workspace = true } tracing = { workspace = true }
[dev-dependencies] [dev-dependencies]


@ -7,15 +7,14 @@ edition = { workspace = true }
publish = { workspace = true } publish = { workspace = true }
[dependencies] [dependencies]
futures = { version = "0.3", default-features = false }
hickory-resolver = "0.25.2" hickory-resolver = "0.25.2"
linkerd-dns-name = { path = "./name" } linkerd-dns-name = { path = "./name" }
linkerd-error = { path = "../error" }
prometheus-client = { workspace = true } prometheus-client = { workspace = true }
thiserror = "2" thiserror = "2"
tokio = { version = "1", features = ["rt", "sync", "time"] } tokio = { version = "1", features = ["rt", "sync", "time"] }
tracing = { workspace = true } tracing = { workspace = true }
[dev-dependencies]
linkerd-error = { path = "../error" }
[lints.rust] [lints.rust]
unexpected_cfgs = { level = "warn", check-cfg = ['cfg(fuzzing)'] } unexpected_cfgs = { level = "warn", check-cfg = ['cfg(fuzzing)'] }


@ -15,10 +15,10 @@ tokio = { version = "1", default-features = false }
tracing = { workspace = true } tracing = { workspace = true }
linkerd-error = { path = "../../error" } linkerd-error = { path = "../../error" }
linkerd-http-box = { path = "../../http/box" }
linkerd-stack = { path = "../../stack" } linkerd-stack = { path = "../../stack" }
[dev-dependencies] [dev-dependencies]
tokio-test = "0.4" tokio-test = "0.4"
tower-test = { workspace = true } tower-test = { workspace = true }
linkerd-http-box = { path = "../../http/box" }
linkerd-tracing = { path = "../../tracing", features = ["ansi"] } linkerd-tracing = { path = "../../tracing", features = ["ansi"] }


@ -2,7 +2,7 @@ use super::{ClassifyEos, ClassifyResponse};
use futures::{prelude::*, ready}; use futures::{prelude::*, ready};
use http_body::Frame; use http_body::Frame;
use linkerd_error::Error; use linkerd_error::Error;
use linkerd_stack::Service; use linkerd_stack::{layer, ExtractParam, NewService, Service};
use pin_project::{pin_project, pinned_drop}; use pin_project::{pin_project, pinned_drop};
use std::{ use std::{
fmt::Debug, fmt::Debug,
@ -12,6 +12,18 @@ use std::{
}; };
use tokio::sync::mpsc; use tokio::sync::mpsc;
/// Constructs new [`BroadcastClassification`] services.
///
/// `X` is an [`ExtractParam`] implementation that extracts a [`Tx`] from each
/// target. The [`Tx`] is used to broadcast the classification of each response
/// from the constructed [`BroadcastClassification`] service.
#[derive(Debug)]
pub struct NewBroadcastClassification<C, X, N> {
inner: N,
extract: X,
_marker: PhantomData<fn() -> C>,
}
/// A HTTP `Service` that applies a [`ClassifyResponse`] to each response, and /// A HTTP `Service` that applies a [`ClassifyResponse`] to each response, and
/// broadcasts the classification over a [`mpsc`] channel. /// broadcasts the classification over a [`mpsc`] channel.
#[derive(Debug)] #[derive(Debug)]
@ -21,6 +33,14 @@ pub struct BroadcastClassification<C: ClassifyResponse, S> {
_marker: PhantomData<fn() -> C>, _marker: PhantomData<fn() -> C>,
} }
/// A handle to a [`mpsc`] channel over which response classifications are
/// broadcasted.
///
/// This is extracted from a target value by [`NewBroadcastClassification`] when
/// constructing a [`BroadcastClassification`] service.
#[derive(Clone, Debug)]
pub struct Tx<C>(pub mpsc::Sender<C>);
#[pin_project] #[pin_project]
pub struct ResponseFuture<C: ClassifyResponse, B, F> { pub struct ResponseFuture<C: ClassifyResponse, B, F> {
#[pin] #[pin]
@ -42,6 +62,59 @@ struct State<C, T> {
tx: mpsc::Sender<T>, tx: mpsc::Sender<T>,
} }
// === impl NewBroadcastClassification ===
impl<C, X: Clone, N> NewBroadcastClassification<C, X, N> {
pub fn new(extract: X, inner: N) -> Self {
Self {
inner,
extract,
_marker: PhantomData,
}
}
/// Returns a [`layer::Layer`] that constructs `NewBroadcastClassification`
/// [`NewService`]s, using the provided [`ExtractParam`] implementation to
/// extract a classification [`Tx`] from the target.
pub fn layer_via(extract: X) -> impl layer::Layer<N, Service = Self> + Clone {
layer::mk(move |inner| Self::new(extract.clone(), inner))
}
}
impl<C, N> NewBroadcastClassification<C, (), N> {
/// Returns a [`layer::Layer`] that constructs `NewBroadcastClassification`
/// [`NewService`]s when the target type implements
/// [`linkerd_stack::Param`]`<`[`Tx`]`>`.
pub fn layer() -> impl layer::Layer<N, Service = Self> + Clone {
Self::layer_via(())
}
}
impl<T, C, X, N> NewService<T> for NewBroadcastClassification<C, X, N>
where
C: ClassifyResponse,
X: ExtractParam<Tx<C::Class>, T>,
N: NewService<T>,
{
type Service = BroadcastClassification<C, N::Service>;
fn new_service(&self, target: T) -> Self::Service {
let Tx(tx) = self.extract.extract_param(&target);
let inner = self.inner.new_service(target);
BroadcastClassification::new(tx, inner)
}
}
impl<C, X: Clone, N: Clone> Clone for NewBroadcastClassification<C, X, N> {
fn clone(&self) -> Self {
Self {
inner: self.inner.clone(),
extract: self.extract.clone(),
_marker: PhantomData,
}
}
}
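A minimal usage sketch of this API (assumptions: `Class` stands in for a real classification type, and the stack wiring around it is elided):

use linkerd_http_classify::Tx;
use linkerd_stack::Param;
use tokio::sync::mpsc;

type Class = u32; // hypothetical classification type

#[derive(Clone)]
struct Target {
    classes: mpsc::Sender<Class>,
}

// With this impl, `NewBroadcastClassification::layer()` can use the unit
// extractor `()` to pull the channel out of each target.
impl Param<Tx<Class>> for Target {
    fn param(&self) -> Tx<Class> {
        Tx(self.classes.clone())
    }
}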
// === impl BroadcastClassification === // === impl BroadcastClassification ===
impl<C: ClassifyResponse, S> BroadcastClassification<C, S> { impl<C: ClassifyResponse, S> BroadcastClassification<C, S> {


@ -4,12 +4,12 @@
use linkerd_error::Error; use linkerd_error::Error;
pub use self::{ pub use self::{
channel::BroadcastClassification, channel::{BroadcastClassification, NewBroadcastClassification, Tx},
gate::{NewClassifyGate, NewClassifyGateSet}, gate::{NewClassifyGate, NewClassifyGateSet},
insert::{InsertClassifyResponse, NewInsertClassifyResponse}, insert::{InsertClassifyResponse, NewInsertClassifyResponse},
}; };
mod channel; pub mod channel;
pub mod gate; pub mod gate;
mod insert; mod insert;


@ -10,9 +10,11 @@ publish = { workspace = true }
test-util = [] test-util = []
[dependencies] [dependencies]
bytes = { workspace = true }
futures = { version = "0.3", default-features = false } futures = { version = "0.3", default-features = false }
http = { workspace = true } http = { workspace = true }
http-body = { workspace = true } http-body = { workspace = true }
hyper = { workspace = true, features = ["http1", "http2"] }
parking_lot = "0.12" parking_lot = "0.12"
pin-project = "1" pin-project = "1"
tokio = { version = "1", features = ["time"] } tokio = { version = "1", features = ["time"] }


@ -2,7 +2,7 @@
#![forbid(unsafe_code)] #![forbid(unsafe_code)]
pub use self::{requests::Requests, retries::Retries}; pub use self::{requests::Requests, retries::Retries};
use linkerd_metrics::legacy::SharedStore; use linkerd_metrics::SharedStore;
use parking_lot::Mutex; use parking_lot::Mutex;
use std::{fmt, hash::Hash, time::Duration}; use std::{fmt, hash::Hash, time::Duration};


@ -4,10 +4,7 @@ mod service;
pub use self::service::{NewHttpMetrics, ResponseBody}; pub use self::service::{NewHttpMetrics, ResponseBody};
use super::Report; use super::Report;
use linkerd_http_classify::ClassifyResponse; use linkerd_http_classify::ClassifyResponse;
-use linkerd_metrics::{
-    latency,
-    legacy::{Counter, FmtMetrics, Histogram, LastUpdate, NewMetrics},
-};
+use linkerd_metrics::{latency, Counter, FmtMetrics, Histogram, LastUpdate, NewMetrics};
use linkerd_stack::{self as svc, layer}; use linkerd_stack::{self as svc, layer};
use std::{collections::HashMap, fmt::Debug, hash::Hash}; use std::{collections::HashMap, fmt::Debug, hash::Hash};
use tokio::time::{Duration, Instant}; use tokio::time::{Duration, Instant};
@ -149,7 +146,7 @@ impl ClassMetrics {
mod tests { mod tests {
#[test] #[test]
fn expiry() { fn expiry() {
use linkerd_metrics::legacy::FmtLabels; use linkerd_metrics::FmtLabels;
use std::fmt; use std::fmt;
use tokio::time::{Duration, Instant}; use tokio::time::{Duration, Instant};


@ -1,8 +1,7 @@
use super::{ClassMetrics, Metrics, StatusMetrics}; use super::{ClassMetrics, Metrics, StatusMetrics};
use crate::{Prefixed, Report}; use crate::{Prefixed, Report};
use linkerd_metrics::{ use linkerd_metrics::{
-    latency,
-    legacy::{Counter, FmtLabels, FmtMetric, FmtMetrics, Histogram, Metric, Store},
+    latency, Counter, FmtLabels, FmtMetric, FmtMetrics, Histogram, Metric, Store,
}; };
use parking_lot::Mutex; use parking_lot::Mutex;
use std::{fmt, hash::Hash}; use std::{fmt, hash::Hash};


@ -3,7 +3,7 @@ use futures::{ready, TryFuture};
use http_body::{Body, Frame}; use http_body::{Body, Frame};
use linkerd_error::Error; use linkerd_error::Error;
use linkerd_http_classify::{ClassifyEos, ClassifyResponse}; use linkerd_http_classify::{ClassifyEos, ClassifyResponse};
use linkerd_metrics::legacy::NewMetrics; use linkerd_metrics::NewMetrics;
use linkerd_stack::Proxy; use linkerd_stack::Proxy;
use parking_lot::Mutex; use parking_lot::Mutex;
use pin_project::{pin_project, pinned_drop}; use pin_project::{pin_project, pinned_drop};


@ -1,5 +1,5 @@
use super::{Prefixed, Registry, Report}; use super::{Prefixed, Registry, Report};
use linkerd_metrics::legacy::{Counter, FmtLabels, FmtMetric, FmtMetrics, LastUpdate, Metric}; use linkerd_metrics::{Counter, FmtLabels, FmtMetric, FmtMetrics, LastUpdate, Metric};
use parking_lot::Mutex; use parking_lot::Mutex;
use std::{fmt, hash::Hash, sync::Arc}; use std::{fmt, hash::Hash, sync::Arc};
use tokio::time::{Duration, Instant}; use tokio::time::{Duration, Instant};


@ -17,6 +17,7 @@ bytes = { workspace = true }
futures = { version = "0.3", default-features = false } futures = { version = "0.3", default-features = false }
http = { workspace = true } http = { workspace = true }
http-body = { workspace = true } http-body = { workspace = true }
parking_lot = "0.12"
pin-project = "1" pin-project = "1"
prometheus-client = { workspace = true } prometheus-client = { workspace = true }
thiserror = "2" thiserror = "2"


@ -14,6 +14,7 @@ http-body-util = { workspace = true }
http = { workspace = true } http = { workspace = true }
parking_lot = "0.12" parking_lot = "0.12"
pin-project = "1" pin-project = "1"
tokio = { version = "1", features = ["macros", "rt"] }
tower = { workspace = true, features = ["retry"] } tower = { workspace = true, features = ["retry"] }
tracing = { workspace = true } tracing = { workspace = true }
thiserror = "2" thiserror = "2"
@ -25,6 +26,7 @@ linkerd-metrics = { path = "../../metrics" }
linkerd-stack = { path = "../../stack" } linkerd-stack = { path = "../../stack" }
[dev-dependencies] [dev-dependencies]
hyper = { workspace = true }
linkerd-tracing = { path = "../../tracing", features = ["ansi"] } linkerd-tracing = { path = "../../tracing", features = ["ansi"] }
linkerd-mock-http-body = { path = "../../mock/http-body" } linkerd-mock-http-body = { path = "../../mock/http-body" }
tokio = { version = "1", features = ["macros", "rt"] } tokio = { version = "1", features = ["macros", "rt"] }


@ -21,3 +21,6 @@ url = "2"
workspace = true workspace = true
features = ["http-route", "grpc-route"] features = ["http-route", "grpc-route"]
optional = true optional = true
[dev-dependencies]
maplit = "1"


@ -10,6 +10,7 @@ Facilities for HTTP/1 upgrades.
""" """
[dependencies] [dependencies]
bytes = { workspace = true }
drain = { workspace = true } drain = { workspace = true }
futures = { version = "0.3", default-features = false } futures = { version = "0.3", default-features = false }
http = { workspace = true } http = { workspace = true }


@ -25,14 +25,14 @@ impl CertMetrics {
let expiry_ts = prom::Gauge::default(); let expiry_ts = prom::Gauge::default();
registry.register_with_unit( registry.register_with_unit(
"expiration_timestamp", "expiration_timestamp",
"Time when this proxy's current mTLS identity certificate will expire (in seconds since the UNIX epoch)", "Time when the this proxy's current mTLS identity certificate will expire (in seconds since the UNIX epoch)",
prom::Unit::Seconds, expiry_ts.clone() prom::Unit::Seconds, expiry_ts.clone()
); );
let refresh_ts = prom::Gauge::default(); let refresh_ts = prom::Gauge::default();
registry.register_with_unit( registry.register_with_unit(
"refresh_timestamp", "refresh_timestamp",
"Time when this proxy's current mTLS identity certificate was last updated", "Time when the this proxy's current mTLS identity certificate were last updated",
prom::Unit::Seconds, prom::Unit::Seconds,
refresh_ts.clone(), refresh_ts.clone(),
); );
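Since these gauges are registered with `prom::Unit::Seconds`, prometheus-client appends the unit to the exposition name: they surface as `..._expiration_timestamp_seconds` and `..._refresh_timestamp_seconds` under whatever prefix the caller's registry supplies.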


@ -10,6 +10,8 @@ publish = { workspace = true }
test-util = [] test-util = []
[dependencies] [dependencies]
futures = { version = "0.3", default-features = false }
linkerd-error = { path = "../error" }
linkerd-stack = { path = "../stack" } linkerd-stack = { path = "../stack" }
parking_lot = "0.12" parking_lot = "0.12"
tokio = { version = "1", default-features = false, features = [ tokio = { version = "1", default-features = false, features = [
@ -26,3 +28,4 @@ tokio = { version = "1", default-features = false, features = [
"test-util", "test-util",
"time", "time",
] } ] }
linkerd-tracing = { path = "../tracing", features = ["ansi"] }


@ -7,32 +7,32 @@ edition = "2018"
publish = { workspace = true } publish = { workspace = true }
[features] [features]
-rustls-aws-lc-fips = ["tokio-rustls/fips"]
-test-util = ["linkerd-tls-test-util"]
+rustls = ["linkerd-meshtls-rustls", "__has_any_tls_impls"]
+rustls-aws-lc = ["rustls", "linkerd-meshtls-rustls/aws-lc"]
+rustls-aws-lc-fips = ["rustls-aws-lc", "linkerd-meshtls-rustls/aws-lc-fips"]
+rustls-ring = ["rustls", "linkerd-meshtls-rustls/ring"]
+boring = ["linkerd-meshtls-boring", "__has_any_tls_impls"]
+boring-fips = ["boring", "linkerd-meshtls-boring/fips"]
+# Enabled if *any* TLS impl is enabled.
+__has_any_tls_impls = []

[dependencies]
futures = { version = "0.3", default-features = false }
-rustls-pemfile = "2.2"
-rustls-webpki = { version = "0.103.4", default-features = false, features = ["std", "aws-lc-rs"] }
-thiserror = "2"
-tokio = { version = "1", features = ["macros", "rt", "sync"] }
-tokio-rustls = { workspace = true, features = ["aws-lc-rs"] }
-tracing = { workspace = true }
+pin-project = "1"
linkerd-dns-name = { path = "../dns/name" }
linkerd-error = { path = "../error" }
linkerd-identity = { path = "../identity" }
linkerd-io = { path = "../io" }
-linkerd-meshtls-verifier = { path = "verifier" }
-linkerd-rustls = { path = "../rustls" }
+linkerd-meshtls-boring = { path = "boring", optional = true }
+linkerd-meshtls-rustls = { path = "rustls", optional = true }
linkerd-stack = { path = "../stack" }
linkerd-tls = { path = "../tls" }
-linkerd-tls-test-util = { path = "../tls/test-util", optional = true }

[dev-dependencies]
tokio = { version = "1", features = ["macros", "net", "rt-multi-thread"] }
tracing = { workspace = true }
-rcgen = { version = "0.14.3", default-features = false, features = ["crypto", "pem", "aws_lc_rs"] }
+rcgen = "0.13.2"

linkerd-conditional = { path = "../conditional" }
linkerd-proxy-transport = { path = "../proxy/transport" }
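Reading the release branch's feature graph: `rustls-aws-lc-fips` implies `rustls-aws-lc`, which implies `rustls`; `boring-fips` implies `boring`; and every backend enables the internal `__has_any_tls_impls` marker. On main the table collapses to `rustls-aws-lc-fips` plus `test-util`, evidently because the rustls backend is always compiled in (note `tokio-rustls` and `linkerd-rustls` becoming non-optional dependencies).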


@ -0,0 +1,30 @@
[package]
name = "linkerd-meshtls-boring"
version = { workspace = true }
authors = { workspace = true }
license = { workspace = true }
edition = "2018"
publish = { workspace = true }
[dependencies]
boring = "4"
futures = { version = "0.3", default-features = false }
hex = "0.4" # used for debug logging
tokio = { version = "1", features = ["macros", "sync"] }
tokio-boring = "4"
tracing = { workspace = true }
linkerd-dns-name = { path = "../../dns/name" }
linkerd-error = { path = "../../error" }
linkerd-identity = { path = "../../identity" }
linkerd-io = { path = "../../io" }
linkerd-meshtls-verifier = { path = "../verifier" }
linkerd-stack = { path = "../../stack" }
linkerd-tls = { path = "../../tls" }
[features]
fips = ["boring/fips"]
[dev-dependencies]
linkerd-tls-test-util = { path = "../../tls/test-util" }
linkerd-meshtls = { path = "../../meshtls" }


@ -0,0 +1,185 @@
use crate::creds::CredsRx;
use linkerd_identity as id;
use linkerd_io as io;
use linkerd_meshtls_verifier as verifier;
use linkerd_stack::{NewService, Service};
use linkerd_tls::{client::AlpnProtocols, ClientTls, NegotiatedProtocolRef, ServerName};
use std::{future::Future, pin::Pin, sync::Arc, task::Context};
use tracing::{debug, trace};
#[derive(Clone)]
pub struct NewClient(CredsRx);
#[derive(Clone)]
pub struct Connect {
rx: CredsRx,
alpn: Option<Arc<[Vec<u8>]>>,
id: id::Id,
server: ServerName,
}
pub type ConnectFuture<I> = Pin<Box<dyn Future<Output = io::Result<ClientIo<I>>> + Send>>;
#[derive(Debug)]
pub struct ClientIo<I>(tokio_boring::SslStream<I>);
// === impl NewClient ===
impl NewClient {
pub(crate) fn new(rx: CredsRx) -> Self {
Self(rx)
}
}
impl NewService<ClientTls> for NewClient {
type Service = Connect;
fn new_service(&self, target: ClientTls) -> Self::Service {
Connect::new(target, self.0.clone())
}
}
impl std::fmt::Debug for NewClient {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("NewClient").finish()
}
}
// === impl Connect ===
impl Connect {
pub(crate) fn new(client_tls: ClientTls, rx: CredsRx) -> Self {
Self {
rx,
alpn: client_tls.alpn.map(|AlpnProtocols(ps)| ps.into()),
server: client_tls.server_name,
id: client_tls.server_id.into(),
}
}
}
impl<I> Service<I> for Connect
where
I: io::AsyncRead + io::AsyncWrite + Send + Unpin + 'static,
{
type Response = ClientIo<I>;
type Error = io::Error;
type Future = ConnectFuture<I>;
fn poll_ready(&mut self, _cx: &mut Context<'_>) -> io::Poll<()> {
io::Poll::Ready(Ok(()))
}
fn call(&mut self, io: I) -> Self::Future {
let server_name = self.server.clone();
let server_id = self.id.clone();
let connector = self
.rx
.borrow()
.connector(self.alpn.as_deref().unwrap_or(&[]));
Box::pin(async move {
let config = connector
.map_err(io::Error::other)?
.configure()
.map_err(io::Error::other)?;
// Establish a TLS connection to the server using the provided
// `server_name` as an SNI value to the server.
//
// Hostname verification is DISABLED, as we do not require that the
// peer's certificate actually matches the `server_name`. Instead,
// the `server_id` is used to perform the appropriate form of
// verification after the session is established.
let io = tokio_boring::connect(config.verify_hostname(false), server_name.as_str(), io)
.await
.map_err(|e| match e.as_io_error() {
// TODO(ver) boring should let us take ownership of the error directly.
Some(ioe) => io::Error::new(ioe.kind(), ioe.to_string()),
// XXX(ver) to use the boring error directly here we have to
// constrain the socket on Sync + std::fmt::Debug, which is
// a pain.
None => io::Error::other("unexpected TLS handshake error"),
})?;
// Servers must present a peer certificate. We extract the x509 cert
// and verify it manually against the `server_id`.
let cert = io
.ssl()
.peer_certificate()
.ok_or_else(|| io::Error::other("could not extract peer cert"))?;
let cert_der = id::DerX509(cert.to_der()?);
verifier::verify_id(&cert_der, &server_id)?;
debug!(
tls = io.ssl().version_str(),
client.cert = ?io.ssl().certificate().and_then(super::fingerprint),
peer.cert = ?io.ssl().peer_certificate().as_deref().and_then(super::fingerprint),
alpn = ?io.ssl().selected_alpn_protocol(),
"Initiated TLS connection"
);
trace!(peer.id = %server_id, peer.name = %server_name);
Ok(ClientIo(io))
})
}
}
// === impl ClientIo ===
impl<I: io::AsyncRead + io::AsyncWrite + Unpin> io::AsyncRead for ClientIo<I> {
#[inline]
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut io::ReadBuf<'_>,
) -> io::Poll<()> {
Pin::new(&mut self.0).poll_read(cx, buf)
}
}
impl<I: io::AsyncRead + io::AsyncWrite + Unpin> io::AsyncWrite for ClientIo<I> {
#[inline]
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
Pin::new(&mut self.0).poll_flush(cx)
}
#[inline]
fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
Pin::new(&mut self.0).poll_shutdown(cx)
}
#[inline]
fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> io::Poll<usize> {
Pin::new(&mut self.0).poll_write(cx, buf)
}
#[inline]
fn poll_write_vectored(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
bufs: &[io::IoSlice<'_>],
) -> io::Poll<usize> {
Pin::new(&mut self.0).poll_write_vectored(cx, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
self.0.is_write_vectored()
}
}
impl<I> ClientIo<I> {
#[inline]
pub fn negotiated_protocol(&self) -> Option<NegotiatedProtocolRef<'_>> {
self.0
.ssl()
.selected_alpn_protocol()
.map(NegotiatedProtocolRef)
}
}
impl<I: io::PeerAddr> io::PeerAddr for ClientIo<I> {
#[inline]
fn peer_addr(&self) -> io::Result<std::net::SocketAddr> {
self.0.get_ref().peer_addr()
}
}


@ -0,0 +1,209 @@
mod receiver;
mod store;
pub use self::{receiver::Receiver, store::Store};
use boring::{
pkey::{PKey, Private},
ssl,
x509::{store::X509StoreBuilder, X509},
};
use linkerd_dns_name as dns;
use linkerd_error::Result;
use linkerd_identity as id;
use std::sync::Arc;
use tokio::sync::watch;
pub fn watch(
local_id: id::Id,
server_name: dns::Name,
roots_pem: &str,
) -> Result<(Store, Receiver)> {
let creds = {
let roots = X509::stack_from_pem(roots_pem.as_bytes())?;
Arc::new(BaseCreds { roots })
};
let (tx, rx) = watch::channel(Creds::from(creds.clone()));
let rx = Receiver::new(local_id.clone(), server_name, rx);
let store = Store::new(creds, local_id, tx);
Ok((store, rx))
}
pub(crate) struct Creds {
base: Arc<BaseCreds>,
certs: Option<Certs>,
}
struct BaseCreds {
roots: Vec<X509>,
}
struct Certs {
leaf: X509,
intermediates: Vec<X509>,
key: PKey<Private>,
}
pub(crate) type CredsRx = watch::Receiver<Creds>;
type CredsTx = watch::Sender<Creds>;
// === impl Creds ===
impl From<Arc<BaseCreds>> for Creds {
fn from(base: Arc<BaseCreds>) -> Self {
Self { base, certs: None }
}
}
impl Creds {
    // TODO(ver) Specify certificate types, signing algorithms, cipher suites...
pub(crate) fn acceptor(&self, alpn_protocols: &[Vec<u8>]) -> Result<ssl::SslAcceptor> {
// mozilla_intermediate_v5 is the only variant that enables TLSv1.3, so we use that.
let mut conn = ssl::SslAcceptor::mozilla_intermediate_v5(ssl::SslMethod::tls_server())?;
// Force use of TLSv1.3.
conn.set_options(ssl::SslOptions::NO_TLSV1_2);
conn.clear_options(ssl::SslOptions::NO_TLSV1_3);
let roots = self.root_store()?;
tracing::debug!(
roots = ?self
.base
.roots
.iter()
.filter_map(|c| super::fingerprint(c))
.collect::<Vec<_>>(),
"Configuring acceptor roots",
);
conn.set_cert_store(roots);
// Ensure that client certificates are validated when present.
conn.set_verify(ssl::SslVerifyMode::PEER);
if let Some(certs) = &self.certs {
tracing::debug!(
cert = ?super::fingerprint(&certs.leaf),
"Configuring acceptor certificate",
);
conn.set_private_key(&certs.key)?;
conn.set_certificate(&certs.leaf)?;
conn.check_private_key()?;
for c in &certs.intermediates {
conn.add_extra_chain_cert(c.to_owned())?;
}
}
if !alpn_protocols.is_empty() {
let p = serialize_alpn(alpn_protocols)?;
conn.set_alpn_protos(&p)?;
}
Ok(conn.build())
}
    // TODO(ver) Specify certificate types, signing algorithms, cipher suites...
pub(crate) fn connector(&self, alpn_protocols: &[Vec<u8>]) -> Result<ssl::SslConnector> {
// XXX(ver) This function reads from the environment and/or the filesystem. This likely is
// at best wasteful and at worst unsafe (if another thread were to mutate these environment
// variables simultaneously, for instance). Unfortunately, the boring APIs don't really give
// us an alternative AFAICT.
let mut conn = ssl::SslConnector::builder(ssl::SslMethod::tls_client())?;
// Explicitly enable use of TLSv1.3
conn.set_options(ssl::SslOptions::NO_TLSV1 | ssl::SslOptions::NO_TLSV1_1);
// XXX(ver) if we disable use of TLSv1.2, connections just hang.
//conn.set_options(ssl::SslOptions::NO_TLSV1_2);
conn.clear_options(ssl::SslOptions::NO_TLSV1_3);
tracing::debug!(
roots = ?self
.base
.roots
.iter()
.filter_map(|c| super::fingerprint(c))
.collect::<Vec<_>>(),
"Configuring connector roots",
);
let roots = self.root_store()?;
conn.set_cert_store(roots);
if let Some(certs) = &self.certs {
tracing::debug!(
cert = ?super::fingerprint(&certs.leaf),
intermediates = %certs.intermediates.len(),
"Configuring connector certificate",
);
conn.set_private_key(&certs.key)?;
conn.set_certificate(&certs.leaf)?;
conn.check_private_key()?;
for c in &certs.intermediates {
conn.add_extra_chain_cert(c.to_owned())?;
}
}
if !alpn_protocols.is_empty() {
let p = serialize_alpn(alpn_protocols)?;
conn.set_alpn_protos(&p)?;
}
Ok(conn.build())
}
fn root_store(&self) -> Result<boring::x509::store::X509Store> {
let mut store = X509StoreBuilder::new()?;
for c in &self.base.roots {
store.add_cert(c.to_owned())?;
}
Ok(store.build())
}
}
/// Encodes a list of ALPN protocols into a slice of bytes.
///
/// `boring` requires that the list of protocols be encoded in the wire format.
fn serialize_alpn(protocols: &[Vec<u8>]) -> Result<Vec<u8>> {
// Allocate a buffer to hold the encoded protocols.
let mut bytes = {
// One additional byte for each protocol's length prefix.
let cap = protocols.len() + protocols.iter().map(Vec::len).sum::<usize>();
Vec::with_capacity(cap)
};
// Encode each protocol as a length-prefixed string.
for p in protocols {
if p.is_empty() {
continue;
}
if p.len() > 255 {
return Err("ALPN protocols must be less than 256 bytes".into());
}
bytes.push(p.len() as u8);
bytes.extend(p);
}
Ok(bytes)
}
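The buffer built here is the ALPN `ProtocolNameList` wire format from RFC 7301: each protocol name is one length byte followed by that many bytes, which is the shape OpenSSL-style `set_alpn_protos` APIs expect.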
#[cfg(test)]
#[test]
fn test_serialize_alpn() {
assert_eq!(serialize_alpn(&[b"h2".to_vec()]).unwrap(), b"\x02h2");
assert_eq!(
serialize_alpn(&[b"h2".to_vec(), b"http/1.1".to_vec()]).unwrap(),
b"\x02h2\x08http/1.1"
);
assert_eq!(
serialize_alpn(&[b"h2".to_vec(), b"http/1.1".to_vec()]).unwrap(),
b"\x02h2\x08http/1.1"
);
assert_eq!(
serialize_alpn(&[b"h2".to_vec(), vec![], b"http/1.1".to_vec()]).unwrap(),
b"\x02h2\x08http/1.1"
);
assert!(serialize_alpn(&[(0..255).collect()]).is_ok());
assert!(serialize_alpn(&[(0..=255).collect()]).is_err());
}


@ -0,0 +1,45 @@
use super::CredsRx;
use crate::{NewClient, Server};
use linkerd_dns_name as dns;
use linkerd_identity as id;
#[derive(Clone)]
pub struct Receiver {
id: id::Id,
name: dns::Name,
rx: CredsRx,
}
impl Receiver {
pub(crate) fn new(id: id::Id, name: dns::Name, rx: CredsRx) -> Self {
Self { id, name, rx }
}
/// Returns the local identity.
pub fn local_id(&self) -> &id::Id {
&self.id
}
/// Returns the mTLS Server Name.
pub fn server_name(&self) -> &dns::Name {
&self.name
}
/// Returns a `NewClient` that can be used to establish TLS on client connections.
pub fn new_client(&self) -> NewClient {
NewClient::new(self.rx.clone())
}
/// Returns a `Server` that can be used to terminate TLS on server connections.
pub fn server(&self) -> Server {
Server::new(self.name.clone(), self.rx.clone())
}
}
impl std::fmt::Debug for Receiver {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Receiver")
.field("name", &self.name)
.finish()
}
}


@ -0,0 +1,72 @@
use super::{BaseCreds, Certs, Creds, CredsTx};
use boring::pkey::PKey;
use boring::x509::{X509StoreContext, X509};
use linkerd_error::Result;
use linkerd_identity as id;
use linkerd_meshtls_verifier as verifier;
use std::sync::Arc;
pub struct Store {
creds: Arc<BaseCreds>,
id: id::Id,
tx: CredsTx,
}
// === impl Store ===
impl Store {
pub(super) fn new(creds: Arc<BaseCreds>, id: id::Id, tx: CredsTx) -> Self {
Self { creds, id, tx }
}
}
impl id::Credentials for Store {
    /// Publishes TLS client and server configurations using the most recently
    /// provided certificate and key.
fn set_certificate(
&mut self,
id::DerX509(leaf_der): id::DerX509,
intermediates: Vec<id::DerX509>,
key_pkcs8: Vec<u8>,
_expiry: std::time::SystemTime,
) -> Result<()> {
let leaf = X509::from_der(&leaf_der)?;
verifier::verify_id(&leaf_der, &self.id)?;
let intermediates = intermediates
.into_iter()
.map(|id::DerX509(der)| X509::from_der(&der).map_err(Into::into))
.collect::<Result<Vec<_>>>()?;
let key = PKey::private_key_from_pkcs8(&key_pkcs8)?;
let creds = Creds {
base: self.creds.clone(),
certs: Some(Certs {
leaf,
intermediates,
key,
}),
};
let mut context = X509StoreContext::new()?;
let roots = creds.root_store()?;
let mut chain = boring::stack::Stack::new()?;
for i in &creds.certs.as_ref().unwrap().intermediates {
chain.push(i.to_owned())?;
}
let init = {
let leaf = &creds.certs.as_ref().unwrap().leaf;
context.init(&roots, leaf, &chain, |c| c.verify_cert())?
};
if !init {
return Err("certificate could not be validated against the trust chain".into());
}
// If receivers are dropped, we don't return an error (as this would likely cause the
// updater to retry more aggressively). It's fine to silently ignore these errors.
let _ = self.tx.send(creds);
Ok(())
}
}


@ -0,0 +1,40 @@
#![deny(rust_2018_idioms, clippy::disallowed_methods, clippy::disallowed_types)]
#![forbid(unsafe_code)]
//! This crate provides an implementation of _meshtls_ backed by `boringssl` (as
//! provided by <https://github.com/cloudflare/boring>).
//!
//! There are several caveats with the current implementation:
//!
//! In its current form, this crate is compatible with the `meshtls-rustls`
//! implementation, which requires ECDSA-P256-SHA256 keys & signature
//! algorithms. This crate doesn't actually constrain the algorithms beyond
//! Mozilla's 'intermediate' (v5) [defaults][defaults]. But the goal for
//! supporting `boring` is to provide a FIPS 140-2 compliant mode. There's a
//! [PR][fips-pr] that implements this, but code changes will likely be required
//! to enable it once it's merged/released.
//!
//! A new SSL context is created for each connection. This is probably
//! unnecessary, but it's simpler for now. We can revisit this if needed.
//!
//! This module is not enabled by default. See the `linkerd-meshtls` and
//! `linkerd2-proxy` crates for more information.
//!
//! [defaults]: https://wiki.mozilla.org/Security/Server_Side_TLS
//! [fips-pr]: https://github.com/cloudflare/boring/pull/52
mod client;
pub mod creds;
mod server;
#[cfg(test)]
mod tests;
pub use self::{
client::{ClientIo, Connect, ConnectFuture, NewClient},
server::{Server, ServerIo, TerminateFuture},
};
fn fingerprint(c: &boring::x509::X509Ref) -> Option<String> {
let digest = c.digest(boring::hash::MessageDigest::sha256()).ok()?;
Some(hex::encode(digest)[0..8].to_string())
}
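
The helper above hex-encodes a SHA-256 digest and keeps the first eight characters, which is enough to correlate certificates across log lines. A sketch of using it (hypothetical helper, relying on the crate's existing `tracing` dependency):

fn log_fingerprint(der: &[u8]) -> Result<(), boring::error::ErrorStack> {
    let cert = boring::x509::X509::from_der(der)?;
    if let Some(fp) = fingerprint(&cert) {
        tracing::debug!(cert.fp = %fp, "Loaded certificate");
    }
    Ok(())
}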

View File

@ -0,0 +1,180 @@
use crate::creds::CredsRx;
use linkerd_dns_name as dns;
use linkerd_io as io;
use linkerd_meshtls_verifier as verifier;
use linkerd_stack::{Param, Service};
use linkerd_tls::{ClientId, NegotiatedProtocol, ServerName, ServerTls};
use std::{future::Future, pin::Pin, sync::Arc, task::Context};
use tracing::debug;
#[derive(Clone)]
pub struct Server {
name: dns::Name,
rx: CredsRx,
alpn: Option<Arc<[Vec<u8>]>>,
}
pub type TerminateFuture<I> =
Pin<Box<dyn Future<Output = io::Result<(ServerTls, ServerIo<I>)>> + Send>>;
#[derive(Debug)]
pub struct ServerIo<I>(tokio_boring::SslStream<I>);
// === impl Server ===
impl Server {
pub(crate) fn new(name: dns::Name, rx: CredsRx) -> Self {
Self {
name,
rx,
alpn: None,
}
}
pub fn with_alpn(mut self, alpn_protocols: Vec<Vec<u8>>) -> Self {
self.alpn = if alpn_protocols.is_empty() {
None
} else {
Some(alpn_protocols.into())
};
self
}
}
impl Param<ServerName> for Server {
fn param(&self) -> ServerName {
ServerName(self.name.clone())
}
}
impl<I> Service<I> for Server
where
I: io::AsyncRead + io::AsyncWrite + Send + Unpin + 'static,
{
type Response = (ServerTls, ServerIo<I>);
type Error = std::io::Error;
type Future = TerminateFuture<I>;
#[inline]
fn poll_ready(&mut self, _cx: &mut Context<'_>) -> io::Poll<()> {
io::Poll::Ready(Ok(()))
}
fn call(&mut self, io: I) -> Self::Future {
// TODO(ver) we should avoid creating a new context for each connection.
let acceptor = self
.rx
.borrow()
.acceptor(self.alpn.as_deref().unwrap_or(&[]));
Box::pin(async move {
let acc = acceptor.map_err(io::Error::other)?;
let io = tokio_boring::accept(&acc, io)
.await
.map(ServerIo)
.map_err(|e| match e.as_io_error() {
Some(ioe) => io::Error::new(ioe.kind(), ioe.to_string()),
// XXX(ver) to use the boring error directly here we have to constrain the
// socket type with Sync + std::fmt::Debug, which is a pain.
None => io::Error::other("unexpected TLS handshake error"),
})?;
let client_id = io.client_identity();
let negotiated_protocol = io.negotiated_protocol();
debug!(
tls = io.0.ssl().version_str(),
srv.cert = ?io.0.ssl().certificate().and_then(super::fingerprint),
peer.cert = ?io.0.ssl().peer_certificate().as_deref().and_then(super::fingerprint),
client.id = ?client_id,
alpn = ?negotiated_protocol,
"Accepted TLS connection"
);
let tls = ServerTls::Established {
client_id,
negotiated_protocol,
};
Ok((tls, io))
})
}
}
// === impl ServerIo ===
impl<I> ServerIo<I> {
#[inline]
fn negotiated_protocol(&self) -> Option<NegotiatedProtocol> {
self.0
.ssl()
.selected_alpn_protocol()
.map(|p| NegotiatedProtocol(p.to_vec()))
}
fn client_identity(&self) -> Option<ClientId> {
match self.0.ssl().peer_certificate() {
Some(cert) => {
let der = cert
.to_der()
.map_err(
|error| tracing::warn!(%error, "Failed to encode client end cert to der"),
)
.ok()?;
verifier::client_identity(&der).map(ClientId)
}
None => {
debug!("Connection missing peer certificate");
None
}
}
}
}
impl<I: io::AsyncRead + io::AsyncWrite + Unpin> io::AsyncRead for ServerIo<I> {
#[inline]
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut io::ReadBuf<'_>,
) -> io::Poll<()> {
Pin::new(&mut self.0).poll_read(cx, buf)
}
}
impl<I: io::AsyncRead + io::AsyncWrite + Unpin> io::AsyncWrite for ServerIo<I> {
#[inline]
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
Pin::new(&mut self.0).poll_flush(cx)
}
#[inline]
fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
Pin::new(&mut self.0).poll_shutdown(cx)
}
#[inline]
fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> io::Poll<usize> {
Pin::new(&mut self.0).poll_write(cx, buf)
}
#[inline]
fn poll_write_vectored(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
bufs: &[io::IoSlice<'_>],
) -> io::Poll<usize> {
Pin::new(&mut self.0).poll_write_vectored(cx, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
self.0.is_write_vectored()
}
}
impl<I: io::PeerAddr> io::PeerAddr for ServerIo<I> {
#[inline]
fn peer_addr(&self) -> io::Result<std::net::SocketAddr> {
self.0.get_ref().peer_addr()
}
}
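
A hypothetical call site (not part of this file) that terminates TLS on an accepted connection; `poll_ready` is always ready, so the service can be called directly:

async fn terminate<I>(mut server: Server, io: I) -> io::Result<(ServerTls, ServerIo<I>)>
where
    I: io::AsyncRead + io::AsyncWrite + Send + Unpin + 'static,
{
    // Drives the boring handshake and yields the peer's identity alongside
    // the decrypted stream.
    server.call(io).await
}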

View File

@ -0,0 +1,36 @@
[package]
name = "linkerd-meshtls-rustls"
version = { workspace = true }
authors = { workspace = true }
license = { workspace = true }
edition = "2018"
publish = { workspace = true }
[features]
default = ["ring"]
ring = ["tokio-rustls/ring", "rustls-webpki/ring"]
aws-lc = ["tokio-rustls/aws-lc-rs", "rustls-webpki/aws-lc-rs"]
aws-lc-fips = ["aws-lc", "tokio-rustls/fips"]
test-util = ["linkerd-tls-test-util"]
[dependencies]
futures = { version = "0.3", default-features = false }
ring = { version = "0.17", features = ["std"] }
rustls-pemfile = "2.2"
rustls-webpki = { version = "0.103.1", default-features = false, features = ["std"] }
thiserror = "2"
tokio = { version = "1", features = ["macros", "rt", "sync"] }
tokio-rustls = { workspace = true }
tracing = { workspace = true }
linkerd-dns-name = { path = "../../dns/name" }
linkerd-error = { path = "../../error" }
linkerd-io = { path = "../../io" }
linkerd-identity = { path = "../../identity" }
linkerd-stack = { path = "../../stack" }
linkerd-tls = { path = "../../tls" }
linkerd-tls-test-util = { path = "../../tls/test-util", optional = true }
linkerd-meshtls-verifier = { path = "../verifier" }
[dev-dependencies]
linkerd-tls-test-util = { path = "../../tls/test-util" }

View File

@ -0,0 +1,11 @@
#[cfg(feature = "aws-lc")]
mod aws_lc;
#[cfg(feature = "ring")]
mod ring;
#[cfg(feature = "aws-lc")]
pub use aws_lc::{default_provider, SUPPORTED_SIG_ALGS, TLS_SUPPORTED_CIPHERSUITES};
#[cfg(all(not(feature = "aws-lc"), feature = "ring"))]
pub use ring::{default_provider, SUPPORTED_SIG_ALGS, TLS_SUPPORTED_CIPHERSUITES};
#[cfg(all(not(feature = "aws-lc"), not(feature = "ring")))]
compile_error!("No rustls backend enabled. Enable one of the \"ring\" or \"aws-lc\" features");
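
When both backend features are enabled, the `aws-lc` re-export wins and `ring` is only used on its own. A hypothetical illustration of that precedence (this function is not part of the crate):

// Hypothetical: mirrors the cfg precedence of the re-exports above.
#[cfg(feature = "aws-lc")]
pub fn backend_name() -> &'static str {
    "aws-lc" // selected even when "ring" is also enabled
}

#[cfg(all(not(feature = "aws-lc"), feature = "ring"))]
pub fn backend_name() -> &'static str {
    "ring"
}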

View File

@ -0,0 +1,74 @@
pub use aws_lc_rs::default_provider;
use tokio_rustls::rustls::{
self,
crypto::{aws_lc_rs, WebPkiSupportedAlgorithms},
};
#[cfg(not(feature = "aws-lc-fips"))]
pub static TLS_SUPPORTED_CIPHERSUITES: &[rustls::SupportedCipherSuite] = &[
aws_lc_rs::cipher_suite::TLS13_CHACHA20_POLY1305_SHA256,
aws_lc_rs::cipher_suite::TLS13_AES_128_GCM_SHA256,
aws_lc_rs::cipher_suite::TLS13_AES_256_GCM_SHA384,
];
// Prefer AES-256-GCM if FIPS is enabled, with ChaCha20-Poly1305 as a fallback.
#[cfg(feature = "aws-lc-fips")]
pub static TLS_SUPPORTED_CIPHERSUITES: &[rustls::SupportedCipherSuite] = &[
aws_lc_rs::cipher_suite::TLS13_AES_256_GCM_SHA384,
aws_lc_rs::cipher_suite::TLS13_AES_128_GCM_SHA256,
aws_lc_rs::cipher_suite::TLS13_CHACHA20_POLY1305_SHA256,
];
pub static SUPPORTED_SIG_ALGS: &WebPkiSupportedAlgorithms = &WebPkiSupportedAlgorithms {
all: &[
webpki::aws_lc_rs::ECDSA_P256_SHA256,
webpki::aws_lc_rs::ECDSA_P256_SHA384,
webpki::aws_lc_rs::ECDSA_P384_SHA256,
webpki::aws_lc_rs::ECDSA_P384_SHA384,
webpki::aws_lc_rs::ECDSA_P521_SHA256,
webpki::aws_lc_rs::ECDSA_P521_SHA384,
webpki::aws_lc_rs::ECDSA_P521_SHA512,
webpki::aws_lc_rs::ED25519,
webpki::aws_lc_rs::RSA_PKCS1_2048_8192_SHA256,
webpki::aws_lc_rs::RSA_PKCS1_2048_8192_SHA384,
webpki::aws_lc_rs::RSA_PKCS1_2048_8192_SHA512,
webpki::aws_lc_rs::RSA_PKCS1_3072_8192_SHA384,
],
mapping: &[
// Note: for TLS1.2 the curve is not fixed by SignatureScheme. For TLS1.3 it is.
(
rustls::SignatureScheme::ECDSA_NISTP384_SHA384,
&[
webpki::aws_lc_rs::ECDSA_P384_SHA384,
webpki::aws_lc_rs::ECDSA_P256_SHA384,
webpki::aws_lc_rs::ECDSA_P521_SHA384,
],
),
(
rustls::SignatureScheme::ECDSA_NISTP256_SHA256,
&[
webpki::aws_lc_rs::ECDSA_P256_SHA256,
webpki::aws_lc_rs::ECDSA_P384_SHA256,
webpki::aws_lc_rs::ECDSA_P521_SHA256,
],
),
(
rustls::SignatureScheme::ECDSA_NISTP521_SHA512,
&[webpki::aws_lc_rs::ECDSA_P521_SHA512],
),
(
rustls::SignatureScheme::ED25519,
&[webpki::aws_lc_rs::ED25519],
),
(
rustls::SignatureScheme::RSA_PKCS1_SHA512,
&[webpki::aws_lc_rs::RSA_PKCS1_2048_8192_SHA512],
),
(
rustls::SignatureScheme::RSA_PKCS1_SHA384,
&[webpki::aws_lc_rs::RSA_PKCS1_2048_8192_SHA384],
),
(
rustls::SignatureScheme::RSA_PKCS1_SHA256,
&[webpki::aws_lc_rs::RSA_PKCS1_2048_8192_SHA256],
),
],
};

View File

@ -0,0 +1,55 @@
pub use ring::default_provider;
use tokio_rustls::rustls::{
self,
crypto::{ring, WebPkiSupportedAlgorithms},
};
pub static TLS_SUPPORTED_CIPHERSUITES: &[rustls::SupportedCipherSuite] = &[
ring::cipher_suite::TLS13_CHACHA20_POLY1305_SHA256,
ring::cipher_suite::TLS13_AES_128_GCM_SHA256,
ring::cipher_suite::TLS13_AES_256_GCM_SHA384,
];
// A subset of the algorithms supported by rustls+ring, imported from
// https://github.com/rustls/rustls/blob/v/0.23.21/rustls/src/crypto/ring/mod.rs#L107
pub static SUPPORTED_SIG_ALGS: &WebPkiSupportedAlgorithms = &WebPkiSupportedAlgorithms {
all: &[
webpki::ring::ECDSA_P256_SHA256,
webpki::ring::ECDSA_P256_SHA384,
webpki::ring::ECDSA_P384_SHA256,
webpki::ring::ECDSA_P384_SHA384,
webpki::ring::ED25519,
webpki::ring::RSA_PKCS1_2048_8192_SHA256,
webpki::ring::RSA_PKCS1_2048_8192_SHA384,
webpki::ring::RSA_PKCS1_2048_8192_SHA512,
webpki::ring::RSA_PKCS1_3072_8192_SHA384,
],
mapping: &[
(
rustls::SignatureScheme::ECDSA_NISTP384_SHA384,
&[
webpki::ring::ECDSA_P384_SHA384,
webpki::ring::ECDSA_P256_SHA384,
],
),
(
rustls::SignatureScheme::ECDSA_NISTP256_SHA256,
&[
webpki::ring::ECDSA_P256_SHA256,
webpki::ring::ECDSA_P384_SHA256,
],
),
(rustls::SignatureScheme::ED25519, &[webpki::ring::ED25519]),
(
rustls::SignatureScheme::RSA_PKCS1_SHA512,
&[webpki::ring::RSA_PKCS1_2048_8192_SHA512],
),
(
rustls::SignatureScheme::RSA_PKCS1_SHA384,
&[webpki::ring::RSA_PKCS1_2048_8192_SHA384],
),
(
rustls::SignatureScheme::RSA_PKCS1_SHA256,
&[webpki::ring::RSA_PKCS1_2048_8192_SHA256],
),
],
};

View File

@ -0,0 +1,184 @@
use futures::prelude::*;
use linkerd_identity as id;
use linkerd_io as io;
use linkerd_meshtls_verifier as verifier;
use linkerd_stack::{NewService, Service};
use linkerd_tls::{client::AlpnProtocols, ClientTls, NegotiatedProtocolRef};
use std::{convert::TryFrom, pin::Pin, sync::Arc, task::Context};
use tokio::sync::watch;
use tokio_rustls::rustls::{self, pki_types::CertificateDer, ClientConfig};
/// A `NewService` that produces `Connect` services from a dynamic TLS configuration.
#[derive(Clone)]
pub struct NewClient {
config: watch::Receiver<Arc<ClientConfig>>,
}
/// A `Service` that initiates client-side TLS connections.
#[derive(Clone)]
pub struct Connect {
server_id: id::Id,
server_name: rustls::pki_types::ServerName<'static>,
config: Arc<ClientConfig>,
}
pub type ConnectFuture<I> = Pin<Box<dyn Future<Output = io::Result<ClientIo<I>>> + Send>>;
#[derive(Debug)]
pub struct ClientIo<I>(tokio_rustls::client::TlsStream<I>);
// === impl NewClient ===
impl NewClient {
pub(crate) fn new(config: watch::Receiver<Arc<ClientConfig>>) -> Self {
Self { config }
}
}
impl NewService<ClientTls> for NewClient {
type Service = Connect;
fn new_service(&self, target: ClientTls) -> Self::Service {
Connect::new(target, (*self.config.borrow()).clone())
}
}
impl std::fmt::Debug for NewClient {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("NewClient").finish()
}
}
// === impl Connect ===
impl Connect {
pub(crate) fn new(client_tls: ClientTls, config: Arc<ClientConfig>) -> Self {
// If ALPN protocols are configured by the endpoint, we have to clone the entire
// configuration and set the protocols. If there are no ALPN options, clone the Arc'd base
// configuration without extra allocation.
//
// TODO it would be better to avoid cloning the whole TLS config per-connection, but the
// Rustls API doesn't give us a lot of options.
let config = match client_tls.alpn {
None => config,
Some(AlpnProtocols(protocols)) => {
let mut c = (*config).clone();
c.alpn_protocols = protocols;
Arc::new(c)
}
};
let server_name =
rustls::pki_types::ServerName::try_from(client_tls.server_name.to_string())
.expect("identity must be a valid DNS name");
Self {
server_id: client_tls.server_id.into(),
server_name,
config,
}
}
}
fn extract_cert(c: &rustls::ClientConnection) -> io::Result<&CertificateDer<'_>> {
match c.peer_certificates().and_then(|certs| certs.first()) {
Some(leaf_cert) => io::Result::Ok(leaf_cert),
None => Err(io::Error::other("missing tls end cert")),
}
}
impl<I> Service<I> for Connect
where
I: io::AsyncRead + io::AsyncWrite + Send + Unpin + 'static,
{
type Response = ClientIo<I>;
type Error = io::Error;
type Future = ConnectFuture<I>;
fn poll_ready(&mut self, _cx: &mut Context<'_>) -> io::Poll<()> {
io::Poll::Ready(Ok(()))
}
fn call(&mut self, io: I) -> Self::Future {
let server_id = self.server_id.clone();
Box::pin(
// Connect to the server, sending the `server_name` SNI in the
// client handshake. The provided config should use the
// `AnySanVerifier` to ignore the server certificate's DNS SANs.
// Instead, we extract the server's leaf certificate after the
// handshake and verify that it matches the provided `server_id`.
tokio_rustls::TlsConnector::from(self.config.clone())
// XXX(eliza): it's a bummer that the server name has to be cloned here...
.connect(self.server_name.clone(), io)
.map(move |s| {
let s = s?;
let (_, conn) = s.get_ref();
let end_cert = extract_cert(conn)?;
verifier::verify_id(end_cert, &server_id)?;
Ok(ClientIo(s))
}),
)
}
}
// === impl ClientIo ===
impl<I: io::AsyncRead + io::AsyncWrite + Unpin> io::AsyncRead for ClientIo<I> {
#[inline]
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut io::ReadBuf<'_>,
) -> io::Poll<()> {
Pin::new(&mut self.0).poll_read(cx, buf)
}
}
impl<I: io::AsyncRead + io::AsyncWrite + Unpin> io::AsyncWrite for ClientIo<I> {
#[inline]
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
Pin::new(&mut self.0).poll_flush(cx)
}
#[inline]
fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
Pin::new(&mut self.0).poll_shutdown(cx)
}
#[inline]
fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> io::Poll<usize> {
Pin::new(&mut self.0).poll_write(cx, buf)
}
#[inline]
fn poll_write_vectored(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
bufs: &[io::IoSlice<'_>],
) -> io::Poll<usize> {
Pin::new(&mut self.0).poll_write_vectored(cx, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
self.0.is_write_vectored()
}
}
impl<I> ClientIo<I> {
#[inline]
pub fn negotiated_protocol(&self) -> Option<NegotiatedProtocolRef<'_>> {
self.0
.get_ref()
.1
.alpn_protocol()
.map(NegotiatedProtocolRef)
}
}
impl<I: io::PeerAddr> io::PeerAddr for ClientIo<I> {
#[inline]
fn peer_addr(&self) -> io::Result<std::net::SocketAddr> {
self.0.get_ref().0.peer_addr()
}
}
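
Putting the pieces together, a hypothetical call site (not part of this file) that establishes TLS over an existing TCP stream using the dynamic client configuration; since `poll_ready` is always ready, the service can be called directly:

async fn connect_tls<I>(new_client: &NewClient, target: ClientTls, tcp: I) -> io::Result<ClientIo<I>>
where
    I: io::AsyncRead + io::AsyncWrite + Send + Unpin + 'static,
{
    // Build a per-target Connect (applying any ALPN overrides), then run the
    // handshake and post-handshake identity verification.
    let mut connect = new_client.new_service(target);
    connect.call(tcp).await
}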

View File

@ -0,0 +1,131 @@
mod receiver;
mod store;
pub(crate) mod verify;
use crate::backend;
pub use self::{receiver::Receiver, store::Store};
use linkerd_dns_name as dns;
use linkerd_error::Result;
use linkerd_identity as id;
use ring::error::KeyRejected;
use std::sync::Arc;
use thiserror::Error;
use tokio::sync::watch;
use tokio_rustls::rustls::{self, crypto::CryptoProvider};
use tracing::warn;
#[derive(Debug, Error)]
#[error("{0}")]
pub struct InvalidKey(#[source] KeyRejected);
#[derive(Debug, Error)]
#[error("invalid trust roots")]
pub struct InvalidTrustRoots(());
pub fn watch(
local_id: id::Id,
server_name: dns::Name,
roots_pem: &str,
) -> Result<(Store, Receiver)> {
let mut roots = rustls::RootCertStore::empty();
let certs = match rustls_pemfile::certs(&mut std::io::Cursor::new(roots_pem))
.collect::<Result<Vec<_>, _>>()
{
Err(error) => {
warn!(%error, "invalid trust anchors file");
return Err(error.into());
}
Ok(certs) if certs.is_empty() => {
warn!("no valid certs in trust anchors file");
return Err("no trust roots in PEM file".into());
}
Ok(certs) => certs,
};
let (added, skipped) = roots.add_parsable_certificates(certs);
if skipped != 0 {
warn!("Skipped {} invalid trust anchors", skipped);
}
if added == 0 {
return Err("no trust roots loaded".into());
}
// XXX: Rustls's built-in verifiers don't let us tweak things as fully as we'd like (e.g.
// controlling the set of trusted signature algorithms), but they provide good enough
// defaults for now.
// TODO: lock down the verification further.
let server_cert_verifier = Arc::new(verify::AnySanVerifier::new(roots.clone()));
let (client_tx, client_rx) = {
// Since we don't have a certificate yet, build a client configuration
// that doesn't attempt client authentication. Once we get a
// certificate, the `Store` will publish a new configuration with a
// client certificate resolver.
let mut c =
store::client_config_builder(server_cert_verifier.clone()).with_no_client_auth();
// Disable session resumption for the time-being until resumption is
// more tested.
c.resumption = rustls::client::Resumption::disabled();
watch::channel(Arc::new(c))
};
let (server_tx, server_rx) = {
// Since we don't have a certificate yet, use an empty cert resolver so
// that handshaking always fails. Once we get a certificate, the `Store`
// will publish a new configuration with a server certificate resolver.
let empty_resolver = Arc::new(rustls::server::ResolvesServerCertUsingSni::new());
watch::channel(store::server_config(roots.clone(), empty_resolver))
};
let rx = Receiver::new(local_id.clone(), server_name.clone(), client_rx, server_rx);
let store = Store::new(
roots,
server_cert_verifier,
local_id,
server_name,
client_tx,
server_tx,
);
Ok((store, rx))
}
fn default_provider() -> CryptoProvider {
let mut provider = backend::default_provider();
provider.cipher_suites = params::TLS_SUPPORTED_CIPHERSUITES.to_vec();
provider
}
#[cfg(feature = "test-util")]
pub fn for_test(ent: &linkerd_tls_test_util::Entity) -> (Store, Receiver) {
watch(
ent.name.parse().expect("id must be valid"),
ent.name.parse().expect("name must be valid"),
std::str::from_utf8(ent.trust_anchors).expect("roots must be PEM"),
)
.expect("credentials must be valid")
}
#[cfg(feature = "test-util")]
pub fn default_for_test() -> (Store, Receiver) {
for_test(&linkerd_tls_test_util::FOO_NS1)
}
mod params {
use crate::backend;
use tokio_rustls::rustls::{self, crypto::WebPkiSupportedAlgorithms};
// These must be kept in sync:
pub static SIGNATURE_ALG_RING_SIGNING: &ring::signature::EcdsaSigningAlgorithm =
&ring::signature::ECDSA_P256_SHA256_ASN1_SIGNING;
pub const SIGNATURE_ALG_RUSTLS_SCHEME: rustls::SignatureScheme =
rustls::SignatureScheme::ECDSA_NISTP256_SHA256;
pub const SIGNATURE_ALG_RUSTLS_ALGORITHM: rustls::SignatureAlgorithm =
rustls::SignatureAlgorithm::ECDSA;
pub static SUPPORTED_SIG_ALGS: &WebPkiSupportedAlgorithms = backend::SUPPORTED_SIG_ALGS;
pub static TLS_VERSIONS: &[&rustls::SupportedProtocolVersion] = &[&rustls::version::TLS13];
pub static TLS_SUPPORTED_CIPHERSUITES: &[rustls::SupportedCipherSuite] =
backend::TLS_SUPPORTED_CIPHERSUITES;
}
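
A minimal sketch of consuming the watch output (hypothetical, gated on the same `test-util` feature; the receiver accessors are assumed to mirror the boring implementation shown earlier in this diff, as exercised by the `linkerd-meshtls` facade below):

#[cfg(feature = "test-util")]
fn example_setup() {
    // Build a Store/Receiver pair from the bundled test fixtures, then derive
    // the client- and server-side TLS factories from the receiver.
    let (_store, rx) = default_for_test();
    let _new_client = rx.new_client();
    let _server = rx.server();
}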

View File

@ -70,7 +70,7 @@ mod tests {
     /// incoming handshakes, but that doesn't matter for these tests, where we
     /// don't actually do any TLS.
     fn empty_server_config() -> rustls::ServerConfig {
-        rustls::ServerConfig::builder_with_provider(linkerd_rustls::get_default_provider())
+        rustls::ServerConfig::builder_with_provider(Arc::new(crate::backend::default_provider()))
             .with_protocol_versions(rustls::ALL_VERSIONS)
             .expect("client config must be valid")
             .with_client_cert_verifier(Arc::new(rustls::server::NoClientAuth))
@ -83,7 +83,7 @@ mod tests {
     /// it doesn't trust any root certificates. However, that doesn't actually
     /// matter for these tests, which don't actually do TLS.
     fn empty_client_config() -> rustls::ClientConfig {
-        rustls::ClientConfig::builder_with_provider(linkerd_rustls::get_default_provider())
+        rustls::ClientConfig::builder_with_provider(Arc::new(crate::backend::default_provider()))
             .with_protocol_versions(rustls::ALL_VERSIONS)
             .expect("client config must be valid")
             .with_root_certificates(rustls::RootCertStore::empty())

View File

@ -1,15 +1,12 @@
+use super::{default_provider, params::*, InvalidKey};
 use linkerd_dns_name as dns;
 use linkerd_error::Result;
 use linkerd_identity as id;
 use linkerd_meshtls_verifier as verifier;
+use ring::{rand, signature::EcdsaKeyPair};
 use std::{convert::TryFrom, sync::Arc};
 use tokio::sync::watch;
-use tokio_rustls::rustls::{
-    self,
-    pki_types::{PrivatePkcs8KeyDer, UnixTime},
-    server::WebPkiClientVerifier,
-    sign::CertifiedKey,
-};
+use tokio_rustls::rustls::{self, pki_types::UnixTime, server::WebPkiClientVerifier};
 use tracing::debug;

 pub struct Store {
@ -19,16 +16,20 @@ pub struct Store {
     server_name: dns::Name,
     client_tx: watch::Sender<Arc<rustls::ClientConfig>>,
     server_tx: watch::Sender<Arc<rustls::ServerConfig>>,
+    random: ring::rand::SystemRandom,
 }

+#[derive(Clone, Debug)]
+struct Key(Arc<EcdsaKeyPair>);
+
 #[derive(Clone, Debug)]
 struct CertResolver(Arc<rustls::sign::CertifiedKey>);

 pub(super) fn client_config_builder(
     cert_verifier: Arc<dyn rustls::client::danger::ServerCertVerifier>,
 ) -> rustls::ConfigBuilder<rustls::ClientConfig, rustls::client::WantsClientCert> {
-    rustls::ClientConfig::builder_with_provider(linkerd_rustls::get_default_provider())
-        .with_protocol_versions(linkerd_rustls::TLS_VERSIONS)
+    rustls::ClientConfig::builder_with_provider(Arc::new(default_provider()))
+        .with_protocol_versions(TLS_VERSIONS)
         .expect("client config must be valid")
         // XXX: Rustls's built-in verifiers don't let us tweak things as fully
         // as we'd like (e.g. controlling the set of trusted signature
@ -54,7 +55,7 @@ pub(super) fn server_config(
     // controlling the set of trusted signature algorithms), but they provide good enough
     // defaults for now.
     // TODO: lock down the verification further.
-    let provider = linkerd_rustls::get_default_provider();
+    let provider = Arc::new(default_provider());

     let client_cert_verifier =
         WebPkiClientVerifier::builder_with_provider(Arc::new(roots), provider.clone())
@ -63,7 +64,7 @@ pub(super) fn server_config(
         .expect("server verifier must be valid");

     rustls::ServerConfig::builder_with_provider(provider)
-        .with_protocol_versions(linkerd_rustls::TLS_VERSIONS)
+        .with_protocol_versions(TLS_VERSIONS)
         .expect("server config must be valid")
         .with_client_cert_verifier(client_cert_verifier)
         .with_cert_resolver(resolver)
@ -89,6 +90,7 @@ impl Store {
             server_name,
             client_tx,
             server_tx,
+            random: ring::rand::SystemRandom::new(),
         }
     }
@ -145,11 +147,13 @@ impl id::Credentials for Store {
         // Use the client's verifier to validate the certificate for our local name.
         self.validate(&chain)?;

-        let key_der = PrivatePkcs8KeyDer::from(key);
-        let provider = rustls::crypto::CryptoProvider::get_default()
-            .expect("Failed to get default crypto provider");
-        let key = CertifiedKey::from_der(chain, key_der.into(), provider)?;
-        let resolver = Arc::new(CertResolver(Arc::new(key)));
+        let key = EcdsaKeyPair::from_pkcs8(SIGNATURE_ALG_RING_SIGNING, &key, &self.random)
+            .map_err(InvalidKey)?;
+
+        let resolver = Arc::new(CertResolver(Arc::new(rustls::sign::CertifiedKey::new(
+            chain,
+            Arc::new(Key(Arc::new(key))),
+        ))));

         // Build new client and server TLS configs.
         let client = self.client_config(resolver.clone());
@ -163,6 +167,39 @@ impl id::Credentials for Store {
     }
 }

+// === impl Key ===
+
+impl rustls::sign::SigningKey for Key {
+    fn choose_scheme(
+        &self,
+        offered: &[rustls::SignatureScheme],
+    ) -> Option<Box<dyn rustls::sign::Signer>> {
+        if !offered.contains(&SIGNATURE_ALG_RUSTLS_SCHEME) {
+            return None;
+        }
+        Some(Box::new(self.clone()))
+    }
+
+    fn algorithm(&self) -> rustls::SignatureAlgorithm {
+        SIGNATURE_ALG_RUSTLS_ALGORITHM
+    }
+}
+
+impl rustls::sign::Signer for Key {
+    fn sign(&self, message: &[u8]) -> Result<Vec<u8>, rustls::Error> {
+        let rng = rand::SystemRandom::new();
+        self.0
+            .sign(&rng, message)
+            .map(|signature| signature.as_ref().to_owned())
+            .map_err(|ring::error::Unspecified| rustls::Error::General("Signing Failed".to_owned()))
+    }
+
+    fn scheme(&self) -> rustls::SignatureScheme {
+        SIGNATURE_ALG_RUSTLS_SCHEME
+    }
+}
+
 // === impl CertResolver ===

 impl CertResolver {
@ -171,7 +208,7 @@ impl CertResolver {
         &self,
         sigschemes: &[rustls::SignatureScheme],
     ) -> Option<Arc<rustls::sign::CertifiedKey>> {
-        if !sigschemes.contains(&linkerd_rustls::SIGNATURE_ALG_RUSTLS_SCHEME) {
+        if !sigschemes.contains(&SIGNATURE_ALG_RUSTLS_SCHEME) {
             debug!("Signature scheme not supported -> no certificate");
             return None;
         }

View File

@ -1,4 +1,4 @@
-use linkerd_rustls::SUPPORTED_SIG_ALGS;
+use crate::creds::params::SUPPORTED_SIG_ALGS;
 use std::{convert::TryFrom, sync::Arc};
 use tokio_rustls::rustls::{
     self,

View File

@ -0,0 +1,14 @@
#![deny(rust_2018_idioms, clippy::disallowed_methods, clippy::disallowed_types)]
#![forbid(unsafe_code)]
mod backend;
mod client;
pub mod creds;
mod server;
#[cfg(test)]
mod tests;
pub use self::{
client::{ClientIo, Connect, ConnectFuture, NewClient},
server::{Server, ServerIo, TerminateFuture},
};

View File

@ -0,0 +1,197 @@
use futures::prelude::*;
use linkerd_dns_name as dns;
use linkerd_io as io;
use linkerd_meshtls_verifier as verifier;
use linkerd_stack::{Param, Service};
use linkerd_tls::{ClientId, NegotiatedProtocol, NegotiatedProtocolRef, ServerName, ServerTls};
use std::{pin::Pin, sync::Arc, task::Context};
use thiserror::Error;
use tokio::sync::watch;
use tokio_rustls::rustls::{pki_types::CertificateDer, ServerConfig};
use tracing::debug;
/// A Service that terminates TLS connections using a dynamically updated server configuration.
#[derive(Clone)]
pub struct Server {
name: dns::Name,
rx: watch::Receiver<Arc<ServerConfig>>,
}
pub type TerminateFuture<I> = futures::future::MapOk<
tokio_rustls::Accept<I>,
fn(tokio_rustls::server::TlsStream<I>) -> (ServerTls, ServerIo<I>),
>;
#[derive(Debug)]
pub struct ServerIo<I>(tokio_rustls::server::TlsStream<I>);
#[derive(Debug, Error)]
#[error("credential store lost")]
pub struct LostStore(());
impl Server {
pub(crate) fn new(name: dns::Name, rx: watch::Receiver<Arc<ServerConfig>>) -> Self {
Self { name, rx }
}
#[cfg(test)]
pub(crate) fn config(&self) -> Arc<ServerConfig> {
(*self.rx.borrow()).clone()
}
/// Spawns a background task that watches for TLS configuration updates and creates an augmented
/// configuration with the provided ALPN protocols. The returned server uses this ALPN-aware
/// configuration.
pub fn spawn_with_alpn(self, alpn_protocols: Vec<Vec<u8>>) -> Result<Self, LostStore> {
if alpn_protocols.is_empty() {
return Ok(self);
}
let mut orig_rx = self.rx;
let mut c = (**orig_rx.borrow_and_update()).clone();
c.alpn_protocols.clone_from(&alpn_protocols);
let (tx, rx) = watch::channel(c.into());
// Spawn a background task that watches the original server configuration and
// republishes it on a new channel with the ALPN overrides applied.
//
// The background task completes when the original sender is closed or when all receivers
// are dropped.
tokio::spawn(async move {
loop {
tokio::select! {
_ = tx.closed() => {
debug!("ALPN TLS config receivers dropped");
return;
}
res = orig_rx.changed() => {
if res.is_err() {
debug!("TLS config sender closed");
return;
}
}
}
let mut c = (*orig_rx.borrow().clone()).clone();
c.alpn_protocols.clone_from(&alpn_protocols);
let _ = tx.send(c.into());
}
});
Ok(Self::new(self.name, rx))
}
}
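// A sketch of a hypothetical call site (not part of this file): deriving an
// ALPN-constrained server for listeners that should only accept HTTP/2.
fn h2_only(server: Server) -> Result<Server, LostStore> {
    server.spawn_with_alpn(vec![b"h2".to_vec()])
}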
impl Param<ServerName> for Server {
fn param(&self) -> ServerName {
ServerName(self.name.clone())
}
}
impl<I> Service<I> for Server
where
I: io::AsyncRead + io::AsyncWrite + Send + Unpin,
{
type Response = (ServerTls, ServerIo<I>);
type Error = std::io::Error;
type Future = TerminateFuture<I>;
#[inline]
fn poll_ready(&mut self, _cx: &mut Context<'_>) -> io::Poll<()> {
io::Poll::Ready(Ok(()))
}
#[inline]
fn call(&mut self, io: I) -> Self::Future {
tokio_rustls::TlsAcceptor::from((*self.rx.borrow()).clone())
.accept(io)
.map_ok(|io| {
// Determine the peer's identity, if it exists.
let client_id = client_identity(&io);
let negotiated_protocol = io
.get_ref()
.1
.alpn_protocol()
.map(|b| NegotiatedProtocol(b.into()));
debug!(client.id = ?client_id, alpn = ?negotiated_protocol, "Accepted TLS connection");
let tls = ServerTls::Established {
client_id,
negotiated_protocol,
};
(tls, ServerIo(io))
})
}
}
fn client_identity<I>(tls: &tokio_rustls::server::TlsStream<I>) -> Option<ClientId> {
let (_io, session) = tls.get_ref();
let certs = session.peer_certificates()?;
let c = certs.first().map(CertificateDer::as_ref)?;
verifier::client_identity(c).map(ClientId)
}
// === impl ServerIo ===
impl<I: io::AsyncRead + io::AsyncWrite + Unpin> io::AsyncRead for ServerIo<I> {
#[inline]
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut io::ReadBuf<'_>,
) -> io::Poll<()> {
Pin::new(&mut self.0).poll_read(cx, buf)
}
}
impl<I: io::AsyncRead + io::AsyncWrite + Unpin> io::AsyncWrite for ServerIo<I> {
#[inline]
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
Pin::new(&mut self.0).poll_flush(cx)
}
#[inline]
fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
Pin::new(&mut self.0).poll_shutdown(cx)
}
#[inline]
fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> io::Poll<usize> {
Pin::new(&mut self.0).poll_write(cx, buf)
}
#[inline]
fn poll_write_vectored(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
bufs: &[io::IoSlice<'_>],
) -> io::Poll<usize> {
Pin::new(&mut self.0).poll_write_vectored(cx, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
self.0.is_write_vectored()
}
}
impl<I> ServerIo<I> {
#[inline]
pub fn negotiated_protocol(&self) -> Option<NegotiatedProtocolRef<'_>> {
self.0
.get_ref()
.1
.alpn_protocol()
.map(NegotiatedProtocolRef)
}
}
impl<I: io::PeerAddr> io::PeerAddr for ServerIo<I> {
#[inline]
fn peer_addr(&self) -> io::Result<std::net::SocketAddr> {
self.0.get_ref().0.peer_addr()
}
}

View File

@ -0,0 +1,50 @@
use linkerd_identity::{Credentials, DerX509};
use linkerd_tls_test_util::*;
use std::time::{Duration, SystemTime};
fn load(ent: &Entity) -> crate::creds::Store {
let roots_pem = std::str::from_utf8(ent.trust_anchors).expect("valid PEM");
let (store, _) = crate::creds::watch(
ent.name.parse().unwrap(),
ent.name.parse().unwrap(),
roots_pem,
)
.expect("credentials must be readable");
store
}
#[test]
fn can_construct_client_and_server_config_from_valid_settings() {
assert!(load(&FOO_NS1)
.set_certificate(
DerX509(FOO_NS1.crt.to_vec()),
vec![],
FOO_NS1.key.to_vec(),
SystemTime::now() + Duration::from_secs(1000)
)
.is_ok());
}
#[test]
fn recognize_ca_did_not_issue_cert() {
assert!(load(&FOO_NS1_CA2)
.set_certificate(
DerX509(FOO_NS1.crt.to_vec()),
vec![],
FOO_NS1.key.to_vec(),
SystemTime::now() + Duration::from_secs(1000)
)
.is_err());
}
#[test]
fn recognize_cert_is_not_valid_for_identity() {
assert!(load(&BAR_NS1)
.set_certificate(
DerX509(FOO_NS1.crt.to_vec()),
vec![],
FOO_NS1.key.to_vec(),
SystemTime::now() + Duration::from_secs(1000)
)
.is_err());
}

View File

@ -1,93 +1,92 @@
-use futures::prelude::*;
-use linkerd_identity as id;
 use linkerd_io as io;
-use linkerd_meshtls_verifier as verifier;
 use linkerd_stack::{NewService, Service};
-use linkerd_tls::{client::AlpnProtocols, ClientTls, NegotiatedProtocol, NegotiatedProtocolRef};
-use std::{convert::TryFrom, pin::Pin, sync::Arc, task::Context};
-use tokio::sync::watch;
-use tokio_rustls::rustls::{self, pki_types::CertificateDer, ClientConfig};
+use linkerd_tls::{ClientTls, NegotiatedProtocol};
+use std::{
+    future::Future,
+    pin::Pin,
+    task::{Context, Poll},
+};

-/// A `NewService` that produces `Connect` services from a dynamic TLS configuration.
-#[derive(Clone)]
-pub struct NewClient {
-    config: watch::Receiver<Arc<ClientConfig>>,
+#[cfg(feature = "boring")]
+use crate::boring;
+
+#[cfg(feature = "rustls")]
+use crate::rustls;
+
+#[cfg(not(feature = "__has_any_tls_impls"))]
+use std::marker::PhantomData;
+
+#[derive(Clone, Debug)]
+pub enum NewClient {
+    #[cfg(feature = "boring")]
+    Boring(boring::NewClient),
+
+    #[cfg(feature = "rustls")]
+    Rustls(rustls::NewClient),
+
+    #[cfg(not(feature = "__has_any_tls_impls"))]
+    NoTls,
 }

-/// A `Service` that initiates client-side TLS connections.
 #[derive(Clone)]
-pub struct Connect {
-    server_id: id::Id,
-    server_name: rustls::pki_types::ServerName<'static>,
-    config: Arc<ClientConfig>,
+pub enum Connect {
+    #[cfg(feature = "boring")]
+    Boring(boring::Connect),
+
+    #[cfg(feature = "rustls")]
+    Rustls(rustls::Connect),
+
+    #[cfg(not(feature = "__has_any_tls_impls"))]
+    NoTls,
 }

-pub type ConnectFuture<I> =
-    Pin<Box<dyn Future<Output = io::Result<(ClientIo<I>, Option<NegotiatedProtocol>)>> + Send>>;
+#[pin_project::pin_project(project = ConnectFutureProj)]
+pub enum ConnectFuture<I> {
+    #[cfg(feature = "boring")]
+    Boring(#[pin] boring::ConnectFuture<I>),
+
+    #[cfg(feature = "rustls")]
+    Rustls(#[pin] rustls::ConnectFuture<I>),
+
+    #[cfg(not(feature = "__has_any_tls_impls"))]
+    NoTls(PhantomData<fn(I)>),
+}

+#[pin_project::pin_project(project = ClientIoProj)]
 #[derive(Debug)]
-pub struct ClientIo<I>(tokio_rustls::client::TlsStream<I>);
+pub enum ClientIo<I> {
+    #[cfg(feature = "boring")]
+    Boring(#[pin] boring::ClientIo<I>),
+
+    #[cfg(feature = "rustls")]
+    Rustls(#[pin] rustls::ClientIo<I>),
+
+    #[cfg(not(feature = "__has_any_tls_impls"))]
+    NoTls(PhantomData<fn(I)>),
+}

 // === impl NewClient ===

-impl NewClient {
-    pub(crate) fn new(config: watch::Receiver<Arc<ClientConfig>>) -> Self {
-        Self { config }
-    }
-}
-
 impl NewService<ClientTls> for NewClient {
     type Service = Connect;

+    #[inline]
     fn new_service(&self, target: ClientTls) -> Self::Service {
-        Connect::new(target, (*self.config.borrow()).clone())
-    }
-}
-
-impl std::fmt::Debug for NewClient {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        f.debug_struct("NewClient").finish()
+        match self {
+            #[cfg(feature = "boring")]
+            Self::Boring(new_client) => Connect::Boring(new_client.new_service(target)),
+
+            #[cfg(feature = "rustls")]
+            Self::Rustls(new_client) => Connect::Rustls(new_client.new_service(target)),
+
+            #[cfg(not(feature = "__has_any_tls_impls"))]
+            _ => crate::no_tls!(target),
+        }
     }
 }

 // === impl Connect ===

-impl Connect {
-    pub(crate) fn new(client_tls: ClientTls, config: Arc<ClientConfig>) -> Self {
-        // If ALPN protocols are configured by the endpoint, we have to clone the entire
-        // configuration and set the protocols. If there are no ALPN options, clone the Arc'd base
-        // configuration without extra allocation.
-        //
-        // TODO it would be better to avoid cloning the whole TLS config per-connection, but the
-        // Rustls API doesn't give us a lot of options.
-        let config = match client_tls.alpn {
-            None => config,
-            Some(AlpnProtocols(protocols)) => {
-                let mut c = (*config).clone();
-                c.alpn_protocols = protocols;
-                Arc::new(c)
-            }
-        };
-
-        let server_name =
-            rustls::pki_types::ServerName::try_from(client_tls.server_name.to_string())
-                .expect("identity must be a valid DNS name");
-
-        Self {
-            server_id: client_tls.server_id.into(),
-            server_name,
-            config,
-        }
-    }
-}
-
-fn extract_cert(c: &rustls::ClientConnection) -> io::Result<&CertificateDer<'_>> {
-    match c.peer_certificates().and_then(|certs| certs.first()) {
-        Some(leaf_cert) => io::Result::Ok(leaf_cert),
-        None => Err(io::Error::other("missing tls end cert")),
-    }
-}
-
 impl<I> Service<I> for Connect
 where
     I: io::AsyncRead + io::AsyncWrite + Send + Unpin + 'static,
@ -96,31 +95,67 @@ where
     type Error = io::Error;
     type Future = ConnectFuture<I>;

-    fn poll_ready(&mut self, _cx: &mut Context<'_>) -> io::Poll<()> {
-        io::Poll::Ready(Ok(()))
+    #[inline]
+    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+        match self {
+            #[cfg(feature = "boring")]
+            Self::Boring(connect) => <boring::Connect as Service<I>>::poll_ready(connect, cx),
+
+            #[cfg(feature = "rustls")]
+            Self::Rustls(connect) => <rustls::Connect as Service<I>>::poll_ready(connect, cx),
+
+            #[cfg(not(feature = "__has_any_tls_impls"))]
+            _ => crate::no_tls!(cx),
+        }
     }

+    #[inline]
     fn call(&mut self, io: I) -> Self::Future {
-        let server_id = self.server_id.clone();
-        Box::pin(
-            // Connect to the server, sending the `server_name` SNI in the
-            // client handshake. The provided config should use the
-            // `AnySanVerifier` to ignore the server certificate's DNS SANs.
-            // Instead, we extract the server's leaf certificate after the
-            // handshake and verify that it matches the provided `server_id`.
-            tokio_rustls::TlsConnector::from(self.config.clone())
-                // XXX(eliza): it's a bummer that the server name has to be cloned here...
-                .connect(self.server_name.clone(), io)
-                .map(move |s| {
-                    let s = s?;
-                    let (_, conn) = s.get_ref();
-                    let end_cert = extract_cert(conn)?;
-                    verifier::verify_id(end_cert, &server_id)?;
-                    let io = ClientIo(s);
-                    let np = io.negotiated_protocol().map(|np| np.to_owned());
-                    Ok((io, np))
-                }),
-        )
+        match self {
+            #[cfg(feature = "boring")]
+            Self::Boring(connect) => ConnectFuture::Boring(connect.call(io)),
+
+            #[cfg(feature = "rustls")]
+            Self::Rustls(connect) => ConnectFuture::Rustls(connect.call(io)),
+
+            #[cfg(not(feature = "__has_any_tls_impls"))]
+            _ => crate::no_tls!(io),
+        }
+    }
+}
+
+// === impl ConnectFuture ===
+
+impl<I> Future for ConnectFuture<I>
+where
+    I: io::AsyncRead + io::AsyncWrite + Unpin,
+{
+    type Output = io::Result<(ClientIo<I>, Option<NegotiatedProtocol>)>;
+
+    #[inline]
+    fn poll(self: std::pin::Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        match self.project() {
+            #[cfg(feature = "boring")]
+            ConnectFutureProj::Boring(f) => {
+                let res = futures::ready!(f.poll(cx));
+                Poll::Ready(res.map(|io| {
+                    let np = io.negotiated_protocol().map(|np| np.to_owned());
+                    (ClientIo::Boring(io), np)
+                }))
+            }
+
+            #[cfg(feature = "rustls")]
+            ConnectFutureProj::Rustls(f) => {
+                let res = futures::ready!(f.poll(cx));
+                Poll::Ready(res.map(|io| {
+                    let np = io.negotiated_protocol().map(|np| np.to_owned());
+                    (ClientIo::Rustls(io), np)
+                }))
+            }
+
+            #[cfg(not(feature = "__has_any_tls_impls"))]
+            _ => crate::no_tls!(cx),
+        }
     }
 }
@ -129,59 +164,104 @@ where
 impl<I: io::AsyncRead + io::AsyncWrite + Unpin> io::AsyncRead for ClientIo<I> {
     #[inline]
     fn poll_read(
-        mut self: Pin<&mut Self>,
+        self: Pin<&mut Self>,
         cx: &mut Context<'_>,
         buf: &mut io::ReadBuf<'_>,
     ) -> io::Poll<()> {
-        Pin::new(&mut self.0).poll_read(cx, buf)
+        match self.project() {
+            #[cfg(feature = "boring")]
+            ClientIoProj::Boring(io) => io.poll_read(cx, buf),
+
+            #[cfg(feature = "rustls")]
+            ClientIoProj::Rustls(io) => io.poll_read(cx, buf),
+
+            #[cfg(not(feature = "__has_any_tls_impls"))]
+            _ => crate::no_tls!(cx, buf),
+        }
     }
 }

 impl<I: io::AsyncRead + io::AsyncWrite + Unpin> io::AsyncWrite for ClientIo<I> {
     #[inline]
-    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
-        Pin::new(&mut self.0).poll_flush(cx)
+    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
+        match self.project() {
+            #[cfg(feature = "boring")]
+            ClientIoProj::Boring(io) => io.poll_flush(cx),
+
+            #[cfg(feature = "rustls")]
+            ClientIoProj::Rustls(io) => io.poll_flush(cx),
+
+            #[cfg(not(feature = "__has_any_tls_impls"))]
+            _ => crate::no_tls!(cx),
+        }
     }

     #[inline]
-    fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
-        Pin::new(&mut self.0).poll_shutdown(cx)
+    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> io::Poll<()> {
+        match self.project() {
+            #[cfg(feature = "boring")]
+            ClientIoProj::Boring(io) => io.poll_shutdown(cx),
+
+            #[cfg(feature = "rustls")]
+            ClientIoProj::Rustls(io) => io.poll_shutdown(cx),
+
+            #[cfg(not(feature = "__has_any_tls_impls"))]
+            _ => crate::no_tls!(cx),
+        }
     }

     #[inline]
-    fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> io::Poll<usize> {
-        Pin::new(&mut self.0).poll_write(cx, buf)
+    fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> io::Poll<usize> {
+        match self.project() {
+            #[cfg(feature = "boring")]
+            ClientIoProj::Boring(io) => io.poll_write(cx, buf),
+
+            #[cfg(feature = "rustls")]
+            ClientIoProj::Rustls(io) => io.poll_write(cx, buf),
+
+            #[cfg(not(feature = "__has_any_tls_impls"))]
+            _ => crate::no_tls!(cx, buf),
+        }
     }

     #[inline]
     fn poll_write_vectored(
-        mut self: Pin<&mut Self>,
+        self: Pin<&mut Self>,
         cx: &mut Context<'_>,
         bufs: &[io::IoSlice<'_>],
-    ) -> io::Poll<usize> {
-        Pin::new(&mut self.0).poll_write_vectored(cx, bufs)
+    ) -> Poll<Result<usize, std::io::Error>> {
+        match self.project() {
+            #[cfg(feature = "boring")]
+            ClientIoProj::Boring(io) => io.poll_write_vectored(cx, bufs),
+
+            #[cfg(feature = "rustls")]
+            ClientIoProj::Rustls(io) => io.poll_write_vectored(cx, bufs),
+
+            #[cfg(not(feature = "__has_any_tls_impls"))]
+            _ => crate::no_tls!(cx, bufs),
+        }
     }

     #[inline]
     fn is_write_vectored(&self) -> bool {
-        self.0.is_write_vectored()
-    }
-}
-
-impl<I> ClientIo<I> {
-    #[inline]
-    pub fn negotiated_protocol(&self) -> Option<NegotiatedProtocolRef<'_>> {
-        self.0
-            .get_ref()
-            .1
-            .alpn_protocol()
-            .map(NegotiatedProtocolRef)
+        match self {
+            #[cfg(feature = "boring")]
+            Self::Boring(io) => io.is_write_vectored(),
+
+            #[cfg(feature = "rustls")]
+            Self::Rustls(io) => io.is_write_vectored(),
+
+            #[cfg(not(feature = "__has_any_tls_impls"))]
+            _ => crate::no_tls!(),
+        }
     }
 }

 impl<I: io::PeerAddr> io::PeerAddr for ClientIo<I> {
     #[inline]
     fn peer_addr(&self) -> io::Result<std::net::SocketAddr> {
-        self.0.get_ref().0.peer_addr()
+        match self {
+            #[cfg(feature = "boring")]
+            Self::Boring(io) => io.peer_addr(),
+
+            #[cfg(feature = "rustls")]
+            Self::Rustls(io) => io.peer_addr(),
+
+            #[cfg(not(feature = "__has_any_tls_impls"))]
+            _ => crate::no_tls!(),
+        }
     }
 }
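
Both `ConnectFuture` and `ClientIo` bottom out in `crate::no_tls!` when neither backend feature is enabled. The macro itself is defined outside this diff; a plausible sketch (an assumption, not the crate's actual definition) consumes its arguments to keep them from being flagged unused, then diverges:

#[cfg(not(feature = "__has_any_tls_impls"))]
macro_rules! no_tls {
    ($($arg:expr),*) => {{
        // Touch each argument so the compiler doesn't warn about unused
        // bindings, then diverge: no TLS backend was compiled in.
        $(let _ = $arg;)*
        unreachable!("compiled without a TLS implementation")
    }};
}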

View File

@ -1,101 +1,122 @@
-mod receiver;
-mod store;
-pub(crate) mod verify;
-
-pub use self::{receiver::Receiver, store::Store};
+use std::time::SystemTime;
+
+use crate::{NewClient, Server};
 use linkerd_dns_name as dns;
 use linkerd_error::Result;
-use linkerd_identity as id;
-use std::sync::Arc;
-use thiserror::Error;
-use tokio::sync::watch;
-use tokio_rustls::rustls::{self};
-use tracing::warn;
-
-#[derive(Debug, Error)]
-#[error("invalid trust roots")]
-pub struct InvalidTrustRoots(());
-
-pub fn watch(
-    local_id: id::Id,
-    server_name: dns::Name,
-    roots_pem: &str,
-) -> Result<(Store, Receiver)> {
-    let mut roots = rustls::RootCertStore::empty();
-    let certs = match rustls_pemfile::certs(&mut std::io::Cursor::new(roots_pem))
-        .collect::<Result<Vec<_>, _>>()
-    {
-        Err(error) => {
-            warn!(%error, "invalid trust anchors file");
-            return Err(error.into());
-        }
-        Ok(certs) if certs.is_empty() => {
-            warn!("no valid certs in trust anchors file");
-            return Err("no trust roots in PEM file".into());
-        }
-        Ok(certs) => certs,
-    };
-
-    let (added, skipped) = roots.add_parsable_certificates(certs);
-    if skipped != 0 {
-        warn!("Skipped {} invalid trust anchors", skipped);
-    }
-    if added == 0 {
-        return Err("no trust roots loaded".into());
-    }
-
-    // XXX: Rustls's built-in verifiers don't let us tweak things as fully as we'd like (e.g.
-    // controlling the set of trusted signature algorithms), but they provide good enough
-    // defaults for now.
-    // TODO: lock down the verification further.
-    let server_cert_verifier = Arc::new(verify::AnySanVerifier::new(roots.clone()));
-
-    let (client_tx, client_rx) = {
-        // Since we don't have a certificate yet, build a client configuration
-        // that doesn't attempt client authentication. Once we get a
-        // certificate, the `Store` will publish a new configuration with a
-        // client certificate resolver.
-        let mut c =
-            store::client_config_builder(server_cert_verifier.clone()).with_no_client_auth();
-
-        // Disable session resumption for the time-being until resumption is
-        // more tested.
-        c.resumption = rustls::client::Resumption::disabled();
-
-        watch::channel(Arc::new(c))
-    };
-
-    let (server_tx, server_rx) = {
-        // Since we don't have a certificate yet, use an empty cert resolver so
-        // that handshaking always fails. Once we get a certificate, the `Store`
-        // will publish a new configuration with a server certificate resolver.
-        let empty_resolver = Arc::new(rustls::server::ResolvesServerCertUsingSni::new());
-        watch::channel(store::server_config(roots.clone(), empty_resolver))
-    };
-
-    let rx = Receiver::new(local_id.clone(), server_name.clone(), client_rx, server_rx);
-    let store = Store::new(
-        roots,
-        server_cert_verifier,
-        local_id,
-        server_name,
-        client_tx,
-        server_tx,
-    );
-    Ok((store, rx))
-}
-
-#[cfg(feature = "test-util")]
-pub fn for_test(ent: &linkerd_tls_test_util::Entity) -> (Store, Receiver) {
-    watch(
-        ent.name.parse().expect("id must be valid"),
-        ent.name.parse().expect("name must be valid"),
-        std::str::from_utf8(ent.trust_anchors).expect("roots must be PEM"),
-    )
-    .expect("credentials must be valid")
-}
-
-#[cfg(feature = "test-util")]
-pub fn default_for_test() -> (Store, Receiver) {
-    for_test(&linkerd_tls_test_util::FOO_NS1)
-}
+use linkerd_identity::{Credentials, DerX509, Id};
+
+#[cfg(feature = "boring")]
+pub use crate::boring;
+
+#[cfg(feature = "rustls")]
+pub use crate::rustls;
+
+pub enum Store {
+    #[cfg(feature = "boring")]
+    Boring(boring::creds::Store),
+
+    #[cfg(feature = "rustls")]
+    Rustls(rustls::creds::Store),
+
+    #[cfg(not(feature = "__has_any_tls_impls"))]
+    NoTls,
+}
+
+#[derive(Clone, Debug)]
+pub enum Receiver {
+    #[cfg(feature = "boring")]
+    Boring(boring::creds::Receiver),
+
+    #[cfg(feature = "rustls")]
+    Rustls(rustls::creds::Receiver),
+
+    #[cfg(not(feature = "__has_any_tls_impls"))]
+    NoTls,
+}
+
+// === impl Store ===
+
+impl Credentials for Store {
+    fn set_certificate(
+        &mut self,
+        leaf: DerX509,
+        chain: Vec<DerX509>,
+        key: Vec<u8>,
+        exp: SystemTime,
+    ) -> Result<()> {
+        match self {
+            #[cfg(feature = "boring")]
+            Self::Boring(store) => store.set_certificate(leaf, chain, key, exp),
+
+            #[cfg(feature = "rustls")]
+            Self::Rustls(store) => store.set_certificate(leaf, chain, key, exp),
+
+            #[cfg(not(feature = "__has_any_tls_impls"))]
+            _ => crate::no_tls!(leaf, chain, key, exp),
+        }
+    }
+}
+
+// === impl Receiver ===
+
+#[cfg(feature = "boring")]
+impl From<boring::creds::Receiver> for Receiver {
+    fn from(rx: boring::creds::Receiver) -> Self {
+        Self::Boring(rx)
+    }
+}
+
+#[cfg(feature = "rustls")]
+impl From<rustls::creds::Receiver> for Receiver {
+    fn from(rx: rustls::creds::Receiver) -> Self {
+        Self::Rustls(rx)
+    }
+}
+
+impl Receiver {
+    pub fn local_id(&self) -> &Id {
+        match self {
+            #[cfg(feature = "boring")]
+            Self::Boring(receiver) => receiver.local_id(),
+
+            #[cfg(feature = "rustls")]
+            Self::Rustls(receiver) => receiver.local_id(),
+
+            #[cfg(not(feature = "__has_any_tls_impls"))]
+            _ => crate::no_tls!(),
+        }
+    }
+
+    pub fn server_name(&self) -> &dns::Name {
+        match self {
+            #[cfg(feature = "boring")]
+            Self::Boring(receiver) => receiver.server_name(),
+
+            #[cfg(feature = "rustls")]
+            Self::Rustls(receiver) => receiver.server_name(),
+
+            #[cfg(not(feature = "__has_any_tls_impls"))]
+            _ => crate::no_tls!(),
+        }
+    }
+
+    pub fn new_client(&self) -> NewClient {
+        match self {
+            #[cfg(feature = "boring")]
+            Self::Boring(receiver) => NewClient::Boring(receiver.new_client()),
+
+            #[cfg(feature = "rustls")]
+            Self::Rustls(receiver) => NewClient::Rustls(receiver.new_client()),
+
+            #[cfg(not(feature = "__has_any_tls_impls"))]
+            _ => crate::no_tls!(),
+        }
+    }
+
+    pub fn server(&self) -> Server {
+        match self {
+            #[cfg(feature = "boring")]
+            Self::Boring(receiver) => Server::Boring(receiver.server()),
+
+            #[cfg(feature = "rustls")]
+            Self::Rustls(receiver) => Server::Rustls(receiver.server()),
+
+            #[cfg(not(feature = "__has_any_tls_impls"))]
+            _ => crate::no_tls!(),
+        }
+    }
+}

Some files were not shown because too many files have changed in this diff.