commit 9299684e88
Merge branch 'main' into sfleen/opentelemetry-vendor

# Conflicts:
#   Cargo.lock
#   opentelemetry-proto/src/gen/opentelemetry.proto.collector.trace.v1.rs
@@ -28,7 +28,7 @@ jobs:
 continue-on-error: true
 steps:
 - run: rustup toolchain install --profile=minimal beta
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
 - run: git config --global --add safe.directory "$PWD" # actions/runner#2033
 - run: just toolchain=beta fetch
 - run: just toolchain=beta build
@@ -23,7 +23,7 @@ jobs:
 timeout-minutes: 5
 runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
 - id: changed
 uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c
 with:
@@ -48,7 +48,7 @@ jobs:
 env:
 CXX: "/usr/bin/clang++-19"
 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
 - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0
 - run: cargo tarpaulin --locked --workspace --exclude=linkerd2-proxy --exclude=linkerd-transport-header --exclude=opencensus-proto --exclude=spire-proto --no-run
 - run: cargo tarpaulin --locked --workspace --exclude=linkerd2-proxy --exclude=linkerd-transport-header --exclude=opencensus-proto --exclude=spire-proto --skip-clean --ignore-tests --no-fail-fast --out=Xml
@@ -30,7 +30,7 @@ jobs:
 container: docker://rust:1.88.0
 steps:
 - run: apt update && apt install -y jo
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
 - run: git config --global --add safe.directory "$PWD" # actions/runner#2033
 - uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c
 id: changed-files
@@ -55,7 +55,7 @@ jobs:
 steps:
 - run: rustup toolchain add nightly
 - run: cargo install cargo-fuzz
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
 - run: git config --global --add safe.directory "$PWD" # actions/runner#2033
 - working-directory: ${{matrix.dir}}
 run: cargo +nightly fetch
@@ -14,7 +14,7 @@ jobs:
 timeout-minutes: 5
 runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
 - uses: DavidAnson/markdownlint-cli2-action@992badcdf24e3b8eb7e87ff9287fe931bcb00c6e
 with:
 globs: "**/*.md"
@@ -28,7 +28,7 @@ jobs:
 continue-on-error: true
 steps:
 - run: rustup toolchain install --profile=minimal nightly
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
 - run: git config --global --add safe.directory "$PWD" # actions/runner#2033
 - run: just toolchain=nightly fetch
 - run: just toolchain=nightly profile=release build
@@ -16,7 +16,7 @@ jobs:
 timeout-minutes: 5
 runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
 - id: build
 uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c
 with:
@@ -77,7 +77,7 @@ jobs:
 runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
 steps:
 - uses: linkerd/dev/actions/setup-tools@v47
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
 - run: just action-lint
 - run: just action-dev-check
@@ -91,7 +91,7 @@ jobs:
 timeout-minutes: 20
 steps:
 - run: git config --global --add safe.directory "$PWD" # actions/runner#2033
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
 - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0
 - run: just fetch
 - run: cargo deny --all-features check bans licenses sources
@@ -114,7 +114,7 @@ jobs:
 crate: ${{ fromJson(needs.meta.outputs.cargo_crates) }}
 steps:
 - run: git config --global --add safe.directory "$PWD" # actions/runner#2033
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
 - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0
 - run: just fetch
 - run: just check-crate ${{ matrix.crate }}
@@ -136,7 +136,7 @@ jobs:
 tag=$(linkerd version --client --short)
 echo "linkerd $tag"
 echo "LINKERD_TAG=$tag" >> "$GITHUB_ENV"
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
 - run: just docker
 - run: just k3d-create
 - run: just k3d-load-linkerd
@@ -168,7 +168,7 @@ jobs:
 if: contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled')
 run: exit 1

-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
 if: needs.meta.outputs.is_dependabot == 'true' && needs.meta.outputs.any_changed == 'true'
 - name: "Merge dependabot changes"
 if: needs.meta.outputs.is_dependabot == 'true' && needs.meta.outputs.any_changed == 'true'
@@ -44,7 +44,7 @@ jobs:
 runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
 timeout-minutes: 5
 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
 - name: Check if the most recent commit is after the last release
 id: recency
 env:
@ -61,7 +61,23 @@ jobs:
|
|||
timeout-minutes: 5
|
||||
runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
|
||||
steps:
|
||||
- id: meta
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
|
||||
if: github.event_name == 'pull_request'
|
||||
- id: workflow
|
||||
if: github.event_name == 'pull_request'
|
||||
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c
|
||||
with:
|
||||
files: |
|
||||
.github/workflows/release.yml
|
||||
- id: build
|
||||
if: github.event_name == 'pull_request'
|
||||
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c
|
||||
with:
|
||||
files: |
|
||||
justfile
|
||||
Cargo.toml
|
||||
|
||||
- id: version
|
||||
env:
|
||||
VERSION: ${{ inputs.version }}
|
||||
shell: bash
|
||||
|
|
@ -69,40 +85,38 @@ jobs:
|
|||
set -euo pipefail
|
||||
shopt -s extglob
|
||||
if [[ "$GITHUB_EVENT_NAME" == pull_request ]]; then
|
||||
echo version="0.0.0-test.${GITHUB_SHA:0:7}"
|
||||
echo archs='["amd64"]'
|
||||
echo oses='["linux"]'
|
||||
echo version="0.0.0-test.${GITHUB_SHA:0:7}" >> "$GITHUB_OUTPUT"
|
||||
exit 0
|
||||
fi >> "$GITHUB_OUTPUT"
|
||||
fi
|
||||
if ! [[ "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+(-[0-9A-Za-z-]+)?(\+[0-9A-Za-z-]+)?$ ]]; then
|
||||
echo "Invalid version: $VERSION" >&2
|
||||
exit 1
|
||||
fi
|
||||
( echo version="${VERSION#v}"
|
||||
echo archs='["amd64", "arm64", "arm"]'
|
||||
echo version="${VERSION#v}" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- id: platform
|
||||
shell: bash
|
||||
env:
|
||||
WORKFLOW_CHANGED: ${{ steps.workflow.outputs.any_changed }}
|
||||
run: |
|
||||
if [[ "$GITHUB_EVENT_NAME" == pull_request && "$WORKFLOW_CHANGED" != 'true' ]]; then
|
||||
( echo archs='["amd64"]'
|
||||
echo oses='["linux"]' ) >> "$GITHUB_OUTPUT"
|
||||
exit 0
|
||||
fi
|
||||
( echo archs='["amd64", "arm64"]'
|
||||
echo oses='["linux", "windows"]'
|
||||
) >> "$GITHUB_OUTPUT"
|
||||
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
|
||||
if: github.event_name == 'pull_request'
|
||||
- id: changed
|
||||
if: github.event_name == 'pull_request'
|
||||
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c
|
||||
with:
|
||||
files: |
|
||||
.github/workflows/release.yml
|
||||
justfile
|
||||
Cargo.toml
|
||||
|
||||
outputs:
|
||||
archs: ${{ steps.meta.outputs.archs }}
|
||||
oses: ${{ steps.meta.outputs.oses }}
|
||||
version: ${{ steps.meta.outputs.version }}
|
||||
package: ${{ github.event_name == 'workflow_dispatch' || steps.changed.outputs.any_changed == 'true' }}
|
||||
archs: ${{ steps.platform.outputs.archs }}
|
||||
oses: ${{ steps.platform.outputs.oses }}
|
||||
version: ${{ steps.version.outputs.version }}
|
||||
package: ${{ github.event_name == 'workflow_dispatch' || steps.build.outputs.any_changed == 'true' || steps.workflow.outputs.any_changed == 'true' }}
|
||||
profile: ${{ inputs.profile || 'release' }}
|
||||
publish: ${{ inputs.publish }}
|
||||
ref: ${{ inputs.ref || github.sha }}
|
||||
tag: "${{ inputs.tag-prefix || 'release/' }}v${{ steps.meta.outputs.version }}"
|
||||
tag: "${{ inputs.tag-prefix || 'release/' }}v${{ steps.version.outputs.version }}"
|
||||
prerelease: ${{ inputs.prerelease }}
|
||||
draft: ${{ inputs.draft }}
|
||||
latest: ${{ inputs.latest }}
|
||||
|
|
@@ -135,8 +149,6 @@ jobs:
 exclude:
 - os: windows
 arch: arm64
-- os: windows
-arch: arm

 # If we're not actually building on a release tag, don't short-circuit on
 # errors. This helps us know whether a failure is platform-specific.
@ -151,15 +163,19 @@ jobs:
|
|||
# TODO: add to dev image
|
||||
- name: Install MiniGW
|
||||
if: matrix.os == 'windows'
|
||||
run: apt-get update && apt-get install mingw-w64 -y
|
||||
run: apt-get update && apt-get install -y mingw-w64
|
||||
- name: Install cross compilation toolchain
|
||||
if: matrix.arch == 'arm64'
|
||||
run: apt-get update && apt-get install -y binutils-aarch64-linux-gnu
|
||||
|
||||
- name: Configure git
|
||||
run: git config --global --add safe.directory "$PWD" # actions/runner#2033
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
|
||||
with:
|
||||
ref: ${{ needs.meta.outputs.ref }}
|
||||
- uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0
|
||||
with:
|
||||
key: ${{ matrix.arch }}
|
||||
key: ${{ matrix.os }}-${{ matrix.arch }}
|
||||
- run: just fetch
|
||||
- run: just arch=${{ matrix.arch }} libc=${{ matrix.libc }} os=${{ matrix.os }} rustup
|
||||
- run: just arch=${{ matrix.arch }} libc=${{ matrix.libc }} os=${{ matrix.os }} profile=${{ needs.meta.outputs.profile }} build
|
||||
|
|
@@ -188,7 +204,7 @@ jobs:
 git config --global user.name "$GITHUB_USERNAME"
 git config --global user.email "$GITHUB_USERNAME"@users.noreply.github.com
 # Tag the release.
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
 with:
 token: ${{ secrets.LINKERD2_PROXY_GITHUB_TOKEN || github.token }}
 ref: ${{ needs.meta.outputs.ref }}
@@ -16,5 +16,5 @@ jobs:
 runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
 steps:
 - uses: linkerd/dev/actions/setup-tools@v47
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
 - run: just sh-lint
@@ -16,7 +16,7 @@ jobs:
 runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
 container: ghcr.io/linkerd/dev:v47-rust
 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
 - run: git config --global --add safe.directory "$PWD" # actions/runner#2033
 - run: |
 VERSION_REGEX='channel = "([0-9]+\.[0-9]+\.[0-9]+)"'
@@ -38,7 +38,7 @@ jobs:
 runs-on: ${{ vars.LINKERD2_PROXY_RUNNER || 'ubuntu-24.04' }}
 steps:
 - uses: linkerd/dev/actions/setup-tools@v47
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
 - shell: bash
 run: |
 VERSION_REGEX='channel = "([0-9]+\.[0-9]+\.[0-9]+)"'
Cargo.lock (431 changed lines; diff suppressed because it is too large)
@@ -116,8 +116,8 @@ prost-types = { version = "0.13" }
 tokio-rustls = { version = "0.26", default-features = false, features = [
 "logging",
 ] }
-tonic = { version = "0.12", default-features = false }
-tonic-build = { version = "0.12", default-features = false }
+tonic = { version = "0.13", default-features = false }
+tonic-build = { version = "0.13", default-features = false }
 tower = { version = "0.5", default-features = false }
 tower-service = { version = "0.3" }
 tower-test = { version = "0.4" }
@@ -134,4 +134,4 @@ default-features = false
 features = ["tokio", "tracing"]

 [workspace.dependencies.linkerd2-proxy-api]
-version = "0.16.0"
+version = "0.17.0"
@@ -14,11 +14,16 @@ FROM $LINKERD2_IMAGE as linkerd2
 FROM --platform=$BUILDPLATFORM $RUST_IMAGE as fetch

 ARG PROXY_FEATURES=""
+ARG TARGETARCH="amd64"
 RUN apt-get update && \
 apt-get install -y time && \
 if [[ "$PROXY_FEATURES" =~ .*meshtls-boring.* ]] ; then \
 apt-get install -y golang ; \
 fi && \
+case "$TARGETARCH" in \
+amd64) true ;; \
+arm64) apt-get install --no-install-recommends -y binutils-aarch64-linux-gnu ;; \
+esac && \
 rm -rf /var/lib/apt/lists/*

 ENV CARGO_NET_RETRY=10
@@ -33,7 +38,6 @@ RUN --mount=type=cache,id=cargo,target=/usr/local/cargo/registry \
 FROM fetch as build
 ENV CARGO_INCREMENTAL=0
 ENV RUSTFLAGS="-D warnings -A deprecated --cfg tokio_unstable"
-ARG TARGETARCH="amd64"
 ARG PROFILE="release"
 ARG LINKERD2_PROXY_VERSION=""
 ARG LINKERD2_PROXY_VENDOR=""
deny.toml (18 changed lines)
@@ -2,7 +2,6 @@
 targets = [
 { triple = "x86_64-unknown-linux-gnu" },
 { triple = "aarch64-unknown-linux-gnu" },
-{ triple = "armv7-unknown-linux-gnu" },
 ]

 [advisories]
@@ -24,11 +23,6 @@ allow = [
 private = { ignore = true }
 confidence-threshold = 0.8
 exceptions = [
-{ allow = [
-"ISC",
-"MIT",
-"OpenSSL",
-], name = "ring", version = "*" },
 { allow = [
 "ISC",
 "OpenSSL",
@@ -39,14 +33,6 @@ exceptions = [
 ], name = "aws-lc-fips-sys", version = "*" },
 ]

-[[licenses.clarify]]
-name = "ring"
-version = "*"
-expression = "MIT AND ISC AND OpenSSL"
-license-files = [
-{ path = "LICENSE", hash = 0xbd0eed23 },
-]

 [bans]
 multiple-versions = "deny"
 # Wildcard dependencies are used for all workspace-local crates.
@@ -56,6 +42,8 @@ deny = [
 { name = "rustls", wrappers = ["tokio-rustls"] },
 # rustls-webpki should be used instead.
 { name = "webpki" },
+# aws-lc-rs should be used instead.
+{ name = "ring" }
 ]
 skip = [
 # `linkerd-trace-context`, `rustls-pemfile` and `tonic` depend on `base64`
@@ -76,6 +64,8 @@ skip-tree = [
 { name = "pprof" },
 # aws-lc-rs uses a slightly outdated version of bindgen
 { name = "bindgen", version = "0.69.5" },
+# socket v0.6 is still propagating through the ecosystem
+{ name = "socket2", version = "0.5" },
 ]

 [sources]
justfile (10 changed lines)
@@ -18,6 +18,10 @@ features := ""
 export LINKERD2_PROXY_VERSION := env_var_or_default("LINKERD2_PROXY_VERSION", "0.0.0-dev" + `git rev-parse --short HEAD`)
 export LINKERD2_PROXY_VENDOR := env_var_or_default("LINKERD2_PROXY_VENDOR", `whoami` + "@" + `hostname`)

+# TODO: these variables will be included in dev v48
+export AWS_LC_SYS_CFLAGS_aarch64_unknown_linux_gnu := env_var_or_default("AWS_LC_SYS_CFLAGS_aarch64_unknown_linux_gnu", "-fuse-ld=/usr/aarch64-linux-gnu/bin/ld")
+export AWS_LC_SYS_CFLAGS_aarch64_unknown_linux_musl := env_var_or_default("AWS_LC_SYS_CFLAGS_aarch64_unknown_linux_musl", "-fuse-ld=/usr/aarch64-linux-gnu/bin/ld")
+
 # The version name to use for packages.
 package_version := "v" + LINKERD2_PROXY_VERSION
@@ -26,7 +30,7 @@ docker-repo := "localhost/linkerd/proxy"
 docker-tag := `git rev-parse --abbrev-ref HEAD | sed 's|/|.|g'` + "." + `git rev-parse --short HEAD`
 docker-image := docker-repo + ":" + docker-tag

-# The architecture name to use for packages. Either 'amd64', 'arm64', or 'arm'.
+# The architecture name to use for packages. Either 'amd64' or 'arm64'.
 arch := "amd64"
 # The OS name to use for packages. Either 'linux' or 'windows'.
 os := "linux"
@@ -39,8 +43,6 @@ _target := if os + '-' + arch == "linux-amd64" {
 "x86_64-unknown-linux-" + libc
 } else if os + '-' + arch == "linux-arm64" {
 "aarch64-unknown-linux-" + libc
-} else if os + '-' + arch == "linux-arm" {
-"armv7-unknown-linux-" + libc + "eabihf"
 } else if os + '-' + arch == "windows-amd64" {
 "x86_64-pc-windows-" + libc
 } else {
@@ -139,7 +141,7 @@ _strip:

 _package_bin := _package_dir / "bin" / "linkerd2-proxy"

-# XXX {aarch64,arm}-musl builds do not enable PIE, so we use target-specific
+# XXX aarch64-musl builds do not enable PIE, so we use target-specific
 # files to document those differences.
 _expected_checksec := '.checksec' / arch + '-' + libc + '.json'
@@ -13,7 +13,7 @@
 use futures::future::{self, TryFutureExt};
 use http::StatusCode;
 use linkerd_app_core::{
-metrics::{self as metrics, FmtMetrics},
+metrics::{self as metrics, legacy::FmtMetrics},
 proxy::http::{Body, BoxBody, ClientHandle, Request, Response},
 trace, Error, Result,
 };
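The admin server now reaches the text-format metrics traits through the new `legacy` module instead of the crate root. A minimal sketch of what a consumer of that re-export can look like, assuming only the relocated `legacy::FmtMetrics` path shown in this diff; `Uptime` and the metric name are invented for illustration:

use linkerd_metrics::legacy::FmtMetrics;
use std::fmt;

/// Hypothetical example type; not part of the proxy.
struct Uptime(u64);

impl FmtMetrics for Uptime {
    // Same signature the trait keeps after the move behind `legacy`.
    fn fmt_metrics(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        writeln!(f, "# TYPE process_uptime_seconds counter")?;
        writeln!(f, "process_uptime_seconds {}", self.0)
    }
}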
@ -32,7 +32,7 @@ pub use self::readiness::{Latch, Readiness};
|
|||
|
||||
#[derive(Clone)]
|
||||
pub struct Admin<M> {
|
||||
metrics: metrics::Serve<M>,
|
||||
metrics: metrics::legacy::Serve<M>,
|
||||
tracing: trace::Handle,
|
||||
ready: Readiness,
|
||||
shutdown_tx: mpsc::UnboundedSender<()>,
|
||||
|
|
@ -52,7 +52,7 @@ impl<M> Admin<M> {
|
|||
tracing: trace::Handle,
|
||||
) -> Self {
|
||||
Self {
|
||||
metrics: metrics::Serve::new(metrics),
|
||||
metrics: metrics::legacy::Serve::new(metrics),
|
||||
ready,
|
||||
shutdown_tx,
|
||||
enable_shutdown,
|
||||
|
|
|
|||
|
|
@ -2,7 +2,7 @@ use linkerd_app_core::{
|
|||
classify,
|
||||
config::ServerConfig,
|
||||
drain, errors, identity,
|
||||
metrics::{self, FmtMetrics},
|
||||
metrics::{self, legacy::FmtMetrics},
|
||||
proxy::http,
|
||||
serve,
|
||||
svc::{self, ExtractParam, InsertParam, Param},
|
||||
|
|
|
|||
|
|
@ -13,31 +13,24 @@ independently of the inbound and outbound proxy logic.
|
|||
"""
|
||||
|
||||
[dependencies]
|
||||
bytes = { workspace = true }
|
||||
drain = { workspace = true, features = ["retain"] }
|
||||
http = { workspace = true }
|
||||
http-body = { workspace = true }
|
||||
http-body-util = { workspace = true }
|
||||
hyper = { workspace = true, features = ["http1", "http2"] }
|
||||
hyper-util = { workspace = true }
|
||||
futures = { version = "0.3", default-features = false }
|
||||
ipnet = "2.11"
|
||||
prometheus-client = { workspace = true }
|
||||
regex = "1"
|
||||
serde_json = "1"
|
||||
thiserror = "2"
|
||||
tokio = { version = "1", features = ["macros", "sync", "parking_lot"] }
|
||||
tokio-rustls = { workspace = true }
|
||||
tokio-stream = { version = "0.1", features = ["time"] }
|
||||
tonic = { workspace = true, default-features = false, features = ["prost"] }
|
||||
tracing = { workspace = true }
|
||||
parking_lot = "0.12"
|
||||
pin-project = "1"
|
||||
|
||||
linkerd-addr = { path = "../../addr" }
|
||||
linkerd-conditional = { path = "../../conditional" }
|
||||
linkerd-dns = { path = "../../dns" }
|
||||
linkerd-duplex = { path = "../../duplex" }
|
||||
linkerd-errno = { path = "../../errno" }
|
||||
linkerd-error = { path = "../../error" }
|
||||
linkerd-error-respond = { path = "../../error-respond" }
|
||||
linkerd-exp-backoff = { path = "../../exp-backoff" }
|
||||
|
|
@ -83,5 +76,6 @@ features = ["make", "spawn-ready", "timeout", "util", "limit"]
|
|||
semver = "1"
|
||||
|
||||
[dev-dependencies]
|
||||
bytes = { workspace = true }
|
||||
http-body-util = { workspace = true }
|
||||
linkerd-mock-http-body = { path = "../../mock/http-body" }
|
||||
quickcheck = { version = "1", default-features = false }
|
||||
|
|
|
|||
|
|
@@ -101,7 +101,7 @@ impl Config {
 identity: identity::NewClient,
 ) -> svc::ArcNewService<
 (),
-svc::BoxCloneSyncService<http::Request<tonic::body::BoxBody>, http::Response<RspBody>>,
+svc::BoxCloneSyncService<http::Request<tonic::body::Body>, http::Response<RspBody>>,
 > {
 let addr = self.addr;
 tracing::trace!(%addr, "Building");
@@ -25,6 +25,7 @@ pub mod metrics;
 pub mod proxy;
 pub mod serve;
 pub mod svc;
+pub mod tls_info;
 pub mod transport;

 pub use self::build_info::{BuildInfo, BUILD_INFO};
@ -166,7 +166,7 @@ where
|
|||
// === impl Metrics ===
|
||||
|
||||
impl Metrics {
|
||||
pub fn new(retain_idle: Duration) -> (Self, impl FmtMetrics + Clone + Send + 'static) {
|
||||
pub fn new(retain_idle: Duration) -> (Self, impl legacy::FmtMetrics + Clone + Send + 'static) {
|
||||
let (control, control_report) = {
|
||||
let m = http_metrics::Requests::<ControlLabels, Class>::default();
|
||||
let r = m.clone().into_report(retain_idle).with_prefix("control");
|
||||
|
|
@ -223,6 +223,7 @@ impl Metrics {
|
|||
opentelemetry,
|
||||
};
|
||||
|
||||
use legacy::FmtMetrics as _;
|
||||
let report = endpoint_report
|
||||
.and_report(profile_route_report)
|
||||
.and_report(retry_report)
|
||||
|
|
@ -248,7 +249,7 @@ impl svc::Param<ControlLabels> for control::ControlAddr {
|
|||
}
|
||||
}
|
||||
|
||||
impl FmtLabels for ControlLabels {
|
||||
impl legacy::FmtLabels for ControlLabels {
|
||||
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let Self { addr, server_id } = self;
|
||||
|
||||
|
|
@ -281,7 +282,7 @@ impl ProfileRouteLabels {
|
|||
}
|
||||
}
|
||||
|
||||
impl FmtLabels for ProfileRouteLabels {
|
||||
impl legacy::FmtLabels for ProfileRouteLabels {
|
||||
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let Self {
|
||||
direction,
|
||||
|
|
@ -314,7 +315,7 @@ impl From<OutboundEndpointLabels> for EndpointLabels {
|
|||
}
|
||||
}
|
||||
|
||||
impl FmtLabels for EndpointLabels {
|
||||
impl legacy::FmtLabels for EndpointLabels {
|
||||
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
Self::Inbound(i) => (Direction::In, i).fmt_labels(f),
|
||||
|
|
@ -323,7 +324,7 @@ impl FmtLabels for EndpointLabels {
|
|||
}
|
||||
}
|
||||
|
||||
impl FmtLabels for InboundEndpointLabels {
|
||||
impl legacy::FmtLabels for InboundEndpointLabels {
|
||||
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let Self {
|
||||
tls,
|
||||
|
|
@ -343,7 +344,7 @@ impl FmtLabels for InboundEndpointLabels {
|
|||
}
|
||||
}
|
||||
|
||||
impl FmtLabels for ServerLabel {
|
||||
impl legacy::FmtLabels for ServerLabel {
|
||||
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let Self(meta, port) = self;
|
||||
write!(
|
||||
|
|
@ -374,7 +375,7 @@ impl prom::EncodeLabelSetMut for ServerLabel {
|
|||
}
|
||||
}
|
||||
|
||||
impl FmtLabels for ServerAuthzLabels {
|
||||
impl legacy::FmtLabels for ServerAuthzLabels {
|
||||
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let Self { server, authz } = self;
|
||||
|
||||
|
|
@ -389,7 +390,7 @@ impl FmtLabels for ServerAuthzLabels {
|
|||
}
|
||||
}
|
||||
|
||||
impl FmtLabels for RouteLabels {
|
||||
impl legacy::FmtLabels for RouteLabels {
|
||||
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let Self { server, route } = self;
|
||||
|
||||
|
|
@ -404,7 +405,7 @@ impl FmtLabels for RouteLabels {
|
|||
}
|
||||
}
|
||||
|
||||
impl FmtLabels for RouteAuthzLabels {
|
||||
impl legacy::FmtLabels for RouteAuthzLabels {
|
||||
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let Self { route, authz } = self;
|
||||
|
||||
|
|
@ -425,7 +426,7 @@ impl svc::Param<OutboundZoneLocality> for OutboundEndpointLabels {
|
|||
}
|
||||
}
|
||||
|
||||
impl FmtLabels for OutboundEndpointLabels {
|
||||
impl legacy::FmtLabels for OutboundEndpointLabels {
|
||||
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let Self {
|
||||
server_id,
|
||||
|
|
@ -462,20 +463,20 @@ impl fmt::Display for Direction {
|
|||
}
|
||||
}
|
||||
|
||||
impl FmtLabels for Direction {
|
||||
impl legacy::FmtLabels for Direction {
|
||||
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "direction=\"{self}\"")
|
||||
}
|
||||
}
|
||||
|
||||
impl FmtLabels for Authority<'_> {
|
||||
impl legacy::FmtLabels for Authority<'_> {
|
||||
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let Self(authority) = self;
|
||||
write!(f, "authority=\"{authority}\"")
|
||||
}
|
||||
}
|
||||
|
||||
impl FmtLabels for Class {
|
||||
impl legacy::FmtLabels for Class {
|
||||
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let class = |ok: bool| if ok { "success" } else { "failure" };
|
||||
|
||||
|
|
@ -523,7 +524,7 @@ impl StackLabels {
|
|||
}
|
||||
}
|
||||
|
||||
impl FmtLabels for StackLabels {
|
||||
impl legacy::FmtLabels for StackLabels {
|
||||
fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let Self {
|
||||
direction,
|
||||
|
|
|
|||
|
|
@ -0,0 +1,76 @@
|
|||
use linkerd_metrics::prom;
|
||||
use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue, LabelValueEncoder};
|
||||
use std::{
|
||||
fmt::{Error, Write},
|
||||
sync::{Arc, OnceLock},
|
||||
};
|
||||
use tracing::error;
|
||||
|
||||
static TLS_INFO: OnceLock<Arc<TlsInfo>> = OnceLock::new();
|
||||
|
||||
#[derive(Clone, Debug, Default, Hash, PartialEq, Eq, EncodeLabelSet)]
|
||||
pub struct TlsInfo {
|
||||
tls_suites: MetricValueList,
|
||||
tls_kx_groups: MetricValueList,
|
||||
tls_rand: String,
|
||||
tls_key_provider: String,
|
||||
tls_fips: bool,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Default, Hash, PartialEq, Eq)]
|
||||
struct MetricValueList {
|
||||
values: Vec<&'static str>,
|
||||
}
|
||||
|
||||
impl FromIterator<&'static str> for MetricValueList {
|
||||
fn from_iter<T: IntoIterator<Item = &'static str>>(iter: T) -> Self {
|
||||
MetricValueList {
|
||||
values: iter.into_iter().collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl EncodeLabelValue for MetricValueList {
|
||||
fn encode(&self, encoder: &mut LabelValueEncoder<'_>) -> Result<(), Error> {
|
||||
for value in &self.values {
|
||||
value.encode(encoder)?;
|
||||
encoder.write_char(',')?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub fn metric() -> prom::Family<TlsInfo, prom::ConstGauge> {
|
||||
let fam = prom::Family::<TlsInfo, prom::ConstGauge>::new_with_constructor(|| {
|
||||
prom::ConstGauge::new(1)
|
||||
});
|
||||
|
||||
let Some(provider) = tokio_rustls::rustls::crypto::CryptoProvider::get_default() else {
|
||||
// If the crypto provider hasn't been initialized, we return the metrics family with an
|
||||
// empty set of metrics.
|
||||
error!("Initializing TLS info metric before crypto provider initialized, this is a bug!");
|
||||
return fam;
|
||||
};
|
||||
|
||||
let tls_info = TLS_INFO.get_or_init(|| {
|
||||
let tls_suites = provider
|
||||
.cipher_suites
|
||||
.iter()
|
||||
.flat_map(|cipher_suite| cipher_suite.suite().as_str())
|
||||
.collect::<MetricValueList>();
|
||||
let tls_kx_groups = provider
|
||||
.kx_groups
|
||||
.iter()
|
||||
.flat_map(|suite| suite.name().as_str())
|
||||
.collect::<MetricValueList>();
|
||||
Arc::new(TlsInfo {
|
||||
tls_suites,
|
||||
tls_kx_groups,
|
||||
tls_rand: format!("{:?}", provider.secure_random),
|
||||
tls_key_provider: format!("{:?}", provider.key_provider),
|
||||
tls_fips: provider.fips(),
|
||||
})
|
||||
});
|
||||
let _ = fam.get_or_create(tls_info);
|
||||
fam
|
||||
}
|
||||
|
|
@ -1,7 +1,7 @@
|
|||
use crate::metrics::ServerLabel as PolicyServerLabel;
|
||||
pub use crate::metrics::{Direction, OutboundEndpointLabels};
|
||||
use linkerd_conditional::Conditional;
|
||||
use linkerd_metrics::FmtLabels;
|
||||
use linkerd_metrics::legacy::FmtLabels;
|
||||
use linkerd_tls as tls;
|
||||
use std::{fmt, net::SocketAddr};
|
||||
|
||||
|
|
|
|||
|
|
@ -50,7 +50,7 @@ impl InboundMetrics {
|
|||
}
|
||||
}
|
||||
|
||||
impl FmtMetrics for InboundMetrics {
|
||||
impl legacy::FmtMetrics for InboundMetrics {
|
||||
fn fmt_metrics(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
self.http_authz.fmt_metrics(f)?;
|
||||
self.http_errors.fmt_metrics(f)?;
|
||||
|
|
|
|||
|
|
@ -1,8 +1,9 @@
|
|||
use crate::policy::{AllowPolicy, HttpRoutePermit, Meta, ServerPermit};
|
||||
use linkerd_app_core::{
|
||||
metrics::{
|
||||
metrics, Counter, FmtLabels, FmtMetrics, RouteAuthzLabels, RouteLabels, ServerAuthzLabels,
|
||||
ServerLabel, TargetAddr, TlsAccept,
|
||||
legacy::{Counter, FmtLabels, FmtMetrics},
|
||||
metrics, RouteAuthzLabels, RouteLabels, ServerAuthzLabels, ServerLabel, TargetAddr,
|
||||
TlsAccept,
|
||||
},
|
||||
tls,
|
||||
transport::OrigDstAddr,
|
||||
|
|
|
|||
|
|
@ -8,7 +8,7 @@ use crate::{
|
|||
};
|
||||
use linkerd_app_core::{
|
||||
errors::{FailFastError, LoadShedError},
|
||||
metrics::FmtLabels,
|
||||
metrics::legacy::FmtLabels,
|
||||
tls,
|
||||
};
|
||||
use std::fmt;
|
||||
|
|
|
|||
|
|
@ -1,6 +1,9 @@
|
|||
use super::ErrorKind;
|
||||
use linkerd_app_core::{
|
||||
metrics::{metrics, Counter, FmtMetrics, ServerLabel},
|
||||
metrics::{
|
||||
legacy::{Counter, FmtMetrics},
|
||||
metrics, ServerLabel,
|
||||
},
|
||||
svc::{self, stack::NewMonitor},
|
||||
transport::{labels::TargetAddr, OrigDstAddr},
|
||||
Error,
|
||||
|
|
|
|||
|
|
@ -1,6 +1,9 @@
|
|||
use super::ErrorKind;
|
||||
use linkerd_app_core::{
|
||||
metrics::{metrics, Counter, FmtMetrics},
|
||||
metrics::{
|
||||
legacy::{Counter, FmtMetrics},
|
||||
metrics,
|
||||
},
|
||||
svc::{self, stack::NewMonitor},
|
||||
transport::{labels::TargetAddr, OrigDstAddr},
|
||||
Error,
|
||||
|
|
|
|||
|
|
@@ -33,7 +33,7 @@ static INVALID_POLICY: once_cell::sync::OnceCell<ServerPolicy> = once_cell::sync

 impl<S> Api<S>
 where
-S: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error> + Clone,
+S: tonic::client::GrpcService<tonic::body::Body, Error = Error> + Clone,
 S::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error> + Send + 'static,
 {
 pub(super) fn new(
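Hunks like this one only re-spell the request-body type: tonic 0.13 exposes its boxed body as `tonic::body::Body` where 0.12 called it `tonic::body::BoxBody`. A hedged sketch of the bound rewrite; `watch_policies` is an invented function name, not part of this crate:

use tonic::{body::Body, client::GrpcService};

// Under tonic 0.12 this bound was spelled `GrpcService<tonic::body::BoxBody>`;
// the shape of the bound is otherwise unchanged.
fn watch_policies<S>(_client: S)
where
    S: GrpcService<Body> + Clone + Send + 'static,
    S::Future: Send,
{
    // ...issue the gRPC watch with `_client` here...
}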
@ -57,7 +57,7 @@ where
|
|||
|
||||
impl<S> Service<u16> for Api<S>
|
||||
where
|
||||
S: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
|
||||
S: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
|
||||
S: Clone + Send + Sync + 'static,
|
||||
S::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error> + Send + 'static,
|
||||
S::Future: Send + 'static,
|
||||
|
|
|
|||
|
|
@ -40,7 +40,7 @@ impl Config {
|
|||
limits: ReceiveLimits,
|
||||
) -> impl GetPolicy + Clone + Send + Sync + 'static
|
||||
where
|
||||
C: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
|
||||
C: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
|
||||
C: Clone + Unpin + Send + Sync + 'static,
|
||||
C::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error>,
|
||||
C::ResponseBody: Send + 'static,
|
||||
|
|
|
|||
|
|
@ -74,7 +74,7 @@ impl<S> Store<S> {
|
|||
opaque_ports: RangeInclusiveSet<u16>,
|
||||
) -> Self
|
||||
where
|
||||
S: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
|
||||
S: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
|
||||
S: Clone + Send + Sync + 'static,
|
||||
S::Future: Send,
|
||||
S::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error> + Send + 'static,
|
||||
|
|
@ -138,7 +138,7 @@ impl<S> Store<S> {
|
|||
|
||||
impl<S> GetPolicy for Store<S>
|
||||
where
|
||||
S: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
|
||||
S: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
|
||||
S: Clone + Send + Sync + 'static,
|
||||
S::Future: Send,
|
||||
S::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error> + Send + 'static,
|
||||
|
|
|
|||
|
|
@ -263,7 +263,7 @@ fn orig_dst_addr() -> OrigDstAddr {
|
|||
OrigDstAddr(([192, 0, 2, 2], 1000).into())
|
||||
}
|
||||
|
||||
impl tonic::client::GrpcService<tonic::body::BoxBody> for MockSvc {
|
||||
impl tonic::client::GrpcService<tonic::body::Body> for MockSvc {
|
||||
type ResponseBody = linkerd_app_core::control::RspBody;
|
||||
type Error = Error;
|
||||
type Future = futures::future::Pending<Result<http::Response<Self::ResponseBody>, Self::Error>>;
|
||||
|
|
@ -275,7 +275,7 @@ impl tonic::client::GrpcService<tonic::body::BoxBody> for MockSvc {
|
|||
unreachable!()
|
||||
}
|
||||
|
||||
fn call(&mut self, _req: http::Request<tonic::body::BoxBody>) -> Self::Future {
|
||||
fn call(&mut self, _req: http::Request<tonic::body::Body>) -> Self::Future {
|
||||
unreachable!()
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -27,7 +27,7 @@ impl Inbound<()> {
|
|||
limits: ReceiveLimits,
|
||||
) -> impl policy::GetPolicy + Clone + Send + Sync + 'static
|
||||
where
|
||||
C: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
|
||||
C: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
|
||||
C: Clone + Unpin + Send + Sync + 'static,
|
||||
C::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error>,
|
||||
C::ResponseBody: Send + 'static,
|
||||
|
|
|
|||
|
|
@@ -35,11 +35,11 @@ maplit = "1"
 parking_lot = "0.12"
 regex = "1"
 rustls-pemfile = "2.2"
-socket2 = "0.5"
+socket2 = "0.6"
 tokio = { version = "1", features = ["io-util", "net", "rt", "macros"] }
 tokio-rustls = { workspace = true }
 tokio-stream = { version = "0.1", features = ["sync"] }
-tonic = { workspace = true, features = ["transport"], default-features = false }
+tonic = { workspace = true, features = ["transport", "router"], default-features = false }
 tower = { workspace = true, default-features = false }
 tracing = { workspace = true }
@ -302,7 +302,7 @@ impl Controller {
|
|||
}
|
||||
|
||||
pub async fn run(self) -> controller::Listening {
|
||||
let svc = grpc::transport::Server::builder()
|
||||
let routes = grpc::service::Routes::default()
|
||||
.add_service(
|
||||
inbound_server_policies_server::InboundServerPoliciesServer::new(Server(Arc::new(
|
||||
self.inbound,
|
||||
|
|
@ -310,9 +310,9 @@ impl Controller {
|
|||
)
|
||||
.add_service(outbound_policies_server::OutboundPoliciesServer::new(
|
||||
Server(Arc::new(self.outbound)),
|
||||
))
|
||||
.into_service();
|
||||
controller::run(RoutesSvc(svc), "support policy controller", None).await
|
||||
));
|
||||
|
||||
controller::run(RoutesSvc(routes), "support policy controller", None).await
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -525,7 +525,9 @@ impl Service<Request<hyper::body::Incoming>> for RoutesSvc {
|
|||
|
||||
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
let Self(routes) = self;
|
||||
routes.poll_ready(cx)
|
||||
<grpc::service::Routes as Service<Request<UnsyncBoxBody<Bytes, grpc::Status>>>>::poll_ready(
|
||||
routes, cx,
|
||||
)
|
||||
}
|
||||
|
||||
fn call(&mut self, req: Request<hyper::body::Incoming>) -> Self::Future {
|
||||
|
|
|
|||
|
|
@ -124,26 +124,6 @@ async fn inbound_timeout() {
|
|||
.await;
|
||||
}
|
||||
|
||||
/// Tests that the detect metric is labeled and incremented on I/O error.
|
||||
#[tokio::test]
|
||||
async fn inbound_io_err() {
|
||||
let _trace = trace_init();
|
||||
|
||||
let (proxy, metrics) = Test::default().run().await;
|
||||
let client = crate::tcp::client(proxy.inbound);
|
||||
|
||||
let tcp_client = client.connect().await;
|
||||
|
||||
tcp_client.write(TcpFixture::HELLO_MSG).await;
|
||||
drop(tcp_client);
|
||||
|
||||
metric(&proxy)
|
||||
.label("error", "i/o")
|
||||
.value(1u64)
|
||||
.assert_in(&metrics)
|
||||
.await;
|
||||
}
|
||||
|
||||
/// Tests that the detect metric is not incremented when TLS is successfully
|
||||
/// detected.
|
||||
#[tokio::test]
|
||||
|
|
@ -189,44 +169,6 @@ async fn inbound_success() {
|
|||
metric.assert_in(&metrics).await;
|
||||
}
|
||||
|
||||
/// Tests both of the above cases together.
|
||||
#[tokio::test]
|
||||
async fn inbound_multi() {
|
||||
let _trace = trace_init();
|
||||
|
||||
let (proxy, metrics) = Test::default().run().await;
|
||||
let client = crate::tcp::client(proxy.inbound);
|
||||
|
||||
let metric = metric(&proxy);
|
||||
let timeout_metric = metric.clone().label("error", "tls detection timeout");
|
||||
let io_metric = metric.label("error", "i/o");
|
||||
|
||||
let tcp_client = client.connect().await;
|
||||
|
||||
tokio::time::sleep(TIMEOUT + Duration::from_millis(15)) // just in case
|
||||
.await;
|
||||
|
||||
timeout_metric.clone().value(1u64).assert_in(&metrics).await;
|
||||
drop(tcp_client);
|
||||
|
||||
let tcp_client = client.connect().await;
|
||||
|
||||
tcp_client.write(TcpFixture::HELLO_MSG).await;
|
||||
drop(tcp_client);
|
||||
|
||||
io_metric.clone().value(1u64).assert_in(&metrics).await;
|
||||
timeout_metric.clone().value(1u64).assert_in(&metrics).await;
|
||||
|
||||
let tcp_client = client.connect().await;
|
||||
|
||||
tokio::time::sleep(TIMEOUT + Duration::from_millis(15)) // just in case
|
||||
.await;
|
||||
|
||||
io_metric.clone().value(1u64).assert_in(&metrics).await;
|
||||
timeout_metric.clone().value(2u64).assert_in(&metrics).await;
|
||||
drop(tcp_client);
|
||||
}
|
||||
|
||||
/// Tests that TLS detect failure metrics are collected for the direct stack.
|
||||
#[tokio::test]
|
||||
async fn inbound_direct_multi() {
|
||||
|
|
|
|||
|
|
@ -146,7 +146,7 @@ impl Outbound<()> {
|
|||
export_hostname_labels: bool,
|
||||
) -> impl policy::GetPolicy
|
||||
where
|
||||
C: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
|
||||
C: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
|
||||
C: Clone + Unpin + Send + Sync + 'static,
|
||||
C::ResponseBody: proxy::http::Body<Data = tonic::codegen::Bytes, Error = Error>,
|
||||
C::ResponseBody: Send + 'static,
|
||||
|
|
|
|||
|
|
@ -130,7 +130,7 @@ impl OutboundMetrics {
|
|||
}
|
||||
}
|
||||
|
||||
impl FmtMetrics for OutboundMetrics {
|
||||
impl legacy::FmtMetrics for OutboundMetrics {
|
||||
fn fmt_metrics(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
self.http_errors.fmt_metrics(f)?;
|
||||
self.tcp_errors.fmt_metrics(f)?;
|
||||
|
|
@ -243,7 +243,7 @@ impl EncodeLabelSet for RouteRef {
|
|||
|
||||
// === impl ConcreteLabels ===
|
||||
|
||||
impl FmtLabels for ConcreteLabels {
|
||||
impl legacy::FmtLabels for ConcreteLabels {
|
||||
fn fmt_labels(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
let ConcreteLabels(parent, backend) = self;
|
||||
|
||||
|
|
|
|||
|
|
@ -5,7 +5,7 @@ pub(crate) use self::{http::Http, tcp::Tcp};
|
|||
use crate::http::IdentityRequired;
|
||||
use linkerd_app_core::{
|
||||
errors::{FailFastError, LoadShedError},
|
||||
metrics::FmtLabels,
|
||||
metrics::legacy::FmtLabels,
|
||||
proxy::http::ResponseTimeoutError,
|
||||
};
|
||||
use std::fmt;
|
||||
|
|
|
|||
|
|
@ -1,6 +1,9 @@
|
|||
use super::ErrorKind;
|
||||
use linkerd_app_core::{
|
||||
metrics::{metrics, Counter, FmtMetrics},
|
||||
metrics::{
|
||||
legacy::{Counter, FmtMetrics},
|
||||
metrics,
|
||||
},
|
||||
svc, Error,
|
||||
};
|
||||
use parking_lot::RwLock;
|
||||
|
|
|
|||
|
|
@ -1,6 +1,9 @@
|
|||
use super::ErrorKind;
|
||||
use linkerd_app_core::{
|
||||
metrics::{metrics, Counter, FmtMetrics},
|
||||
metrics::{
|
||||
legacy::{Counter, FmtMetrics},
|
||||
metrics,
|
||||
},
|
||||
svc,
|
||||
transport::{labels::TargetAddr, OrigDstAddr},
|
||||
Error,
|
||||
|
|
|
|||
|
|
@ -33,7 +33,7 @@ static INVALID_POLICY: once_cell::sync::OnceCell<ClientPolicy> = once_cell::sync
|
|||
|
||||
impl<S> Api<S>
|
||||
where
|
||||
S: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error> + Clone,
|
||||
S: tonic::client::GrpcService<tonic::body::Body, Error = Error> + Clone,
|
||||
S::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error> + Send + 'static,
|
||||
{
|
||||
pub(crate) fn new(
|
||||
|
|
@ -59,7 +59,7 @@ where
|
|||
|
||||
impl<S> Service<Addr> for Api<S>
|
||||
where
|
||||
S: tonic::client::GrpcService<tonic::body::BoxBody, Error = Error>,
|
||||
S: tonic::client::GrpcService<tonic::body::Body, Error = Error>,
|
||||
S: Clone + Send + Sync + 'static,
|
||||
S::ResponseBody: http::Body<Data = tonic::codegen::Bytes, Error = Error> + Send + 'static,
|
||||
S::Future: Send + 'static,
|
||||
|
|
|
|||
|
|
@ -43,7 +43,7 @@ impl Config {
|
|||
) -> Result<
|
||||
Dst<
|
||||
impl svc::Service<
|
||||
http::Request<tonic::body::BoxBody>,
|
||||
http::Request<tonic::body::Body>,
|
||||
Response = http::Response<control::RspBody>,
|
||||
Error = Error,
|
||||
Future = impl Send,
|
||||
|
|
|
|||
|
|
@ -19,9 +19,10 @@ use linkerd_app_core::{
|
|||
config::ServerConfig,
|
||||
control::{ControlAddr, Metrics as ControlMetrics},
|
||||
dns, drain,
|
||||
metrics::{prom, FmtMetrics},
|
||||
metrics::{legacy::FmtMetrics, prom},
|
||||
serve,
|
||||
svc::Param,
|
||||
tls_info,
|
||||
transport::{addrs::*, listen::Bind},
|
||||
Error, ProxyRuntime,
|
||||
};
|
||||
|
|
@@ -304,6 +305,7 @@ impl Config {
 error!(%error, "Failed to register process metrics");
 }
 registry.register("proxy_build_info", "Proxy build info", BUILD_INFO.metric());
+registry.register("rustls_info", "Proxy TLS info", tls_info::metric());

 let admin = {
 let identity = identity.receiver().server();
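The new `rustls_info` gauge follows the usual `prometheus_client` info-metric pattern: a constant gauge of 1 keyed by a label set that describes the TLS stack. A rough sketch of that pattern under assumed names (`TlsLabels`, `register_tls_info`); the proxy's real label set is the `TlsInfo` struct added in `tls_info.rs`:

use prometheus_client::{
    encoding::EncodeLabelSet,
    metrics::{family::Family, gauge::ConstGauge},
    registry::Registry,
};

#[derive(Clone, Debug, Hash, PartialEq, Eq, EncodeLabelSet)]
struct TlsLabels {
    tls_fips: bool,
}

fn register_tls_info(registry: &mut Registry, fips: bool) {
    // A constant `1` gauge per label set: the value carries no information,
    // the labels do.
    let fam = Family::<TlsLabels, ConstGauge>::new_with_constructor(|| ConstGauge::new(1));
    let _ = fam.get_or_create(&TlsLabels { tls_fips: fips });
    registry.register("rustls_info", "Proxy TLS info", fam);
}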
@ -46,7 +46,7 @@ impl Config {
|
|||
) -> Result<
|
||||
Policy<
|
||||
impl svc::Service<
|
||||
http::Request<tonic::body::BoxBody>,
|
||||
http::Request<tonic::body::Body>,
|
||||
Response = http::Response<control::RspBody>,
|
||||
Error = Error,
|
||||
Future = impl Send,
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@ use linkerd_opencensus::{self as opencensus, metrics, proto};
|
|||
use std::{collections::HashMap, time::SystemTime};
|
||||
use tokio::sync::mpsc;
|
||||
use tokio_stream::wrappers::ReceiverStream;
|
||||
use tonic::{body::BoxBody, client::GrpcService};
|
||||
use tonic::{body::Body as TonicBody, client::GrpcService};
|
||||
use tracing::Instrument;
|
||||
|
||||
pub(super) fn create_collector<S>(
|
||||
|
|
@ -18,7 +18,7 @@ pub(super) fn create_collector<S>(
|
|||
legacy_metrics: metrics::Registry,
|
||||
) -> EnabledCollector
|
||||
where
|
||||
S: GrpcService<BoxBody> + Clone + Send + 'static,
|
||||
S: GrpcService<TonicBody> + Clone + Send + 'static,
|
||||
S::Error: Into<Error>,
|
||||
S::Future: Send,
|
||||
S::ResponseBody: Body<Data = tonic::codegen::Bytes> + Send + 'static,
|
||||
|
|
|
|||
|
|
@ -15,7 +15,7 @@ use std::{
|
|||
};
|
||||
use tokio::sync::mpsc;
|
||||
use tokio_stream::wrappers::ReceiverStream;
|
||||
use tonic::{body::BoxBody, client::GrpcService};
|
||||
use tonic::{body::Body as TonicBody, client::GrpcService};
|
||||
use tracing::Instrument;
|
||||
|
||||
pub(super) struct OtelCollectorAttributes {
|
||||
|
|
@ -31,7 +31,7 @@ pub(super) fn create_collector<S>(
|
|||
legacy_metrics: metrics::Registry,
|
||||
) -> EnabledCollector
|
||||
where
|
||||
S: GrpcService<BoxBody> + Clone + Send + 'static,
|
||||
S: GrpcService<TonicBody> + Clone + Send + 'static,
|
||||
S::Error: Into<Error>,
|
||||
S::Future: Send,
|
||||
S::ResponseBody: Body<Data = tonic::codegen::Bytes> + Send + 'static,
|
||||
|
|
|
|||
|
|
@ -11,7 +11,6 @@ ahash = "0.8"
|
|||
linkerd-stack = { path = "../stack" }
|
||||
parking_lot = "0.12"
|
||||
rand = { version = "0.9", features = ["small_rng"] }
|
||||
tokio = { version = "1", features = ["macros"] }
|
||||
tracing = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
|
|
|
|||
|
|
@ -7,14 +7,15 @@ edition = { workspace = true }
|
|||
publish = { workspace = true }
|
||||
|
||||
[dependencies]
|
||||
futures = { version = "0.3", default-features = false }
|
||||
hickory-resolver = "0.25.2"
|
||||
linkerd-dns-name = { path = "./name" }
|
||||
linkerd-error = { path = "../error" }
|
||||
prometheus-client = { workspace = true }
|
||||
thiserror = "2"
|
||||
tokio = { version = "1", features = ["rt", "sync", "time"] }
|
||||
tracing = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
linkerd-error = { path = "../error" }
|
||||
|
||||
[lints.rust]
|
||||
unexpected_cfgs = { level = "warn", check-cfg = ['cfg(fuzzing)'] }
|
||||
|
|
|
|||
|
|
@ -15,10 +15,10 @@ tokio = { version = "1", default-features = false }
|
|||
tracing = { workspace = true }
|
||||
|
||||
linkerd-error = { path = "../../error" }
|
||||
linkerd-http-box = { path = "../../http/box" }
|
||||
linkerd-stack = { path = "../../stack" }
|
||||
|
||||
[dev-dependencies]
|
||||
tokio-test = "0.4"
|
||||
tower-test = { workspace = true }
|
||||
linkerd-http-box = { path = "../../http/box" }
|
||||
linkerd-tracing = { path = "../../tracing", features = ["ansi"] }
|
||||
|
|
|
|||
|
|
@ -10,11 +10,9 @@ publish = { workspace = true }
|
|||
test-util = []
|
||||
|
||||
[dependencies]
|
||||
bytes = { workspace = true }
|
||||
futures = { version = "0.3", default-features = false }
|
||||
http = { workspace = true }
|
||||
http-body = { workspace = true }
|
||||
hyper = { workspace = true, features = ["http1", "http2"] }
|
||||
parking_lot = "0.12"
|
||||
pin-project = "1"
|
||||
tokio = { version = "1", features = ["time"] }
|
||||
|
|
|
|||
|
|
@ -2,7 +2,7 @@
|
|||
#![forbid(unsafe_code)]
|
||||
|
||||
pub use self::{requests::Requests, retries::Retries};
|
||||
use linkerd_metrics::SharedStore;
|
||||
use linkerd_metrics::legacy::SharedStore;
|
||||
use parking_lot::Mutex;
|
||||
use std::{fmt, hash::Hash, time::Duration};
|
||||
|
||||
|
|
|
|||
|
|
@ -4,7 +4,10 @@ mod service;
|
|||
pub use self::service::{NewHttpMetrics, ResponseBody};
|
||||
use super::Report;
|
||||
use linkerd_http_classify::ClassifyResponse;
|
||||
use linkerd_metrics::{latency, Counter, FmtMetrics, Histogram, LastUpdate, NewMetrics};
|
||||
use linkerd_metrics::{
|
||||
latency,
|
||||
legacy::{Counter, FmtMetrics, Histogram, LastUpdate, NewMetrics},
|
||||
};
|
||||
use linkerd_stack::{self as svc, layer};
|
||||
use std::{collections::HashMap, fmt::Debug, hash::Hash};
|
||||
use tokio::time::{Duration, Instant};
|
||||
|
|
@ -146,7 +149,7 @@ impl ClassMetrics {
|
|||
mod tests {
|
||||
#[test]
|
||||
fn expiry() {
|
||||
use linkerd_metrics::FmtLabels;
|
||||
use linkerd_metrics::legacy::FmtLabels;
|
||||
use std::fmt;
|
||||
use tokio::time::{Duration, Instant};
|
||||
|
||||
|
|
|
|||
|
|
@ -1,7 +1,8 @@
|
|||
use super::{ClassMetrics, Metrics, StatusMetrics};
|
||||
use crate::{Prefixed, Report};
|
||||
use linkerd_metrics::{
|
||||
latency, Counter, FmtLabels, FmtMetric, FmtMetrics, Histogram, Metric, Store,
|
||||
latency,
|
||||
legacy::{Counter, FmtLabels, FmtMetric, FmtMetrics, Histogram, Metric, Store},
|
||||
};
|
||||
use parking_lot::Mutex;
|
||||
use std::{fmt, hash::Hash};
|
||||
|
|
|
|||
|
|
@ -3,7 +3,7 @@ use futures::{ready, TryFuture};
|
|||
use http_body::{Body, Frame};
|
||||
use linkerd_error::Error;
|
||||
use linkerd_http_classify::{ClassifyEos, ClassifyResponse};
|
||||
use linkerd_metrics::NewMetrics;
|
||||
use linkerd_metrics::legacy::NewMetrics;
|
||||
use linkerd_stack::Proxy;
|
||||
use parking_lot::Mutex;
|
||||
use pin_project::{pin_project, pinned_drop};
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
use super::{Prefixed, Registry, Report};
|
||||
use linkerd_metrics::{Counter, FmtLabels, FmtMetric, FmtMetrics, LastUpdate, Metric};
|
||||
use linkerd_metrics::legacy::{Counter, FmtLabels, FmtMetric, FmtMetrics, LastUpdate, Metric};
|
||||
use parking_lot::Mutex;
|
||||
use std::{fmt, hash::Hash, sync::Arc};
|
||||
use tokio::time::{Duration, Instant};
|
||||
|
|
|
|||
|
|
@ -17,7 +17,6 @@ bytes = { workspace = true }
|
|||
futures = { version = "0.3", default-features = false }
|
||||
http = { workspace = true }
|
||||
http-body = { workspace = true }
|
||||
parking_lot = "0.12"
|
||||
pin-project = "1"
|
||||
prometheus-client = { workspace = true }
|
||||
thiserror = "2"
|
||||
|
|
|
|||
|
|
@ -14,7 +14,6 @@ http-body-util = { workspace = true }
|
|||
http = { workspace = true }
|
||||
parking_lot = "0.12"
|
||||
pin-project = "1"
|
||||
tokio = { version = "1", features = ["macros", "rt"] }
|
||||
tower = { workspace = true, features = ["retry"] }
|
||||
tracing = { workspace = true }
|
||||
thiserror = "2"
|
||||
|
|
@ -26,7 +25,6 @@ linkerd-metrics = { path = "../../metrics" }
|
|||
linkerd-stack = { path = "../../stack" }
|
||||
|
||||
[dev-dependencies]
|
||||
hyper = { workspace = true }
|
||||
linkerd-tracing = { path = "../../tracing", features = ["ansi"] }
|
||||
linkerd-mock-http-body = { path = "../../mock/http-body" }
|
||||
tokio = { version = "1", features = ["macros", "rt"] }
|
||||
|
|
|
|||
|
|
@ -21,6 +21,3 @@ url = "2"
|
|||
workspace = true
|
||||
features = ["http-route", "grpc-route"]
|
||||
optional = true
|
||||
|
||||
[dev-dependencies]
|
||||
maplit = "1"
|
||||
|
|
|
|||
|
|
@ -10,7 +10,6 @@ Facilities for HTTP/1 upgrades.
|
|||
"""
|
||||
|
||||
[dependencies]
|
||||
bytes = { workspace = true }
|
||||
drain = { workspace = true }
|
||||
futures = { version = "0.3", default-features = false }
|
||||
http = { workspace = true }
|
||||
|
|
|
|||
|
|
@ -10,8 +10,6 @@ publish = { workspace = true }
|
|||
test-util = []
|
||||
|
||||
[dependencies]
|
||||
futures = { version = "0.3", default-features = false }
|
||||
linkerd-error = { path = "../error" }
|
||||
linkerd-stack = { path = "../stack" }
|
||||
parking_lot = "0.12"
|
||||
tokio = { version = "1", default-features = false, features = [
|
||||
|
|
@ -28,4 +26,3 @@ tokio = { version = "1", default-features = false, features = [
|
|||
"test-util",
|
||||
"time",
|
||||
] }
|
||||
linkerd-tracing = { path = "../tracing", features = ["ansi"] }
|
||||
|
|
|
|||
|
|
@@ -8,9 +8,7 @@ publish = { workspace = true }

 [features]
 rustls = ["linkerd-meshtls-rustls", "__has_any_tls_impls"]
-rustls-aws-lc = ["rustls", "linkerd-meshtls-rustls/aws-lc"]
-rustls-aws-lc-fips = ["rustls-aws-lc", "linkerd-meshtls-rustls/aws-lc-fips"]
-rustls-ring = ["rustls", "linkerd-meshtls-rustls/ring"]
+rustls-aws-lc-fips = ["linkerd-meshtls-rustls/aws-lc-fips"]
 boring = ["linkerd-meshtls-boring", "__has_any_tls_impls"]
 boring-fips = ["boring", "linkerd-meshtls-boring/fips"]
 # Enabled if *any* TLS impl is enabled.
@ -8,7 +8,6 @@ publish = { workspace = true }
|
|||
|
||||
[dependencies]
|
||||
boring = "4"
|
||||
futures = { version = "0.3", default-features = false }
|
||||
hex = "0.4" # used for debug logging
|
||||
tokio = { version = "1", features = ["macros", "sync"] }
|
||||
tokio-boring = "4"
|
||||
|
|
@ -27,4 +26,3 @@ fips = ["boring/fips"]
|
|||
|
||||
[dev-dependencies]
|
||||
linkerd-tls-test-util = { path = "../../tls/test-util" }
|
||||
linkerd-meshtls = { path = "../../meshtls" }
|
||||
|
|
|
|||
|
|
@@ -7,19 +7,16 @@ edition = "2018"
 publish = { workspace = true }

 [features]
-default = ["ring"]
-ring = ["tokio-rustls/ring", "rustls-webpki/ring"]
-aws-lc = ["tokio-rustls/aws-lc-rs", "rustls-webpki/aws-lc-rs"]
-aws-lc-fips = ["aws-lc", "tokio-rustls/fips"]
+aws-lc-fips = ["tokio-rustls/fips"]
 test-util = ["linkerd-tls-test-util"]

 [dependencies]
 futures = { version = "0.3", default-features = false }
 rustls-pemfile = "2.2"
-rustls-webpki = { version = "0.103.4", default-features = false, features = ["std"] }
+rustls-webpki = { version = "0.103.4", default-features = false, features = ["std", "aws-lc-rs"] }
 thiserror = "2"
 tokio = { version = "1", features = ["macros", "rt", "sync"] }
-tokio-rustls = { workspace = true }
+tokio-rustls = { workspace = true, features = ["aws-lc-rs"] }
 tracing = { workspace = true }

 linkerd-dns-name = { path = "../../dns/name" }
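With the `ring` feature gone, aws-lc-rs is the only rustls crypto backend left in `linkerd-meshtls-rustls`. A hedged sketch of what selecting it as the process-default provider can look like; `init_crypto` is an illustrative name, not a function from this crate:

use tokio_rustls::rustls::crypto::{aws_lc_rs, CryptoProvider};

fn init_crypto() {
    // Install aws-lc-rs as the process-default provider. A second call
    // returns Err, so the result is ignored in case initialization races.
    let _ = aws_lc_rs::default_provider().install_default();

    // Later code (such as the new tls_info metric) can read it back.
    assert!(CryptoProvider::get_default().is_some());
}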
@ -1,16 +1,3 @@
|
|||
#[cfg(all(feature = "aws-lc", feature = "ring"))]
|
||||
compile_error!(
|
||||
"Multiple rustls backends enabled. Enabled one of the \"ring\" or \"aws-lc\" features"
|
||||
);
|
||||
#[cfg(not(any(feature = "aws-lc", feature = "ring")))]
|
||||
compile_error!("No rustls backend enabled. Enabled one of the \"ring\" or \"aws-lc\" features");
|
||||
|
||||
#[cfg(feature = "aws-lc")]
|
||||
mod aws_lc;
|
||||
#[cfg(feature = "ring")]
|
||||
mod ring;
|
||||
|
||||
#[cfg(feature = "aws-lc")]
|
||||
pub use aws_lc::{default_provider, SUPPORTED_SIG_ALGS, TLS_SUPPORTED_CIPHERSUITES};
|
||||
#[cfg(feature = "ring")]
|
||||
pub use ring::{default_provider, SUPPORTED_SIG_ALGS, TLS_SUPPORTED_CIPHERSUITES};
|
||||
|
||||
mod aws_lc;
|
||||
|
|
|
|||
|
|
@ -1,55 +0,0 @@
|
|||
pub use ring::default_provider;
|
||||
use tokio_rustls::rustls::{
|
||||
self,
|
||||
crypto::{ring, WebPkiSupportedAlgorithms},
|
||||
};
|
||||
|
||||
pub static TLS_SUPPORTED_CIPHERSUITES: &[rustls::SupportedCipherSuite] = &[
|
||||
ring::cipher_suite::TLS13_AES_128_GCM_SHA256,
|
||||
ring::cipher_suite::TLS13_AES_256_GCM_SHA384,
|
||||
ring::cipher_suite::TLS13_CHACHA20_POLY1305_SHA256,
|
||||
];
|
||||
// A subset of the algorithms supported by rustls+ring, imported from
|
||||
// https://github.com/rustls/rustls/blob/v/0.23.21/rustls/src/crypto/ring/mod.rs#L107
|
||||
pub static SUPPORTED_SIG_ALGS: &WebPkiSupportedAlgorithms = &WebPkiSupportedAlgorithms {
|
||||
all: &[
|
||||
webpki::ring::ECDSA_P256_SHA256,
|
||||
webpki::ring::ECDSA_P256_SHA384,
|
||||
webpki::ring::ECDSA_P384_SHA256,
|
||||
webpki::ring::ECDSA_P384_SHA384,
|
||||
webpki::ring::ED25519,
|
||||
webpki::ring::RSA_PKCS1_2048_8192_SHA256,
|
||||
webpki::ring::RSA_PKCS1_2048_8192_SHA384,
|
||||
webpki::ring::RSA_PKCS1_2048_8192_SHA512,
|
||||
webpki::ring::RSA_PKCS1_3072_8192_SHA384,
|
||||
],
|
||||
mapping: &[
|
||||
(
|
||||
rustls::SignatureScheme::ECDSA_NISTP384_SHA384,
|
||||
&[
|
||||
webpki::ring::ECDSA_P384_SHA384,
|
||||
webpki::ring::ECDSA_P256_SHA384,
|
||||
],
|
||||
),
|
||||
(
|
||||
rustls::SignatureScheme::ECDSA_NISTP256_SHA256,
|
||||
&[
|
||||
webpki::ring::ECDSA_P256_SHA256,
|
||||
webpki::ring::ECDSA_P384_SHA256,
|
||||
],
|
||||
),
|
||||
(rustls::SignatureScheme::ED25519, &[webpki::ring::ED25519]),
|
||||
(
|
||||
rustls::SignatureScheme::RSA_PKCS1_SHA512,
|
||||
&[webpki::ring::RSA_PKCS1_2048_8192_SHA512],
|
||||
),
|
||||
(
|
||||
rustls::SignatureScheme::RSA_PKCS1_SHA384,
|
||||
&[webpki::ring::RSA_PKCS1_2048_8192_SHA384],
|
||||
),
|
||||
(
|
||||
rustls::SignatureScheme::RSA_PKCS1_SHA256,
|
||||
&[webpki::ring::RSA_PKCS1_2048_8192_SHA256],
|
||||
),
|
||||
],
|
||||
};
|
||||
|
|
@@ -16,9 +16,7 @@ test_util = []
bytes = { workspace = true }
deflate = { version = "1", features = ["gzip"] }
http = { workspace = true }
http-body = { workspace = true }
http-body-util = { workspace = true }
hyper = { workspace = true, features = ["http1", "http2"] }
kubert-prometheus-process = { version = "0.2", optional = true }
parking_lot = "0.12"
prometheus-client = { workspace = true }

@@ -1,6 +1,6 @@
use super::{
    fmt::{FmtLabels, FmtMetric},
    Factor,
    legacy::Factor,
};
use std::fmt::{self, Display};
use std::sync::atomic::{AtomicU64, Ordering};

@@ -2,7 +2,7 @@ use std::fmt;
use std::marker::PhantomData;
use std::{cmp, iter, slice};

use super::{Counter, Factor, FmtLabels, FmtMetric};
use super::legacy::{Counter, Factor, FmtLabels, FmtMetric};

/// A series of latency values and counts.
#[derive(Debug)]

@@ -16,16 +16,37 @@ mod store;
#[cfg(feature = "process")]
pub use kubert_prometheus_process as process;

#[cfg(feature = "stack")]
pub use self::new_metrics::NewMetrics;
pub use self::{
    counter::Counter,
    fmt::{FmtLabels, FmtMetric, FmtMetrics, Metric},
    gauge::Gauge,
    histogram::Histogram,
    serve::Serve,
    store::{LastUpdate, SharedStore, Store},
};
/// A legacy metrics implementation.
///
/// New metrics should use the interfaces in [`prom`] instead.
pub mod legacy {
    // TODO(kate): we will move types like `Counter` and `Gauge` into this module.
    //
    // this will help us differentiate in dependent systems which components rely on our legacy
    // metrics implementation.
    pub use super::{
        counter::Counter,
        fmt::{FmtLabels, FmtMetric, FmtMetrics, Metric},
        gauge::Gauge,
        histogram::Histogram,
        serve::Serve,
        store::{LastUpdate, SharedStore, Store},
    };

    #[cfg(feature = "stack")]
    pub use super::new_metrics::NewMetrics;

    pub trait Factor {
        fn factor(n: u64) -> f64;
    }

    impl Factor for () {
        #[inline]
        fn factor(n: u64) -> f64 {
            super::to_f64(n)
        }
    }
}

/// Integration with the [`prometheus_client`] crate.
///

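Aside (not part of the diff): with the `legacy` module in place, dependent code is expected to pull the old formatting types through `linkerd_metrics::legacy` while the `metrics!` macro stays at the crate root, as the import hunks later in this diff show. A minimal sketch of that usage, assuming the in-repo `linkerd-metrics` crate; the metric name and help text here are hypothetical:

    use linkerd_metrics::{legacy::Counter, metrics};

    metrics! {
        // Hypothetical metric for illustration only; after this change the macro
        // expands to a `legacy::Metric<'static, &str, Counter>` constant.
        example_requests_total: Counter { "Total number of example requests observed." }
    }
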
@@ -51,7 +72,7 @@ pub mod prom {

    pub type Report = Arc<Registry>;

    impl crate::FmtMetrics for Report {
    impl crate::legacy::FmtMetrics for Report {
        #[inline]
        fn fmt_metrics(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            encoding::text::encode(f, self)

@@ -64,8 +85,8 @@ macro_rules! metrics {
    { $( $name:ident : $kind:ty { $help:expr } ),+ } => {
        $(
            #[allow(non_upper_case_globals)]
            const $name: $crate::Metric<'static, &str, $kind> =
                $crate::Metric {
            const $name: $crate::legacy::Metric<'static, &str, $kind> =
                $crate::legacy::Metric {
                    name: stringify!($name),
                    help: $help,
                    _p: ::std::marker::PhantomData,

@@ -74,8 +95,8 @@ macro_rules! metrics {
        }
    }

pub trait Factor {
    fn factor(n: u64) -> f64;
pub fn to_f64(n: u64) -> f64 {
    n.wrapping_rem(MAX_PRECISE_UINT64 + 1) as f64
}

/// Largest `u64` that can fit without loss of precision in `f64` (2^53).

@@ -84,14 +105,3 @@ pub trait Factor {
/// mantissa), thus integer values over 2^53 are not guaranteed to be correctly
/// exposed.
const MAX_PRECISE_UINT64: u64 = 0x20_0000_0000_0000;

impl Factor for () {
    #[inline]
    fn factor(n: u64) -> f64 {
        to_f64(n)
    }
}

pub fn to_f64(n: u64) -> f64 {
    n.wrapping_rem(MAX_PRECISE_UINT64 + 1) as f64
}

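Aside (not part of the diff): `to_f64` wraps readings at 2^53 so that the converted `f64` stays exactly representable, and `Factor` is the hook for scaling a raw `u64` at render time. A small self-contained sketch of both, with `MillisAsSeconds` as a hypothetical scaling factor (not a name from this diff):

    /// Largest `u64` that fits in an `f64` mantissa without loss of precision (2^53).
    const MAX_PRECISE_UINT64: u64 = 0x20_0000_0000_0000;

    fn to_f64(n: u64) -> f64 {
        n.wrapping_rem(MAX_PRECISE_UINT64 + 1) as f64
    }

    trait Factor {
        fn factor(n: u64) -> f64;
    }

    /// Hypothetical factor that renders a millisecond counter in seconds.
    struct MillisAsSeconds;

    impl Factor for MillisAsSeconds {
        fn factor(n: u64) -> f64 {
            to_f64(n) * 0.001
        }
    }

    fn main() {
        assert_eq!(MillisAsSeconds::factor(1_500), 1.5);
        // Readings above 2^53 wrap instead of silently losing precision.
        assert_eq!(to_f64(MAX_PRECISE_UINT64 + 1), 0.0);
    }
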
@@ -1,4 +1,4 @@
use crate::SharedStore;
use crate::legacy::SharedStore;
use linkerd_stack as svc;
use std::{fmt, hash::Hash, marker::PhantomData, sync::Arc};

@@ -4,7 +4,7 @@ use linkerd_http_box::BoxBody;
use std::io::Write;
use tracing::trace;

use super::FmtMetrics;
use super::legacy::FmtMetrics;

/// Serve Prometheues metrics.
#[derive(Debug, Clone)]

@@ -1,4 +1,4 @@
use crate::{FmtLabels, FmtMetric, Metric};
use crate::legacy::{FmtLabels, FmtMetric, Metric};
use parking_lot::Mutex;
use std::{
    borrow::Borrow,

@@ -19,12 +19,12 @@ use opencensus_proto::{
use std::collections::HashMap;
use tokio::{sync::mpsc, time};
use tokio_stream::wrappers::ReceiverStream;
use tonic::{self as grpc, body::BoxBody, client::GrpcService};
use tonic::{self as grpc, body::Body as TonicBody, client::GrpcService};
use tracing::{debug, info, trace};

pub async fn export_spans<T, S>(client: T, node: Node, spans: S, metrics: Registry)
where
    T: GrpcService<BoxBody> + Clone,
    T: GrpcService<TonicBody> + Clone,
    T::Error: Into<Error>,
    T::ResponseBody: Body<Data = tonic::codegen::Bytes> + Send + 'static,
    <T::ResponseBody as Body>::Error: Into<Error> + Send,

@@ -49,7 +49,7 @@ struct SpanRxClosed;

impl<T, S> SpanExporter<T, S>
where
    T: GrpcService<BoxBody>,
    T: GrpcService<TonicBody>,
    T::Error: Into<Error>,
    T::ResponseBody: Body<Data = tonic::codegen::Bytes> + Send + 'static,
    <T::ResponseBody as Body>::Error: Into<Error> + Send,

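Aside (not part of the diff): the recurring `GrpcService<BoxBody>` → `GrpcService<TonicBody>` edits in these hunks track the tonic upgrade that replaces `tonic::body::BoxBody` with `tonic::body::Body` (aliased locally as `TonicBody`). A minimal compile-time sketch of the updated bound; `assert_grpc_client` and the local `Error` alias are illustrative names, not part of the diff:

    use http_body::Body;
    use tonic::{body::Body as TonicBody, client::GrpcService};

    type Error = Box<dyn std::error::Error + Send + Sync + 'static>;

    /// Compiles only if `T` satisfies the post-rename client bound used above.
    fn assert_grpc_client<T>(_client: &T)
    where
        T: GrpcService<TonicBody> + Clone,
        T::Error: Into<Error>,
        T::ResponseBody: Body<Data = tonic::codegen::Bytes> + Send + 'static,
        <T::ResponseBody as Body>::Error: Into<Error> + Send,
    {
    }
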
@@ -1,4 +1,7 @@
use linkerd_metrics::{metrics, Counter, FmtMetrics};
use linkerd_metrics::{
    legacy::{Counter, FmtMetrics},
    metrics,
};
use std::fmt;
use std::sync::Arc;

@@ -29,7 +29,7 @@ use opentelemetry_proto::{
use opentelemetry_sdk::trace::{SpanData, SpanLinks};
pub use opentelemetry_sdk::{self as sdk};
use tokio::{sync::mpsc, time};
use tonic::{self as grpc, body::BoxBody, client::GrpcService};
use tonic::{self as grpc, body::Body as TonicBody, client::GrpcService};
use tracing::{debug, info, trace};

pub async fn export_spans<T, S>(

@@ -38,7 +38,7 @@ pub async fn export_spans<T, S>(
    resource: ResourceAttributesWithSchema,
    metrics: Registry,
) where
    T: GrpcService<BoxBody> + Clone,
    T: GrpcService<TonicBody> + Clone,
    T::Error: Into<Error>,
    T::ResponseBody: Body<Data = tonic::codegen::Bytes> + Send + 'static,
    <T::ResponseBody as Body>::Error: Into<Error> + Send,

@@ -65,7 +65,7 @@ struct SpanRxClosed;

impl<T, S> SpanExporter<T, S>
where
    T: GrpcService<BoxBody> + Clone,
    T: GrpcService<TonicBody> + Clone,
    T::Error: Into<Error>,
    T::ResponseBody: Body<Data = tonic::codegen::Bytes> + Send + 'static,
    <T::ResponseBody as Body>::Error: Into<Error> + Send,

@@ -1,4 +1,7 @@
use linkerd_metrics::{metrics, Counter, FmtMetrics};
use linkerd_metrics::{
    legacy::{Counter, FmtMetrics},
    metrics,
};
use std::fmt;
use std::sync::Arc;

@@ -9,10 +9,8 @@ publish = { workspace = true }
[dependencies]
ahash = "0.8"
futures = { version = "0.3", default-features = false }
indexmap = "2"
prometheus-client = { workspace = true }
rand = { version = "0.9", features = ["small_rng"] }
tokio = { version = "1", features = ["rt", "sync", "time"] }
tracing = { workspace = true }

linkerd-error = { path = "../../error" }

@@ -30,5 +28,6 @@ futures-util = { version = "0.3", default-features = false }
linkerd-tracing = { path = "../../tracing" }
parking_lot = "0.12"
quickcheck = { version = "1", default-features = false }
tokio = { version = "1", features = ["rt", "sync", "time"] }
tokio-test = "0.4"
tower-test = { workspace = true }

@@ -22,8 +22,6 @@ linkerd-tls = { path = "../../tls" }
linkerd-identity = { path = "../../identity" }
http = { workspace = true }
http-body = { workspace = true }
pin-project = "1"
prost = { workspace = true }
tonic = { workspace = true, default-features = false }
tower = { workspace = true, default-features = false }
tracing = { workspace = true }

@@ -7,7 +7,7 @@ use linkerd_stack::Param;
use linkerd_tonic_stream::{LimitReceiveFuture, ReceiveLimits};
use std::pin::Pin;
use std::task::{Context, Poll};
use tonic::{self as grpc, body::BoxBody, client::GrpcService};
use tonic::{self as grpc, body::Body as TonicBody, client::GrpcService};
use tower::Service;
use tracing::{debug, info, trace};

@@ -22,7 +22,7 @@ pub struct Resolve<S> {

impl<S> Resolve<S>
where
    S: GrpcService<BoxBody> + Clone + Send + 'static,
    S: GrpcService<TonicBody> + Clone + Send + 'static,
    S::Error: Into<Error> + Send,
    S::ResponseBody: Body<Data = tonic::codegen::Bytes> + Send + 'static,
    <S::ResponseBody as Body>::Error: Into<Error> + Send,

@@ -46,7 +46,7 @@ type ResolveFuture =
impl<T, S> Service<T> for Resolve<S>
where
    T: Param<ConcreteAddr>,
    S: GrpcService<BoxBody> + Clone + Send + 'static,
    S: GrpcService<TonicBody> + Clone + Send + 'static,
    S::Error: Into<Error> + Send,
    S::ResponseBody: Body<Data = tonic::codegen::Bytes> + Send + 'static,
    <S::ResponseBody as Body>::Error: Into<Error> + Send,

@@ -8,7 +8,6 @@ publish = { workspace = true }

[dependencies]
futures = { version = "0.3", default-features = false }
rand = "0.9"
tokio = { version = "1", features = ["time"] }
tracing = { workspace = true }

@@ -10,8 +10,6 @@ publish = { workspace = true }
futures = { version = "0.3", default-features = false }
parking_lot = "0.12"
pin-project = "1"
prometheus-client = { workspace = true }
thiserror = "2"
tokio = { version = "1", features = ["rt", "sync", "time"] }
tokio-util = "0.7"
tracing = { workspace = true }

@@ -25,7 +23,6 @@ linkerd-stack = { path = "../../../stack" }
[dev-dependencies]
tokio-stream = { version = "0.1", features = ["sync"] }
tokio-test = "0.4"
tower-test = { workspace = true }

linkerd-pool-mock = { path = "../../../pool/mock" }
linkerd-tracing = { path = "../../../tracing" }

@@ -17,7 +17,6 @@ proto = [

[dependencies]
ahash = "0.8"
ipnet = "2"
http = { workspace = true }
once_cell = { version = "1" }
prost-types = { workspace = true, optional = true }

@@ -30,13 +29,8 @@ linkerd-http-route = { path = "../../http/route" }
linkerd-tls-route = { path = "../../tls/route" }
linkerd-opaq-route = { path = "../../opaq-route" }
linkerd-proxy-api-resolve = { path = "../api-resolve" }
linkerd-proxy-core = { path = "../core" }

[dependencies.linkerd2-proxy-api]
workspace = true
optional = true
features = ["outbound"]

[dev-dependencies]
maplit = "1"
quickcheck = { version = "1", default-features = false }

@@ -12,14 +12,11 @@ This should probably be decomposed into smaller, decoupled crates.
"""

[dependencies]
async-trait = "0.1"
bytes = { workspace = true }
drain = { workspace = true }
futures = { version = "0.3", default-features = false }
h2 = { workspace = true }
http = { workspace = true }
http-body = { workspace = true }
httparse = "1"
hyper = { workspace = true, features = [
    "client",
    "http1",

@@ -35,16 +32,12 @@ hyper-util = { workspace = true, default-features = false, features = [
    "tokio",
    "tracing",
] }
parking_lot = "0.12"
pin-project = "1"
rand = "0.9"
thiserror = "2"
tokio = { version = "1", features = ["rt", "sync", "time"] }
tower = { workspace = true, default-features = false }
tracing = { workspace = true }
try-lock = "0.2"

linkerd-duplex = { path = "../../duplex" }
linkerd-error = { path = "../../error" }
linkerd-http-box = { path = "../../http/box" }
linkerd-http-classify = { path = "../../http/classify" }

@@ -61,8 +54,8 @@ linkerd-proxy-balance = { path = "../balance" }
linkerd-stack = { path = "../../stack" }

[dev-dependencies]
bytes = { workspace = true }
http-body-util = { workspace = true, features = ["channel"] }
tokio-test = "0.4"
tower-test = { workspace = true }
linkerd-tracing = { path = "../../tracing", features = ["ansi"] }

@@ -7,15 +7,11 @@ edition = { workspace = true }
publish = { workspace = true }

[dependencies]
futures = { version = "0.3", default-features = false }
linkerd2-proxy-api = { workspace = true, features = ["identity"] }
linkerd-dns-name = { path = "../../dns/name" }
linkerd-error = { path = "../../error" }
linkerd-identity = { path = "../../identity" }
linkerd-metrics = { path = "../../metrics" }
linkerd-stack = { path = "../../stack" }
parking_lot = "0.12"
pin-project = "1"
thiserror = "2"
tokio = { version = "1", features = ["time", "sync"] }
tonic = { workspace = true, default-features = false }

@@ -12,7 +12,7 @@ use std::{
};
use thiserror::Error;
use tokio::time;
use tonic::{body::BoxBody, client::GrpcService};
use tonic::{body::Body as TonicBody, client::GrpcService};
use tracing::{debug, error};

/// Configures the Identity service and local identity.

@@ -92,7 +92,7 @@ impl Certify {
    where
        C: Credentials,
        N: NewService<(), Service = S>,
        S: GrpcService<BoxBody>,
        S: GrpcService<TonicBody>,
        S::ResponseBody: Body<Data = tonic::codegen::Bytes> + Send + 'static,
        <S::ResponseBody as Body>::Error: Into<Error> + Send,
    {

@@ -154,7 +154,7 @@ async fn certify<C, S>(
) -> Result<SystemTime>
where
    C: Credentials,
    S: GrpcService<BoxBody>,
    S: GrpcService<TonicBody>,
    S::ResponseBody: Body<Data = tonic::codegen::Bytes> + Send + 'static,
    <S::ResponseBody as Body>::Error: Into<Error> + Send,
{

@@ -14,7 +14,6 @@ linkerd-identity = { path = "../../identity" }
spiffe-proto = { path = "../../../spiffe-proto" }
linkerd-tonic-watch = { path = "../../tonic-watch" }
linkerd-exp-backoff = { path = "../../exp-backoff" }
linkerd-stack = { path = "../../stack" }
tokio = { version = "1", features = ["time", "sync"] }
tonic = { workspace = true }
tower = { workspace = true }

@@ -25,4 +24,3 @@ thiserror = "2"

[dev-dependencies]
rcgen = { version = "0.14.3", default-features = false, features = ["crypto", "pem", "aws_lc_rs"] }
tokio-test = "0.4"

@@ -114,7 +114,7 @@ impl TryFrom<api::X509svid> for Svid {

impl<S> Api<S>
where
    S: tonic::client::GrpcService<tonic::body::BoxBody> + Clone,
    S: tonic::client::GrpcService<tonic::body::Body> + Clone,
    S::Error: Into<Error>,
    S::ResponseBody: Default + http::Body<Data = tonic::codegen::Bytes> + Send + 'static,
    <S::ResponseBody as http::Body>::Error: Into<Error> + Send,

@@ -127,7 +127,7 @@ where

impl<S> Service<()> for Api<S>
where
    S: tonic::client::GrpcService<tonic::body::BoxBody> + Clone,
    S: tonic::client::GrpcService<tonic::body::Body> + Clone,
    S: Clone + Send + Sync + 'static,
    S::ResponseBody: Default + http::Body<Data = tonic::codegen::Bytes> + Send + 'static,
    <S::ResponseBody as http::Body>::Error: Into<Error> + Send,

@@ -24,7 +24,6 @@ linkerd-stack = { path = "../../stack" }
linkerd-tls = { path = "../../tls" }
parking_lot = "0.12"
prost-types = { workspace = true }
rand = { version = "0.9" }
thiserror = "2"
tokio = { version = "1", features = ["time"] }
tower = { workspace = true, default-features = false }

@@ -12,7 +12,5 @@ linkerd-duplex = { path = "../../duplex" }
linkerd-error = { path = "../../error" }
linkerd-proxy-balance = { path = "../../proxy/balance" }
linkerd-stack = { path = "../../stack" }
rand = "0.9"
tokio = { version = "1" }
tower = { workspace = true, default-features = false }
pin-project = "1"

@@ -14,7 +14,7 @@ futures = { version = "0.3", default-features = false }
linkerd-error = { path = "../../error" }
linkerd-io = { path = "../../io" }
linkerd-stack = { path = "../../stack" }
socket2 = "0.5"
socket2 = "0.6"
thiserror = "2"
tokio = { version = "1", features = ["macros", "net"] }
tokio-stream = { version = "0.1", features = ["net"] }

@@ -103,8 +103,8 @@ fn orig_dst(sock: TcpStream, client_addr: ClientAddr) -> io::Result<(OrigDstAddr
        // IPv4-mapped IPv6 addresses are unwrapped by BindTcp::bind() and received here as
        // SocketAddr::V4. We must call getsockopt with IPv4 constants (via
        // orig_dst_addr_v4) even if it originally was an IPv6
        ClientAddr(SocketAddr::V4(_)) => sock.original_dst()?,
        ClientAddr(SocketAddr::V6(_)) => sock.original_dst_ipv6()?,
        ClientAddr(SocketAddr::V4(_)) => sock.original_dst_v4()?,
        ClientAddr(SocketAddr::V6(_)) => sock.original_dst_v6()?,
    };

    let orig_dst = orig_dst.as_socket().ok_or(io::Error::new(

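Aside (not part of the diff): with the socket2 0.6 bump, the SO_ORIGINAL_DST accessors used above are `original_dst_v4`/`original_dst_v6`. A self-contained sketch of the same address-family dispatch outside the proxy's own wrapper types (Linux-only, and assuming socket2's `all` feature; the `original_dst` helper here is illustrative):

    use socket2::SockRef;
    use std::io;
    use std::net::{SocketAddr, TcpStream};

    /// Reads the pre-NAT destination of an accepted connection, dispatching on the
    /// client address family as the hunk above does for IPv4-mapped IPv6 addresses.
    fn original_dst(stream: &TcpStream, client: SocketAddr) -> io::Result<SocketAddr> {
        let sock = SockRef::from(stream);
        let addr = match client {
            SocketAddr::V4(_) => sock.original_dst_v4()?,
            SocketAddr::V6(_) => sock.original_dst_v6()?,
        };
        addr.as_socket()
            .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidData, "not an inet address"))
    }
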
@@ -10,9 +10,7 @@ publish = { workspace = true }
linkerd-error = { path = "../error" }
linkerd-stack = { path = "../stack" }
futures = { version = "0.3", default-features = false }
tower = { workspace = true, default-features = false }
tracing = { workspace = true }
pin-project = "1"

[dev-dependencies]
linkerd-tracing = { path = "../tracing" }

@@ -10,7 +10,6 @@ publish = { workspace = true }
ahash = "0.8"
futures = { version = "0.3", default-features = false }
parking_lot = "0.12"
thiserror = "2"
tracing = { workspace = true }
linkerd-error = { path = "../error" }
linkerd-stack = { path = "../stack" }

@@ -10,7 +10,6 @@ Implements client layers for Linkerd ServiceProfiles.
"""

[dependencies]
bytes = { workspace = true }
futures = { version = "0.3", default-features = false }
http = { workspace = true }
http-body = { workspace = true }

@@ -28,7 +27,6 @@ tracing = { workspace = true }
linkerd-addr = { path = "../addr" }
linkerd-dns-name = { path = "../dns/name" }
linkerd-error = { path = "../error" }
linkerd-http-box = { path = "../http/box" }
linkerd-proxy-api-resolve = { path = "../proxy/api-resolve" }
linkerd-stack = { path = "../stack" }
linkerd-tonic-stream = { path = "../tonic-stream" }

@@ -10,7 +10,7 @@ use std::{
    sync::Arc,
    task::{Context, Poll},
};
use tonic::{body::BoxBody, client::GrpcService};
use tonic::{body::Body as TonicBody, client::GrpcService};
use tracing::debug;

/// Creates watches on service profiles.

@@ -31,7 +31,7 @@ struct Inner<S> {

impl<R, S> Client<R, S>
where
    S: GrpcService<BoxBody> + Clone + Send + 'static,
    S: GrpcService<TonicBody> + Clone + Send + 'static,
    S::ResponseBody: Send + Sync,
    S::ResponseBody: Body<Data = tonic::codegen::Bytes> + Send + 'static,
    <S::ResponseBody as Body>::Error:

@@ -64,7 +64,7 @@ where
impl<T, R, S> Service<T> for Client<R, S>
where
    T: Param<LookupAddr>,
    S: GrpcService<BoxBody> + Clone + Send + 'static,
    S: GrpcService<TonicBody> + Clone + Send + 'static,
    S::ResponseBody: Body<Data = tonic::codegen::Bytes> + Send + 'static,
    <S::ResponseBody as Body>::Error:
        Into<Box<dyn std::error::Error + Send + Sync + 'static>> + Send,

@@ -111,7 +111,7 @@ type InnerFuture =

impl<S> Inner<S>
where
    S: GrpcService<BoxBody> + Clone + Send + 'static,
    S: GrpcService<TonicBody> + Clone + Send + 'static,
    S::ResponseBody: Body<Data = tonic::codegen::Bytes> + Send + 'static,
    <S::ResponseBody as Body>::Error:
        Into<Box<dyn std::error::Error + Send + Sync + 'static>> + Send,

@@ -128,7 +128,7 @@ where

impl<S> Service<LookupAddr> for Inner<S>
where
    S: GrpcService<BoxBody> + Clone + Send + 'static,
    S: GrpcService<TonicBody> + Clone + Send + 'static,
    S::ResponseBody: Body<Data = tonic::codegen::Bytes> + Send + 'static,
    <S::ResponseBody as Body>::Error:
        Into<Box<dyn std::error::Error + Send + Sync + 'static>> + Send,

@@ -6,7 +6,10 @@ mod service;

pub use self::layer::TrackServiceLayer;
pub use self::service::TrackService;
use linkerd_metrics::{metrics, Counter, FmtLabels, FmtMetrics};
use linkerd_metrics::{
    legacy::{Counter, FmtLabels, FmtMetrics},
    metrics,
};
use parking_lot::Mutex;
use std::{collections::HashMap, fmt, hash::Hash, sync::Arc};

@@ -7,8 +7,6 @@ edition = { workspace = true }
publish = { workspace = true }

[dependencies]
futures = { version = "0.3", default-features = false }
linkerd-error = { path = "../../error" }
linkerd-stack = { path = ".." }
tower = { workspace = true }
tracing = { workspace = true }

@@ -7,7 +7,6 @@ edition = { workspace = true }
publish = { workspace = true }

[dependencies]
async-trait = "0.1"
bytes = { workspace = true }
futures = { version = "0.3", default-features = false }
linkerd-conditional = { path = "../conditional" }

@@ -19,7 +18,6 @@ linkerd-stack = { path = "../stack" }
pin-project = "1"
thiserror = "2"
tokio = { version = "1", features = ["macros", "time"] }
tower = { workspace = true }
tracing = { workspace = true }
untrusted = "0.9"

@@ -10,8 +10,6 @@ publish = { workspace = true }
proto = ["linkerd2-proxy-api"]

[dependencies]
regex = "1"
rand = "0.9"
thiserror = "2"
tracing = { workspace = true }
linkerd-tls = { path = "../" }

Some files were not shown because too many files have changed in this diff.