Compare commits


No commits in common. "main" and "v0.1.116" have entirely different histories.

94 changed files with 4513 additions and 15321 deletions


@ -1,2 +0,0 @@
[build]
rustflags = ["--cfg", "tokio_unstable"]


@ -1,16 +0,0 @@
# Set to true to add reviewers to pull requests
addReviewers: true
# Set to true to add assignees to pull requests
addAssignees: author
# A list of reviewers to be added to pull requests (GitHub user name)
reviewers:
- gaius-qi
- yxxhero
- chlins
- CormickKneey
- xujihui1985
# A number of reviewers added to the pull request
numberOfReviewers: 3


@ -1,11 +0,0 @@
name: "Auto Assign"
on:
pull_request_target:
types: [opened, reopened, ready_for_review]
jobs:
add-assignee:
runs-on: ubuntu-latest
steps:
- uses: kentaro-m/auto-assign-action@9f6dbe84a80c6e7639d1b9698048b201052a2a94


@ -26,8 +26,6 @@ jobs:
- name: Install Protoc
uses: arduino/setup-protoc@v2
with:
repo-token: ${{ secrets.GH_TOKEN }}
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@stable
@ -57,8 +55,6 @@ jobs:
- name: Install Protoc
uses: arduino/setup-protoc@v2
with:
repo-token: ${{ secrets.GH_TOKEN }}
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@stable
@ -78,7 +74,7 @@ jobs:
run: cargo llvm-cov --all-features --workspace --lcov --output-path lcov.info
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v5
uses: codecov/codecov-action@v4
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: lcov.info


@ -1,6 +1,11 @@
name: Docker
on:
pull_request:
branches:
- main
paths:
- 'ci/Dockerfile*'
push:
branches:
- main
@ -85,114 +90,6 @@ jobs:
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache-new
- name: Run Trivy vulnerability scanner in tarball mode
uses: aquasecurity/trivy-action@dc5a429b52fcf669ce959baa2c2dd26090d2a6c4
with:
image-ref: dragonflyoss/client:${{ steps.get_version.outputs.VERSION }}
severity: 'CRITICAL,HIGH'
format: 'sarif'
output: 'trivy-results.sarif'
- name: Upload Trivy scan results to GitHub Security tab
uses: github/codeql-action/upload-sarif@76621b61decf072c1cee8dd1ce2d2a82d33c17ed
with:
sarif_file: 'trivy-results.sarif'
- name: Move cache
run: |
rm -rf /tmp/.buildx-cache
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
push_client_debug_image_to_registry:
name: Push Client Debug Image
runs-on: [self-hosted, Linux, X64]
timeout-minutes: 600
steps:
- name: Check out code
uses: actions/checkout@v4
with:
submodules: recursive
- name: Get Version
id: get_version
run: |
VERSION=${GITHUB_REF#refs/tags/}
if [[ ${GITHUB_REF} == "refs/heads/main" || ${GITHUB_REF} =~ refs/pull/([0-9]+)/merge ]]; then
VERSION=latest
fi
echo "VERSION=${VERSION}" >> $GITHUB_OUTPUT
- name: Get Git Revision
id: vars
shell: bash
run: |
echo "git_revision=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
- name: PrepareReg Names
run: |
echo IMAGE_REPOSITORY=$(echo ${{ github.repository }} | tr '[:upper:]' '[:lower:]') >> $GITHUB_ENV
- name: Setup QEMU
uses: docker/setup-qemu-action@v3
- name: Setup Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Cache Docker layers
uses: actions/cache@v4
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-debug-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-debug-
- name: Login Docker Hub
uses: docker/login-action@v3
with:
registry: docker.io
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Push to Registry
uses: docker/build-push-action@v6
with:
context: .
file: ci/Dockerfile.debug
platforms: linux/amd64,linux/arm64
labels: |-
org.opencontainers.image.source=https://github.com/${{ github.repository }}
org.opencontainers.image.revision=${{ github.sha }}
build-args: |
GITVERSION=git-${{ steps.vars.outputs.git_revision }}
VERSION=${{ steps.get_version.outputs.VERSION }}-debug
tags: |
dragonflyoss/client:${{ steps.get_version.outputs.VERSION }}-debug
ghcr.io/${{ env.IMAGE_REPOSITORY }}:${{ steps.get_version.outputs.VERSION }}-debug
push: ${{ github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/') }}
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache-new
- name: Run Trivy vulnerability scanner in tarball mode
uses: aquasecurity/trivy-action@dc5a429b52fcf669ce959baa2c2dd26090d2a6c4
with:
image-ref: dragonflyoss/client:${{ steps.get_version.outputs.VERSION }}-debug
severity: 'CRITICAL,HIGH'
format: 'sarif'
output: 'trivy-results.sarif'
- name: Upload Trivy scan results to GitHub Security tab
uses: github/codeql-action/upload-sarif@76621b61decf072c1cee8dd1ce2d2a82d33c17ed
with:
sarif_file: 'trivy-results.sarif'
- name: Move cache
run: |
rm -rf /tmp/.buildx-cache
@ -275,19 +172,6 @@ jobs:
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache-new
- name: Run Trivy vulnerability scanner in tarball mode
uses: aquasecurity/trivy-action@dc5a429b52fcf669ce959baa2c2dd26090d2a6c4
with:
image-ref: dragonflyoss/dfinit:${{ steps.get_version.outputs.VERSION }}
severity: 'CRITICAL,HIGH'
format: 'sarif'
output: 'trivy-results.sarif'
- name: Upload Trivy scan results to GitHub Security tab
uses: github/codeql-action/upload-sarif@76621b61decf072c1cee8dd1ce2d2a82d33c17ed
with:
sarif_file: 'trivy-results.sarif'
- name: Move cache
run: |
rm -rf /tmp/.buildx-cache


@ -22,14 +22,11 @@ jobs:
- name: Install Protoc
uses: arduino/setup-protoc@v2
with:
repo-token: ${{ secrets.GH_TOKEN }}
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@stable
with:
components: rustfmt, clippy
toolchain: 1.85.0
- name: Set up Clang
uses: egor-tensin/setup-clang@v1


@ -1,20 +0,0 @@
name: PR Label
on:
pull_request:
types: [opened, labeled, unlabeled, synchronize]
permissions:
contents: read
jobs:
classify:
name: Classify PR
runs-on: ubuntu-latest
steps:
- name: PR impact specified
uses: mheap/github-action-required-labels@8afbe8ae6ab7647d0c9f0cfa7c2f939650d22509 # v5.5
with:
mode: exactly
count: 1
labels: 'bug, enhancement, documentation, dependencies'


@ -52,13 +52,12 @@ jobs:
target: ${{ matrix.target }}
- name: Install cargo-deb
uses: taiki-e/cache-cargo-install-action@b33c63d3b3c85540f4eba8a4f71a5cc0ce030855
uses: taiki-e/cache-cargo-install-action@v2
with:
# Don't upgrade cargo-deb, refer to https://github.com/kornelski/cargo-deb/issues/169.
tool: cargo-deb@2.10.0
tool: cargo-deb
- name: Install cargo-generate-rpm
uses: taiki-e/install-action@daa3c1f1f9a9d46f686d9fc2f65773d0c293688b
uses: taiki-e/install-action@v2
with:
tool: cargo-generate-rpm
@ -70,32 +69,33 @@ jobs:
- name: Build binaries
shell: bash
run: |
cargo build --release --bins --workspace --exclude hdfs --target ${{ matrix.target }}
cargo build --release --bins --target ${{ matrix.target }}
- name: Build archive client(DEB)
shell: bash
run: |
binary_name="dragonfly-client"
binary_name="client"
dirname="$binary_name-${{ env.VERSION }}-${{ matrix.target }}"
cargo deb -p dragonfly-client --no-build --target ${{ matrix.target }} --variant ${{ matrix.target }} --compress-type gzip --output $dirname.deb
cargo deb -p dragonfly-client --target ${{ matrix.target }} --variant ${{ matrix.target }} --output $dirname.deb
echo "CLIENT_DEB_ASSET=$dirname.deb" >> $GITHUB_ENV
- name: Build archive client(RPM)
shell: bash
run: |
binary_name="dragonfly-client"
binary_name="client"
dirname="$binary_name-${{ env.VERSION }}-${{ matrix.target }}"
cargo generate-rpm -p dragonfly-client --target ${{ matrix.target }} --variant ${{ matrix.target }} --payload-compress none --output $dirname.rpm
cargo generate-rpm -p dragonfly-client --target ${{ matrix.target }} --variant ${{ matrix.target }} --output $dirname.rpm
echo "CLIENT_RPM_ASSET=$dirname.rpm" >> $GITHUB_ENV
- name: Build archive client(TAR)
shell: bash
run: |
binary_name="dragonfly-client"
binary_name="client"
dirname="$binary_name-${{ env.VERSION }}-${{ matrix.target }}"
mkdir -p "$dirname"
mv "target/${{ matrix.target }}/release/dfget" "$dirname"
mv "target/${{ matrix.target }}/release/dfdaemon" "$dirname"
mv "target/${{ matrix.target }}/release/dfstore" "$dirname"
mv "target/${{ matrix.target }}/release/dfcache" "$dirname"
mv "target/${{ matrix.target }}/release/dfinit" "$dirname"
mv CONTRIBUTING.md LICENSE README.md "$dirname"
@ -119,7 +119,7 @@ jobs:
contents: write
steps:
- name: Download Release Artifacts
uses: actions/download-artifact@v5
uses: actions/download-artifact@v4
with:
path: releases
pattern: release-*
@ -153,8 +153,6 @@ jobs:
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
with:
toolchain: 1.85.0
- name: Install dependencies
run: |
@ -165,4 +163,3 @@ jobs:
with:
registry-token: ${{ secrets.CARGO_REGISTRY_TOKEN }}
ignore-unpublished-changes: true
args: --locked


@ -1,31 +0,0 @@
name: Close stale issues and PRs
on:
workflow_dispatch:
schedule:
- cron: "0 0 * * *"
permissions:
issues: write
pull-requests: write
jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0
id: stale
with:
delete-branch: true
days-before-close: 7
days-before-stale: 90
days-before-pr-close: 7
days-before-pr-stale: 120
stale-issue-label: "stale"
exempt-issue-labels: bug,wip,on-hold
exempt-pr-labels: bug,wip,on-hold
exempt-all-milestones: true
stale-issue-message: 'This issue is stale because it has been open 90 days with no activity.'
close-issue-message: 'This issue was closed because it has been stalled for 7 days with no activity.'
stale-pr-message: 'This PR is stale because it has been open 120 days with no activity.'
close-pr-message: 'This PR was closed because it has been stalled for 7 days with no activity.'

Cargo.lock (generated, 2149 lines changed): file diff suppressed because it is too large.


@ -12,7 +12,7 @@ members = [
]
[workspace.package]
version = "1.0.10"
version = "0.1.116"
authors = ["The Dragonfly Developers"]
homepage = "https://d7y.io/"
repository = "https://github.com/dragonflyoss/client.git"
@ -22,31 +22,19 @@ readme = "README.md"
edition = "2021"
[workspace.dependencies]
dragonfly-client = { path = "dragonfly-client", version = "1.0.10" }
dragonfly-client-core = { path = "dragonfly-client-core", version = "1.0.10" }
dragonfly-client-config = { path = "dragonfly-client-config", version = "1.0.10" }
dragonfly-client-storage = { path = "dragonfly-client-storage", version = "1.0.10" }
dragonfly-client-backend = { path = "dragonfly-client-backend", version = "1.0.10" }
dragonfly-client-util = { path = "dragonfly-client-util", version = "1.0.10" }
dragonfly-client-init = { path = "dragonfly-client-init", version = "1.0.10" }
dragonfly-api = "2.1.57"
thiserror = "2.0"
futures = "0.3.31"
reqwest = { version = "0.12.4", features = [
"stream",
"native-tls",
"default-tls",
"rustls-tls",
"gzip",
"brotli",
"zstd",
"deflate",
"blocking",
] }
reqwest-middleware = "0.4"
dragonfly-client = { path = "dragonfly-client", version = "0.1.116" }
dragonfly-client-core = { path = "dragonfly-client-core", version = "0.1.116" }
dragonfly-client-config = { path = "dragonfly-client-config", version = "0.1.116" }
dragonfly-client-storage = { path = "dragonfly-client-storage", version = "0.1.116" }
dragonfly-client-backend = { path = "dragonfly-client-backend", version = "0.1.116" }
dragonfly-client-util = { path = "dragonfly-client-util", version = "0.1.116" }
dragonfly-client-init = { path = "dragonfly-client-init", version = "0.1.116" }
thiserror = "1.0"
dragonfly-api = "=2.0.169"
reqwest = { version = "0.12.4", features = ["stream", "native-tls", "default-tls", "rustls-tls"] }
rcgen = { version = "0.12.1", features = ["x509-parser"] }
hyper = { version = "1.6", features = ["full"] }
hyper-util = { version = "0.1.16", features = [
hyper = { version = "1.4", features = ["full"] }
hyper-util = { version = "0.1.10", features = [
"client",
"client-legacy",
"tokio",
@ -55,32 +43,33 @@ hyper-util = { version = "0.1.16", features = [
"http2",
] }
hyper-rustls = { version = "0.26", features = ["http1", "http2", "logging"] }
http-range-header = "0.4.2"
http-range-header = "0.4.1"
tracing = "0.1"
url = "2.5.4"
url = "2.5.2"
rustls = { version = "0.22.4", features = ["tls12"] }
rustls-pki-types = "1.12.0"
rustls-pki-types = "1.10.0"
rustls-pemfile = "2.2.0"
sha2 = "0.10"
crc32fast = "1.5.0"
uuid = { version = "1.16", features = ["v4"] }
blake3 = "1.5.4"
crc32fast = "1.4.2"
uuid = { version = "1.11", features = ["v4"] }
hex = "0.4"
rocksdb = "0.22.0"
serde = { version = "1.0", features = ["derive"] }
serde_yaml = "0.9"
http = "1"
tonic = { version = "0.12.2", features = ["tls"] }
tonic = { version = "0.12.2", features = ["zstd", "tls"] }
tonic-reflection = "0.12.3"
tokio = { version = "1.47.1", features = ["full", "tracing"] }
tokio-util = { version = "0.7.16", features = ["full"] }
tokio-stream = "0.1.17"
tokio = { version = "1.41.0", features = ["full"] }
tokio-util = { version = "0.7.12", features = ["full"] }
tokio-stream = "0.1.16"
validator = { version = "0.16", features = ["derive"] }
warp = "0.3.5"
headers = "0.4.1"
headers = "0.4.0"
regex = "1.11.1"
humantime = "2.1.0"
prost-wkt-types = "0.6"
chrono = { version = "0.4.41", features = ["serde", "clock"] }
chrono = { version = "0.4.35", features = ["serde", "clock"] }
openssl = { version = "0.10", features = ["vendored"] }
opendal = { version = "0.48.0", features = [
"services-s3",
@ -89,37 +78,24 @@ opendal = { version = "0.48.0", features = [
"services-oss",
"services-obs",
"services-cos",
"services-webhdfs",
] }
clap = { version = "4.5.45", features = ["derive"] }
anyhow = "1.0.98"
toml_edit = "0.22.26"
toml = "0.8.23"
bytesize = { version = "1.3.3", features = ["serde"] }
clap = { version = "4.5.20", features = ["derive"] }
anyhow = "1.0.91"
toml_edit = "0.22.22"
toml = "0.8.19"
base16ct = { version = "0.2", features = ["alloc"] }
bytesize = { version = "1.2.0", features = ["serde"] }
bytesize-serde = "0.2.1"
percent-encoding = "2.3.1"
tempfile = "3.20.0"
tempfile = "3.13.0"
tokio-rustls = "0.25.0-alpha.4"
serde_json = "1.0.142"
lru = "0.12.5"
fs2 = "0.4.3"
lazy_static = "1.5"
bytes = "1.10"
local-ip-address = "0.6.5"
sysinfo = { version = "0.32.1", default-features = false, features = ["component", "disk", "network", "system", "user"] }
[profile.release]
opt-level = 3
lto = "thin"
opt-level = "z"
lto = true
codegen-units = 1
panic = "abort"
strip = "symbols"
[profile.dev]
opt-level = 0
debug = true
incremental = true
strip = false
[profile.bench]
debug = true


@ -4,9 +4,9 @@
[![CI](https://github.com/dragonflyoss/client/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/dragonflyoss/client/actions/workflows/ci.yml)
[![Coverage](https://codecov.io/gh/dragonflyoss/client/branch/main/graph/badge.svg)](https://codecov.io/gh/dragonflyoss/dfdaemon)
[![Open Source Helpers](https://www.codetriage.com/dragonflyoss/client/badges/users.svg)](https://www.codetriage.com/dragonflyoss/client)
[![Discussions](https://img.shields.io/badge/discussions-on%20github-blue?style=flat-square)](https://github.com/dragonflyoss/dragonfly/discussions)
[![Discussions](https://img.shields.io/badge/discussions-on%20github-blue?style=flat-square)](https://github.com/dragonflyoss/Dragonfly2/discussions)
[![Twitter](https://img.shields.io/twitter/url?style=social&url=https%3A%2F%2Ftwitter.com%2Fdragonfly_oss)](https://twitter.com/dragonfly_oss)
[![LICENSE](https://img.shields.io/github/license/dragonflyoss/dragonfly.svg?style=flat-square)](https://github.com/dragonflyoss/dragonfly/blob/main/LICENSE)
[![LICENSE](https://img.shields.io/github/license/dragonflyoss/Dragonfly2.svg?style=flat-square)](https://github.com/dragonflyoss/Dragonfly2/blob/main/LICENSE)
[![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fdragonflyoss%2Fclient.svg?type=shield)](https://app.fossa.com/projects/git%2Bgithub.com%2Fdragonflyoss%2Fclient?ref=badge_shield)
Dragonfly client written in Rust. It can serve as both a peer and a seed peer.
@ -20,9 +20,9 @@ You can find the full documentation on the [d7y.io](https://d7y.io).
Join the conversation and help the community.
- **Slack Channel**: [#dragonfly](https://cloud-native.slack.com/messages/dragonfly/) on [CNCF Slack](https://slack.cncf.io/)
- **Github Discussions**: [Dragonfly Discussion Forum](https://github.com/dragonflyoss/dragonfly/discussions)
- **Discussion Group**: <dragonfly-discuss@googlegroups.com>
- **Developer Group**: <dragonfly-developers@googlegroups.com>
- **Maintainer Group**: <dragonfly-maintainers@googlegroups.com>
- **Github Discussions**: [Dragonfly Discussion Forum](https://github.com/dragonflyoss/Dragonfly2/discussions)
- **Twitter**: [@dragonfly_oss](https://twitter.com/dragonfly_oss)
- **DingTalk**: [22880028764](https://qr.dingtalk.com/action/joingroup?code=v1,k1,pkV9IbsSyDusFQdByPSK3HfCG61ZCLeb8b/lpQ3uUqI=&_dt_no_comment=1&origin=11)
@ -30,3 +30,7 @@ Join the conversation and help the community.
You should check out our
[CONTRIBUTING](./CONTRIBUTING.md) and develop the project together.
## License
[![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fdragonflyoss%2Fclient.svg?type=large)](https://app.fossa.com/projects/git%2Bgithub.com%2Fdragonflyoss%2Fclient?ref=badge_large)


@ -1,4 +1,4 @@
FROM public.ecr.aws/docker/library/rust:1.85.0 AS builder
FROM rust:1.80.0 AS builder
WORKDIR /app/client
@ -7,7 +7,6 @@ RUN apt-get update && apt-get install -y \
&& rm -rf /var/lib/apt/lists/*
COPY Cargo.toml Cargo.lock ./
COPY .cargo ./cargo
COPY dragonfly-client/Cargo.toml ./dragonfly-client/Cargo.toml
COPY dragonfly-client/src ./dragonfly-client/src
@ -21,7 +20,6 @@ COPY dragonfly-client-config/build.rs ./dragonfly-client-config/build.rs
COPY dragonfly-client-storage/Cargo.toml ./dragonfly-client-storage/Cargo.toml
COPY dragonfly-client-storage/src ./dragonfly-client-storage/src
COPY dragonfly-client-storage/benches ./dragonfly-client-storage/benches
COPY dragonfly-client-backend/Cargo.toml ./dragonfly-client-backend/Cargo.toml
COPY dragonfly-client-backend/src ./dragonfly-client-backend/src
@ -35,15 +33,9 @@ COPY dragonfly-client-util/src ./dragonfly-client-util/src
COPY dragonfly-client-init/Cargo.toml ./dragonfly-client-init/Cargo.toml
COPY dragonfly-client-init/src ./dragonfly-client-init/src
ARG TARGETPLATFORM
RUN case "${TARGETPLATFORM}" in \
"linux/arm64") export JEMALLOC_SYS_WITH_LG_PAGE=16;; \
esac && \
cargo build --release --verbose --bin dfget --bin dfdaemon --bin dfcache
RUN cargo build --release --verbose --bin dfget --bin dfdaemon --bin dfstore --bin dfcache
RUN cargo install tokio-console --locked --root /usr/local
FROM public.ecr.aws/docker/library/alpine:3.20 AS health
FROM alpine:3.20 AS health
ENV GRPC_HEALTH_PROBE_VERSION=v0.4.24
@ -56,24 +48,22 @@ RUN if [ "$(uname -m)" = "ppc64le" ]; then \
fi && \
chmod +x /bin/grpc_health_probe
FROM public.ecr.aws/docker/library/golang:1.23.0-alpine3.20 AS pprof
FROM golang:1.23.0-alpine3.20 AS pprof
RUN go install github.com/google/pprof@latest
RUN go install github.com/fullstorydev/grpcurl/cmd/grpcurl@latest
FROM public.ecr.aws/debian/debian:bookworm-slim
FROM debian:bookworm-slim
RUN apt-get update && apt-get install -y --no-install-recommends iperf3 fio curl \
iotop sysstat bash-completion procps apache2-utils ca-certificates binutils \
dnsutils iputils-ping llvm graphviz lsof strace dstat net-tools \
RUN apt-get update && apt-get install -y --no-install-recommends wget curl \
bash-completion procps apache2-utils ca-certificates binutils bpfcc-tools \
dnsutils iputils-ping vim linux-perf llvm graphviz \
&& rm -rf /var/lib/apt/lists/*
COPY --from=builder /app/client/target/release/dfget /usr/local/bin/dfget
COPY --from=builder /app/client/target/release/dfdaemon /usr/local/bin/dfdaemon
COPY --from=builder /app/client/target/release/dfstore /usr/local/bin/dfstore
COPY --from=builder /app/client/target/release/dfcache /usr/local/bin/dfcache
COPY --from=builder /usr/local/bin/tokio-console /usr/local/bin/
COPY --from=pprof /go/bin/pprof /bin/pprof
COPY --from=pprof /go/bin/grpcurl /bin/grpcurl
COPY --from=health /bin/grpc_health_probe /bin/grpc_health_probe
ENTRYPOINT ["/usr/local/bin/dfdaemon"]


@ -1,83 +0,0 @@
FROM public.ecr.aws/docker/library/rust:1.85.0 AS builder
WORKDIR /app/client
RUN apt-get update && apt-get install -y \
openssl libclang-dev pkg-config protobuf-compiler git \
&& rm -rf /var/lib/apt/lists/*
COPY Cargo.toml Cargo.lock ./
COPY .cargo ./cargo
COPY dragonfly-client/Cargo.toml ./dragonfly-client/Cargo.toml
COPY dragonfly-client/src ./dragonfly-client/src
COPY dragonfly-client-core/Cargo.toml ./dragonfly-client-core/Cargo.toml
COPY dragonfly-client-core/src ./dragonfly-client-core/src
COPY dragonfly-client-config/Cargo.toml ./dragonfly-client-config/Cargo.toml
COPY dragonfly-client-config/src ./dragonfly-client-config/src
COPY dragonfly-client-config/build.rs ./dragonfly-client-config/build.rs
COPY dragonfly-client-storage/Cargo.toml ./dragonfly-client-storage/Cargo.toml
COPY dragonfly-client-storage/src ./dragonfly-client-storage/src
COPY dragonfly-client-storage/benches ./dragonfly-client-storage/benches
COPY dragonfly-client-backend/Cargo.toml ./dragonfly-client-backend/Cargo.toml
COPY dragonfly-client-backend/src ./dragonfly-client-backend/src
COPY dragonfly-client-backend/examples/plugin/Cargo.toml ./dragonfly-client-backend/examples/plugin/Cargo.toml
COPY dragonfly-client-backend/examples/plugin/src ./dragonfly-client-backend/examples/plugin/src
COPY dragonfly-client-util/Cargo.toml ./dragonfly-client-util/Cargo.toml
COPY dragonfly-client-util/src ./dragonfly-client-util/src
COPY dragonfly-client-init/Cargo.toml ./dragonfly-client-init/Cargo.toml
COPY dragonfly-client-init/src ./dragonfly-client-init/src
ARG TARGETPLATFORM
RUN case "${TARGETPLATFORM}" in \
"linux/arm64") export JEMALLOC_SYS_WITH_LG_PAGE=16;; \
esac && \
cargo build --verbose --bin dfget --bin dfdaemon --bin dfcache
RUN cargo install flamegraph --root /usr/local
RUN cargo install bottom --locked --root /usr/local
RUN cargo install tokio-console --locked --root /usr/local
FROM public.ecr.aws/docker/library/alpine:3.20 AS health
ENV GRPC_HEALTH_PROBE_VERSION=v0.4.24
RUN if [ "$(uname -m)" = "ppc64le" ]; then \
wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-ppc64le; \
elif [ "$(uname -m)" = "aarch64" ]; then \
wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-arm64; \
else \
wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64; \
fi && \
chmod +x /bin/grpc_health_probe
FROM public.ecr.aws/docker/library/golang:1.23.0-alpine3.20 AS pprof
RUN go install github.com/google/pprof@latest
RUN go install github.com/fullstorydev/grpcurl/cmd/grpcurl@latest
FROM public.ecr.aws/debian/debian:bookworm-slim
RUN apt-get update && apt-get install -y --no-install-recommends iperf3 fio curl infiniband-diags ibverbs-utils \
iotop sysstat bash-completion procps apache2-utils ca-certificates binutils bpfcc-tools \
dnsutils iputils-ping vim linux-perf llvm lsof socat strace dstat net-tools \
&& rm -rf /var/lib/apt/lists/*
COPY --from=builder /app/client/target/debug/dfget /usr/local/bin/dfget
COPY --from=builder /app/client/target/debug/dfdaemon /usr/local/bin/dfdaemon
COPY --from=builder /app/client/target/debug/dfcache /usr/local/bin/dfcache
COPY --from=builder /usr/local/bin/flamegraph /usr/local/bin/
COPY --from=builder /usr/local/bin/btm /usr/local/bin/
COPY --from=builder /usr/local/bin/tokio-console /usr/local/bin/
COPY --from=pprof /go/bin/pprof /bin/pprof
COPY --from=pprof /go/bin/grpcurl /bin/grpcurl
COPY --from=health /bin/grpc_health_probe /bin/grpc_health_probe
ENTRYPOINT ["/usr/local/bin/dfdaemon"]


@ -1,4 +1,4 @@
FROM public.ecr.aws/docker/library/rust:1.85.0 AS builder
FROM rust:1.80.0 AS builder
RUN apt-get update && apt-get install -y \
openssl libclang-dev pkg-config protobuf-compiler \
@ -7,7 +7,6 @@ RUN apt-get update && apt-get install -y \
WORKDIR /app/client
COPY Cargo.toml Cargo.lock ./
COPY .cargo ./cargo
COPY dragonfly-client/Cargo.toml ./dragonfly-client/Cargo.toml
COPY dragonfly-client/src ./dragonfly-client/src
@ -21,7 +20,6 @@ COPY dragonfly-client-config/build.rs ./dragonfly-client-config/build.rs
COPY dragonfly-client-storage/Cargo.toml ./dragonfly-client-storage/Cargo.toml
COPY dragonfly-client-storage/src ./dragonfly-client-storage/src
COPY dragonfly-client-storage/benches ./dragonfly-client-storage/benches
COPY dragonfly-client-backend/Cargo.toml ./dragonfly-client-backend/Cargo.toml
COPY dragonfly-client-backend/src ./dragonfly-client-backend/src
@ -35,13 +33,9 @@ COPY dragonfly-client-util/src ./dragonfly-client-util/src
COPY dragonfly-client-init/Cargo.toml ./dragonfly-client-init/Cargo.toml
COPY dragonfly-client-init/src ./dragonfly-client-init/src
ARG TARGETPLATFORM
RUN case "${TARGETPLATFORM}" in \
"linux/arm64") export JEMALLOC_SYS_WITH_LG_PAGE=16;; \
esac && \
cargo build --release --verbose --bin dfinit
RUN cargo build --release --verbose --bin dfinit
FROM public.ecr.aws/debian/debian:bookworm-slim
FROM debian:bookworm-slim
RUN apt-get update && apt-get install -y --no-install-recommends wget \
&& rm -rf /var/lib/apt/lists/*


@ -5,7 +5,7 @@ After=network-online.target
After=network.target
[Service]
ExecStart=/usr/bin/dfdaemon --config /etc/dragonfly/dfdaemon.yaml --console
ExecStart=/usr/bin/dfdaemon --config /etc/dragonfly/dfdaemon.yaml --verbose
Type=simple
Environment=HOME=/root

ci/dfdaemon.yaml (new file, 153 lines)

@ -0,0 +1,153 @@
# verbose prints log.
verbose: true
log:
# Specify the logging level [trace, debug, info, warn, error]
level: info
# host is the host configuration for dfdaemon.
host:
## idc is the idc of the host.
idc: ''
## location is the location of the host.
location: ''
## hostname is the hostname of the host.
# hostname: ""
## ip is the advertise ip of the host.
# ip: ""
server:
# pluginDir is the directory to store plugins.
pluginDir: /var/lib/dragonfly/plugins/dfdaemon/
# cacheDir is the directory to store cache files.
cacheDir: /var/cache/dragonfly/dfdaemon/
download:
server:
# -- socketPath is the unix socket path for dfdaemon GRPC service.
socketPath: /var/run/dragonfly/dfdaemon.sock
# -- rateLimit is the default rate limit of the download speed in KiB/MiB/GiB per second, default is 10GiB/s.
rateLimit: 10GiB
# -- pieceTimeout is the timeout for downloading a piece from source.
pieceTimeout: 30s
# -- concurrentPieceCount is the number of concurrent pieces to download.
concurrentPieceCount: 10
upload:
server:
# -- port is the port to the grpc server.
port: 4000
## ip is the listen ip of the grpc server.
# ip: ""
# -- rateLimit is the default rate limit of the upload speed in KiB/MiB/GiB per second, default is 10GiB/s.
rateLimit: 10GiB
manager:
# addrs is manager addresses.
addrs: []
scheduler:
# announceInterval is the interval to announce peer to the scheduler.
# Announcer will provide the scheduler with peer information for scheduling,
# peer information includes cpu, memory, etc.
announceInterval: 1m
# scheduleTimeout is the timeout for scheduling. If scheduling times out, dfdaemon will fall back to back-to-source
# download if enableBackToSource is true; otherwise dfdaemon will report the download as failed.
scheduleTimeout: 30s
# maxScheduleCount is the max count of schedule.
maxScheduleCount: 5
# enableBackToSource indicates whether to enable back-to-source download when scheduling fails.
enableBackToSource: true
seedPeer:
# enable indicates whether enable seed peer.
enable: true
# type is the type of seed peer.
type: super
# clusterID is the cluster id of the seed peer cluster.
clusterID: 1
# keepaliveInterval is the interval to keep alive with manager.
keepaliveInterval: 15s
dynconfig:
# refreshInterval is the interval to refresh dynamic configuration from manager.
refreshInterval: 1m
storage:
# dir is the directory to store task's metadata and content.
dir: /var/lib/dragonfly/
# keep indicates whether to keep the task's metadata and content when dfdaemon restarts.
keep: true
# writeBufferSize is the buffer size for writing piece to disk, default is 128KB.
writeBufferSize: 131072
# readBufferSize is the buffer size for reading piece from disk, default is 128KB.
readBufferSize: 131072
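## Note (editor's annotation): 131072 bytes = 128 * 1024, i.e. 128 KiB, matching the 128KB defaults noted above.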
gc:
# interval is the interval to do gc.
interval: 900s
policy:
# taskTTL is the ttl of the task.
taskTTL: 21600s
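## Note (editor's annotation): the gc interval of 900s equals 15 minutes, and a taskTTL of 21600s equals 6 hours.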
# distHighThresholdPercent is the high threshold percent of the disk usage.
# If the disk usage is greater than the threshold, dfdaemon will do gc.
distHighThresholdPercent: 80
# distLowThresholdPercent is the low threshold percent of the disk usage.
# If the disk usage is less than the threshold, dfdaemon will stop gc.
distLowThresholdPercent: 60
proxy:
server:
# port is the port to the proxy server.
port: 4001
## ip is the listen ip of the proxy server.
# ip: ""
## caCert is the root CA cert path with PEM format for the proxy server to generate the server cert.
## If ca_cert is empty, the proxy will generate a sample CA cert by rcgen::generate_simple_self_signed.
## When client requests via the proxy, the client should not verify the server cert and set
## insecure to true. If ca_cert is not empty, proxy will sign the server cert with the CA cert. If openssl is installed,
## you can use openssl to generate the root CA cert and make the system trust the root CA cert.
## Then set the ca_cert and ca_key to the root CA cert and key path. Dfdaemon generates the server cert
## and key, and signs the server cert with the root CA cert. When client requests via the proxy,
## the proxy can intercept the request by the server cert.
# caCert: ""
## caKey is the root CA key path with PEM format for the proxy server to generate the server cert.
## If ca_key is empty, the proxy will generate a sample CA key by rcgen::generate_simple_self_signed.
## When client requests via the proxy, the client should not verify the server cert and set
## insecure to true. If ca_key is not empty, proxy will sign the server cert with the CA cert. If openssl is installed,
## you can use openssl to generate the root CA cert and make the system trust the root CA cert.
## Then set the ca_cert and ca_key to the root CA cert and key path. Dfdaemon generates the server cert
## and key, and signs the server cert with the root CA cert. When client requests via the proxy,
## the proxy can intercept the request by the server cert.
# caKey: ""
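## Illustrative sketch only (not part of the original config; the paths and CN below are assumptions):
## one way to generate a root CA with openssl, then point caCert/caKey at the generated files and make
## the system trust ca.crt:
##   openssl genrsa -out /etc/dragonfly/ca.key 4096
##   openssl req -x509 -new -key /etc/dragonfly/ca.key -sha256 -days 365 \
##     -subj "/CN=dfdaemon-proxy-ca" -out /etc/dragonfly/ca.crt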
# rules is the list of rules for the proxy server.
# regex is the regex of the request url.
# useTLS indicates whether to use TLS for the proxy backend.
# redirect is the redirect url.
# filteredQueryParams is the filtered query params to generate the task id.
# When filter is ["Signature", "Expires", "ns"], for example:
# http://example.com/xyz?Expires=e1&Signature=s1&ns=docker.io and http://example.com/xyz?Expires=e2&Signature=s2&ns=docker.io
# will generate the same task id.
# Default value includes the filtered query params of s3, gcs, oss, obs, cos.
rules:
- regex: 'blobs/sha256.*'
# useTLS: false
# redirect: ""
# filteredQueryParams: []
registryMirror:
# addr is the default address of the registry mirror. Proxy will start a registry mirror service for the
# client to pull the image. The client can use the default address of the registry mirror in
# configuration to pull the image. The `X-Dragonfly-Registry` header can be used instead of the default
# address of the registry mirror.
addr: https://index.docker.io
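## Illustrative sketch only (assumed values, not part of the original config): requesting a blob through
## the dfdaemon proxy on port 4001 while overriding the registry address with the X-Dragonfly-Registry
## header; <repository> and <digest> are placeholders.
##   curl -x http://127.0.0.1:4001 \
##     -H "X-Dragonfly-Registry: https://ghcr.io" \
##     http://ghcr.io/v2/<repository>/blobs/sha256:<digest>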
## certs is the client certs path with PEM format for the registry.
## If the registry uses a self-signed cert, the client should set the
## cert for the registry mirror.
# certs: ""
# disableBackToSource indicates whether to disable back-to-source download when the download fails.
disableBackToSource: false
# prefetch pre-downloads the full task when downloading with a range request.
prefetch: false
# -- readBufferSize is the buffer size for reading piece from disk, default is 32KB.
readBufferSize: 32768
security:
# enable indicates whether enable security.
enable: false
metrics:
server:
# port is the port to the metrics server.
port: 4002
## ip is the listen ip of the metrics server.
# ip: ""
## tracing is the tracing configuration for dfdaemon.
# tracing:
## addr is the address to report tracing log.
# addr: ""

Binary file not shown (removed image, 40 KiB).


@ -1,98 +0,0 @@
# Performance Optimization Guidance
This is a reference benchmark process document designed to
assist in performance analysis and optimization for **client**.
This document provides as general a testing framework as possible,
allowing developers to adjust it to their specific circumstances across various platforms.
## Flow
![architecture](images/performance-testing-arch.png)
## Preparation
### Step 1: Setup Dragonfly
- Please refer to [official doc](https://d7y.io/docs/next/getting-started/installation/helm-charts/).
### Step 2: Start a file server
- Start with docker:
```bash
export FILE_SERVER_PORT=12345
docker run -d --rm -p ${FILE_SERVER_PORT}:80 --name dragonfly-fs dragonflyoss/file-server:latest
```
- Check the file server is ready:
```bash
# return success if ready
curl -s -o /dev/null \
-w "%{http_code}" \
http://localhost:12345/nano \
| grep -q "200" \
&& echo "Success" \
|| echo "Failed"
```
- Optional:
> You can build your own image; see the [**Dockerfile**](https://github.com/dragonflyoss/perf-tests/blob/main/tools/file-server/Dockerfile) for reference.
### Step 3: Install test tools
- Request Generator: [**oha**](https://github.com/hatoo/oha)
```bash
brew install oha
```
- Profiling: [**flamegraph**](https://github.com/flamegraph-rs/flamegraph)
```bash
cargo install flamegraph
```
### Step 4: Setup Dragonfly Peer
> Document: [Install with binary](https://d7y.io/docs/next/getting-started/installation/binaries/).
- Compile the target binary
```bash
cargo build --release --bin dfdaemon
```
- Connect to Dragonfly
```bash
# prepare client.yaml by yourself.
./target/release/dfdaemon --config client.yaml -l info --console
```
## FlameGraph
Now, let's start the benchmark with the following parameters:
- $FILE_SERVER_ADDRESS
- $CLIENT_PROXY_ADDRESS
### Collect Flamegraph
- Capture the flamegraph:
```bash
## Stop after all requests are done (a PID lookup sketch follows this list).
sudo flamegraph -o my_flamegraph.svg --pid 3442
```
- Make the request:
```bash
oha -c 1000 \
-n 100 \
--rand-regex-url $FILE_SERVER_ADDRESS/\(nano\|micro\|small\|medium\|large\) \
-x $CLIENT_PROXY_ADDRESS
```
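The capture step above hard-codes `--pid 3442`. A minimal sketch for resolving the PID dynamically, assuming a single running process whose binary is named `dfdaemon` and that `pgrep` is available:
```bash
# Resolve the dfdaemon PID instead of hard-coding it (assumes exactly one dfdaemon process).
DFDAEMON_PID="$(pgrep -x dfdaemon | head -n 1)"

# Profile that process; stop flamegraph once all oha requests have finished.
sudo flamegraph -o my_flamegraph.svg --pid "${DFDAEMON_PID}"
```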


@ -14,7 +14,6 @@ dragonfly-client-core.workspace = true
dragonfly-client-util.workspace = true
dragonfly-api.workspace = true
reqwest.workspace = true
reqwest-middleware.workspace = true
tokio.workspace = true
tokio-util.workspace = true
rustls.workspace = true
@ -24,14 +23,12 @@ url.workspace = true
tracing.workspace = true
opendal.workspace = true
percent-encoding.workspace = true
futures.workspace = true
reqwest-retry = "0.7"
reqwest-tracing = "0.5"
libloading = "0.8.8"
futures = "0.3.28"
libloading = "0.8.5"
[dev-dependencies]
tempfile.workspace = true
wiremock = "0.6.4"
wiremock = "0.6.2"
rustls-pki-types.workspace = true
rustls-pemfile.workspace = true
hyper.workspace = true


@ -14,7 +14,7 @@ cargo build --all && mv target/debug/libhdfs.so {plugin_dir}/backend/libhdfs.so
## Run Client with Plugin
```shell
$ cargo run --bin dfdaemon -- --config {config_dir}/config.yaml -l info --console
$ cargo run --bin dfdaemon -- --config {config_dir}/config.yaml -l info --verbose
INFO load [http] builtin backend
INFO load [https] builtin backend
INFO load [hdfs] plugin backend


@ -1,272 +0,0 @@
/*
* Copyright 2024 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use dragonfly_api::common;
use dragonfly_client_core::error::BackendError;
use dragonfly_client_core::{Error as ClientError, Result as ClientResult};
use opendal::{layers::TimeoutLayer, Metakey, Operator};
use percent_encoding::percent_decode_str;
use std::time::Duration;
use tokio_util::io::StreamReader;
use tracing::{error, info, instrument};
use url::Url;
/// HDFS_SCHEME is the scheme of the HDFS.
pub const HDFS_SCHEME: &str = "hdfs";
/// DEFAULT_NAMENODE_PORT is the default port of the HDFS namenode.
const DEFAULT_NAMENODE_PORT: u16 = 9870;
/// Hdfs is a struct that implements the Backend trait.
#[derive(Default)]
pub struct Hdfs {
/// scheme is the scheme of the HDFS.
scheme: String,
}
/// Hdfs implements the Backend trait.
impl Hdfs {
/// new returns a new HDFS backend.
pub fn new() -> Self {
Self {
scheme: HDFS_SCHEME.to_string(),
}
}
/// operator initializes the operator with the parsed URL and HDFS config.
pub fn operator(
&self,
url: Url,
config: Option<common::v2::Hdfs>,
timeout: Duration,
) -> ClientResult<Operator> {
// Get the host and port from the URL.
let host = url
.host_str()
.ok_or_else(|| ClientError::InvalidURI(url.to_string()))?
.to_string();
let port = url.port().unwrap_or(DEFAULT_NAMENODE_PORT);
// Initialize the HDFS operator.
let mut builder = opendal::services::Webhdfs::default();
builder = builder
.root("/")
.endpoint(format!("http://{}:{}", host, port).as_str());
// If HDFS config is not None, set the config for builder.
if let Some(config) = config {
if let Some(delegation_token) = &config.delegation_token {
builder = builder.delegation(delegation_token.as_str());
}
}
Ok(Operator::new(builder)?
.finish()
.layer(TimeoutLayer::new().with_timeout(timeout)))
}
}
/// Implement the Backend trait for Hdfs.
#[tonic::async_trait]
impl super::Backend for Hdfs {
/// scheme returns the scheme of the HDFS backend.
fn scheme(&self) -> String {
self.scheme.clone()
}
/// head gets the header of the request.
#[instrument(skip_all)]
async fn head(&self, request: super::HeadRequest) -> ClientResult<super::HeadResponse> {
info!(
"head request {} {}: {:?}",
request.task_id, request.url, request.http_header
);
// Parse the URL.
let url = Url::parse(request.url.as_ref())
.map_err(|_| ClientError::InvalidURI(request.url.clone()))?;
let decoded_path = percent_decode_str(url.path())
.decode_utf8_lossy()
.to_string();
// Initialize the operator with the parsed URL and HDFS config.
let operator = self.operator(url.clone(), request.hdfs, request.timeout)?;
// Get the entries if url point to a directory.
let entries = if url.path().ends_with('/') {
operator
.list_with(decoded_path.as_str())
.recursive(true)
.metakey(Metakey::ContentLength | Metakey::Mode)
.await // Do the list op here.
.map_err(|err| {
error!(
"list request failed {} {}: {}",
request.task_id, request.url, err
);
ClientError::BackendError(Box::new(BackendError {
message: err.to_string(),
status_code: None,
header: None,
}))
})?
.into_iter()
.map(|entry| {
let metadata = entry.metadata();
let mut url = url.clone();
url.set_path(entry.path());
super::DirEntry {
url: url.to_string(),
content_length: metadata.content_length() as usize,
is_dir: metadata.is_dir(),
}
})
.collect()
} else {
Vec::new()
};
// Stat the path to get the response from HDFS operator.
let response = operator
.stat_with(decoded_path.as_str())
.await
.map_err(|err| {
error!(
"stat request failed {} {}: {}",
request.task_id, request.url, err
);
ClientError::BackendError(Box::new(BackendError {
message: err.to_string(),
status_code: None,
header: None,
}))
})?;
info!(
"head response {} {}: {}",
request.task_id,
request.url,
response.content_length()
);
Ok(super::HeadResponse {
success: true,
content_length: Some(response.content_length()),
http_header: None,
http_status_code: None,
error_message: None,
entries,
})
}
/// get returns content of requested file.
#[instrument(skip_all)]
async fn get(
&self,
request: super::GetRequest,
) -> ClientResult<super::GetResponse<super::Body>> {
info!(
"get request {} {}: {:?}",
request.piece_id, request.url, request.http_header
);
// Parse the URL.
let url = Url::parse(request.url.as_ref())
.map_err(|_| ClientError::InvalidURI(request.url.clone()))?;
let decoded_path = percent_decode_str(url.path())
.decode_utf8_lossy()
.to_string();
// Initialize the operator with the parsed URL and HDFS config.
let operator_reader = self
.operator(url.clone(), request.hdfs, request.timeout)?
.reader(decoded_path.as_ref())
.await
.map_err(|err| {
error!(
"get request failed {} {}: {}",
request.piece_id, request.url, err
);
ClientError::BackendError(Box::new(BackendError {
message: err.to_string(),
status_code: None,
header: None,
}))
})?;
let stream = match request.range {
Some(range) => operator_reader
.into_bytes_stream(range.start..range.start + range.length)
.await
.map_err(|err| {
error!(
"get request failed {} {}: {}",
request.piece_id, request.url, err
);
ClientError::BackendError(Box::new(BackendError {
message: err.to_string(),
status_code: None,
header: None,
}))
})?,
None => operator_reader.into_bytes_stream(..).await.map_err(|err| {
error!(
"get request failed {} {}: {}",
request.piece_id, request.url, err
);
ClientError::BackendError(Box::new(BackendError {
message: err.to_string(),
status_code: None,
header: None,
}))
})?,
};
Ok(crate::GetResponse {
success: true,
http_header: None,
http_status_code: Some(reqwest::StatusCode::OK),
reader: Box::new(StreamReader::new(stream)),
error_message: None,
})
}
}
#[cfg(test)]
mod tests {
use super::*;
#[tokio::test]
async fn should_get_operator() {
let url: Url = Url::parse("hdfs://127.0.0.1:9870/file").unwrap();
let operator = Hdfs::new().operator(url, None, Duration::from_secs(10));
assert!(
operator.is_ok(),
"can not get hdfs operator, due to: {}",
operator.unwrap_err()
);
}
#[test]
fn should_return_error_when_url_not_valid() {
let url: Url = Url::parse("hdfs:/127.0.0.1:9870/file").unwrap();
let result = Hdfs::new().operator(url, None, Duration::from_secs(10));
assert!(result.is_err());
assert!(matches!(result.unwrap_err(), ClientError::InvalidURI(..)));
}
}


@ -17,126 +17,59 @@
use dragonfly_client_core::{Error, Result};
use dragonfly_client_util::tls::NoVerifier;
use futures::TryStreamExt;
use reqwest_middleware::{ClientBuilder, ClientWithMiddleware};
use reqwest_retry::{policies::ExponentialBackoff, RetryTransientMiddleware};
use reqwest_tracing::TracingMiddleware;
use rustls_pki_types::CertificateDer;
use std::io::{Error as IOError, ErrorKind};
use tokio_util::io::StreamReader;
use tracing::{debug, error, instrument};
/// HTTP_SCHEME is the HTTP scheme.
pub const HTTP_SCHEME: &str = "http";
/// HTTPS_SCHEME is the HTTPS scheme.
pub const HTTPS_SCHEME: &str = "https";
use tracing::{error, info, instrument};
/// HTTP is the HTTP backend.
pub struct HTTP {
/// scheme is the scheme of the HTTP backend.
scheme: String,
/// client is the reqwest client.
client: ClientWithMiddleware,
}
/// HTTP implements the http interface.
impl HTTP {
/// new returns a new HTTP.
pub fn new(scheme: &str) -> Result<HTTP> {
// Default TLS client config with no validation.
let client_config_builder = rustls::ClientConfig::builder()
.dangerous()
.with_custom_certificate_verifier(NoVerifier::new())
.with_no_client_auth();
// Disable automatic compression to prevent double-decompression issues.
//
// Problem scenario:
// 1. Origin server supports gzip and returns "content-encoding: gzip" header.
// 2. Backend decompresses the response and stores uncompressed content to disk.
// 3. When user's client downloads via dfdaemon proxy, the original "content-encoding: gzip".
// header is forwarded to it.
// 4. User's client attempts to decompress the already-decompressed content, causing errors.
//
// Solution: Disable all compression formats (gzip, brotli, zstd, deflate) to ensure
// we receive and store uncompressed content, eliminating the double-decompression issue.
let client = reqwest::Client::builder()
.no_gzip()
.no_brotli()
.no_zstd()
.no_deflate()
.use_preconfigured_tls(client_config_builder)
.pool_max_idle_per_host(super::POOL_MAX_IDLE_PER_HOST)
.tcp_keepalive(super::KEEP_ALIVE_INTERVAL)
.build()?;
let retry_policy =
ExponentialBackoff::builder().build_with_max_retries(super::MAX_RETRY_TIMES);
let client = ClientBuilder::new(client)
.with(TracingMiddleware::default())
.with(RetryTransientMiddleware::new_with_policy(retry_policy))
.build();
Ok(Self {
#[instrument(skip_all)]
pub fn new(scheme: &str) -> HTTP {
Self {
scheme: scheme.to_string(),
client,
})
}
}
/// client returns a new reqwest client.
fn client(
&self,
client_cert: Option<Vec<CertificateDer<'static>>>,
) -> Result<ClientWithMiddleware> {
match client_cert.as_ref() {
#[instrument(skip_all)]
fn client(&self, client_cert: Option<Vec<CertificateDer<'static>>>) -> Result<reqwest::Client> {
let client_config_builder = match client_cert.as_ref() {
Some(client_cert) => {
let mut root_cert_store = rustls::RootCertStore::empty();
root_cert_store.add_parsable_certificates(client_cert.to_owned());
// TLS client config using the custom CA store for lookups.
let client_config_builder = rustls::ClientConfig::builder()
rustls::ClientConfig::builder()
.with_root_certificates(root_cert_store)
.with_no_client_auth();
.with_no_client_auth()
}
// Default TLS client config with native roots.
None => rustls::ClientConfig::builder()
.dangerous()
.with_custom_certificate_verifier(NoVerifier::new())
.with_no_client_auth(),
};
// Disable automatic compression to prevent double-decompression issues.
//
// Problem scenario:
// 1. Origin server supports gzip and returns "content-encoding: gzip" header.
// 2. Backend decompresses the response and stores uncompressed content to disk.
// 3. When user's client downloads via dfdaemon proxy, the original "content-encoding: gzip".
// header is forwarded to it.
// 4. User's client attempts to decompress the already-decompressed content, causing errors.
//
// Solution: Disable all compression formats (gzip, brotli, zstd, deflate) to ensure
// we receive and store uncompressed content, eliminating the double-decompression issue.
let client = reqwest::Client::builder()
.no_gzip()
.no_brotli()
.no_zstd()
.no_deflate()
.use_preconfigured_tls(client_config_builder)
.build()?;
let retry_policy =
ExponentialBackoff::builder().build_with_max_retries(super::MAX_RETRY_TIMES);
let client = ClientBuilder::new(client)
.with(TracingMiddleware::default())
.with(RetryTransientMiddleware::new_with_policy(retry_policy))
.build();
Ok(client)
}
// Default TLS client config with no validation.
None => Ok(self.client.clone()),
}
}
}
/// Backend implements the Backend trait.
#[tonic::async_trait]
impl super::Backend for HTTP {
/// scheme returns the scheme of the HTTP backend.
#[instrument(skip_all)]
fn scheme(&self) -> String {
self.scheme.clone()
}
@ -144,7 +77,7 @@ impl super::Backend for HTTP {
/// head gets the header of the request.
#[instrument(skip_all)]
async fn head(&self, request: super::HeadRequest) -> Result<super::HeadResponse> {
debug!(
info!(
"head request {} {}: {:?}",
request.task_id, request.url, request.http_header
);
@ -160,37 +93,27 @@ impl super::Backend for HTTP {
.client(request.client_cert)?
.get(&request.url)
.headers(header)
// Add Range header to ensure Content-Length is returned in response headers.
// Some servers (especially when using Transfer-Encoding: chunked,
// refer to https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers/Transfer-Encoding.) may not
// include Content-Length in HEAD requests. Using "bytes=0-" requests the
// entire file starting from byte 0, forcing the server to include file size
// information in the response headers.
.header(reqwest::header::RANGE, "bytes=0-")
.timeout(request.timeout)
.send()
.await
.inspect_err(|err| {
.map_err(|err| {
error!(
"head request failed {} {}: {}",
request.task_id, request.url, err
);
err
})?;
let header = response.headers().clone();
let status_code = response.status();
let content_length = response.content_length();
debug!(
"head response {} {}: {:?} {:?} {:?}",
request.task_id, request.url, status_code, content_length, header
info!(
"head response {} {}: {:?} {:?}",
request.task_id, request.url, status_code, header
);
// Drop the response body to avoid reading it.
drop(response);
Ok(super::HeadResponse {
success: status_code.is_success(),
content_length,
content_length: response.content_length(),
http_header: Some(header),
http_status_code: Some(status_code),
error_message: Some(status_code.to_string()),
@ -201,7 +124,7 @@ impl super::Backend for HTTP {
/// get gets the content of the request.
#[instrument(skip_all)]
async fn get(&self, request: super::GetRequest) -> Result<super::GetResponse<super::Body>> {
debug!(
info!(
"get request {} {} {}: {:?}",
request.task_id, request.piece_id, request.url, request.http_header
);
@ -215,11 +138,12 @@ impl super::Backend for HTTP {
.timeout(request.timeout)
.send()
.await
.inspect_err(|err| {
.map_err(|err| {
error!(
"get request failed {} {} {}: {}",
request.task_id, request.piece_id, request.url, err
);
err
})?;
let header = response.headers().clone();
@ -229,8 +153,7 @@ impl super::Backend for HTTP {
.bytes_stream()
.map_err(|err| IOError::new(ErrorKind::Other, err)),
));
debug!(
info!(
"get response {} {}: {:?} {:?}",
request.task_id, request.piece_id, status_code, header
);
@ -245,12 +168,17 @@ impl super::Backend for HTTP {
}
}
/// Default implements the Default trait.
impl Default for HTTP {
/// default returns a new default HTTP.
fn default() -> Self {
Self::new("http")
}
}
#[cfg(test)]
mod tests {
use crate::{
http::{HTTP, HTTPS_SCHEME, HTTP_SCHEME},
Backend, GetRequest, HeadRequest,
};
use crate::{http::HTTP, Backend, GetRequest, HeadRequest};
use dragonfly_client_util::tls::{load_certs_from_pem, load_key_from_pem};
use hyper_util::rt::{TokioExecutor, TokioIo};
use reqwest::{header::HeaderMap, StatusCode};
@ -419,8 +347,7 @@ TrIVG3cErZoBC6zqBs/Ibe9q3gdHGqS3QLAKy/k=
.mount(&server)
.await;
let resp = HTTP::new(HTTP_SCHEME)
.unwrap()
let resp = HTTP::new("http")
.head(HeadRequest {
task_id: "test".to_string(),
url: format!("{}/head", server.uri()),
@ -428,7 +355,6 @@ TrIVG3cErZoBC6zqBs/Ibe9q3gdHGqS3QLAKy/k=
timeout: std::time::Duration::from_secs(5),
client_cert: None,
object_storage: None,
hdfs: None,
})
.await
.unwrap();
@ -448,8 +374,7 @@ TrIVG3cErZoBC6zqBs/Ibe9q3gdHGqS3QLAKy/k=
.mount(&server)
.await;
let resp = HTTP::new(HTTP_SCHEME)
.unwrap()
let resp = HTTP::new("http")
.head(HeadRequest {
task_id: "test".to_string(),
url: format!("{}/head", server.uri()),
@ -457,7 +382,6 @@ TrIVG3cErZoBC6zqBs/Ibe9q3gdHGqS3QLAKy/k=
timeout: std::time::Duration::from_secs(5),
client_cert: None,
object_storage: None,
hdfs: None,
})
.await;
@ -477,8 +401,7 @@ TrIVG3cErZoBC6zqBs/Ibe9q3gdHGqS3QLAKy/k=
.mount(&server)
.await;
let mut resp = HTTP::new(HTTP_SCHEME)
.unwrap()
let mut resp = HTTP::new("http")
.get(GetRequest {
task_id: "test".to_string(),
piece_id: "test".to_string(),
@ -488,7 +411,6 @@ TrIVG3cErZoBC6zqBs/Ibe9q3gdHGqS3QLAKy/k=
timeout: std::time::Duration::from_secs(5),
client_cert: None,
object_storage: None,
hdfs: None,
})
.await
.unwrap();
@ -500,8 +422,7 @@ TrIVG3cErZoBC6zqBs/Ibe9q3gdHGqS3QLAKy/k=
#[tokio::test]
async fn should_get_head_response_with_self_signed_cert() {
let server_addr = start_https_server(SERVER_CERT, SERVER_KEY).await;
let resp = HTTP::new(HTTPS_SCHEME)
.unwrap()
let resp = HTTP::new("https")
.head(HeadRequest {
task_id: "test".to_string(),
url: server_addr,
@ -509,7 +430,6 @@ TrIVG3cErZoBC6zqBs/Ibe9q3gdHGqS3QLAKy/k=
timeout: Duration::from_secs(5),
client_cert: Some(load_certs_from_pem(CA_CERT).unwrap()),
object_storage: None,
hdfs: None,
})
.await
.unwrap();
@ -520,8 +440,7 @@ TrIVG3cErZoBC6zqBs/Ibe9q3gdHGqS3QLAKy/k=
#[tokio::test]
async fn should_return_error_response_when_head_with_wrong_cert() {
let server_addr = start_https_server(SERVER_CERT, SERVER_KEY).await;
let resp = HTTP::new(HTTPS_SCHEME)
.unwrap()
let resp = HTTP::new("https")
.head(HeadRequest {
task_id: "test".to_string(),
url: server_addr,
@ -529,7 +448,6 @@ TrIVG3cErZoBC6zqBs/Ibe9q3gdHGqS3QLAKy/k=
timeout: Duration::from_secs(5),
client_cert: Some(load_certs_from_pem(WRONG_CA_CERT).unwrap()),
object_storage: None,
hdfs: None,
})
.await;
@ -539,8 +457,7 @@ TrIVG3cErZoBC6zqBs/Ibe9q3gdHGqS3QLAKy/k=
#[tokio::test]
async fn should_get_response_with_self_signed_cert() {
let server_addr = start_https_server(SERVER_CERT, SERVER_KEY).await;
let mut resp = HTTP::new(HTTPS_SCHEME)
.unwrap()
let mut resp = HTTP::new("https")
.get(GetRequest {
task_id: "test".to_string(),
piece_id: "test".to_string(),
@ -550,7 +467,6 @@ TrIVG3cErZoBC6zqBs/Ibe9q3gdHGqS3QLAKy/k=
timeout: std::time::Duration::from_secs(5),
client_cert: Some(load_certs_from_pem(CA_CERT).unwrap()),
object_storage: None,
hdfs: None,
})
.await
.unwrap();
@ -562,8 +478,7 @@ TrIVG3cErZoBC6zqBs/Ibe9q3gdHGqS3QLAKy/k=
#[tokio::test]
async fn should_return_error_response_when_get_with_wrong_cert() {
let server_addr = start_https_server(SERVER_CERT, SERVER_KEY).await;
let resp = HTTP::new(HTTPS_SCHEME)
.unwrap()
let resp = HTTP::new("https")
.get(GetRequest {
task_id: "test".to_string(),
piece_id: "test".to_string(),
@ -573,7 +488,6 @@ TrIVG3cErZoBC6zqBs/Ibe9q3gdHGqS3QLAKy/k=
timeout: std::time::Duration::from_secs(5),
client_cert: Some(load_certs_from_pem(WRONG_CA_CERT).unwrap()),
object_storage: None,
hdfs: None,
})
.await;
@ -583,8 +497,7 @@ TrIVG3cErZoBC6zqBs/Ibe9q3gdHGqS3QLAKy/k=
#[tokio::test]
async fn should_get_head_response_with_no_verifier() {
let server_addr = start_https_server(SERVER_CERT, SERVER_KEY).await;
let resp = HTTP::new(HTTPS_SCHEME)
.unwrap()
let resp = HTTP::new("https")
.head(HeadRequest {
task_id: "test".to_string(),
url: server_addr,
@ -592,7 +505,6 @@ TrIVG3cErZoBC6zqBs/Ibe9q3gdHGqS3QLAKy/k=
timeout: Duration::from_secs(5),
client_cert: None,
object_storage: None,
hdfs: None,
})
.await
.unwrap();
@ -603,9 +515,8 @@ TrIVG3cErZoBC6zqBs/Ibe9q3gdHGqS3QLAKy/k=
#[tokio::test]
async fn should_get_response_with_no_verifier() {
let server_addr = start_https_server(SERVER_CERT, SERVER_KEY).await;
let http_backend = HTTP::new(HTTPS_SCHEME);
let http_backend = HTTP::new("https");
let mut resp = http_backend
.unwrap()
.get(GetRequest {
task_id: "test".to_string(),
piece_id: "test".to_string(),
@ -615,7 +526,6 @@ TrIVG3cErZoBC6zqBs/Ibe9q3gdHGqS3QLAKy/k=
timeout: std::time::Duration::from_secs(5),
client_cert: None,
object_storage: None,
hdfs: None,
})
.await
.unwrap();


@ -14,7 +14,7 @@
* limitations under the License.
*/
use dragonfly_api::common::v2::{Hdfs, ObjectStorage, Range};
use dragonfly_api::common::v2::{ObjectStorage, Range};
use dragonfly_client_core::{
error::{ErrorType, OrErr},
Error, Result,
@ -26,28 +26,12 @@ use std::path::Path;
use std::{collections::HashMap, pin::Pin, time::Duration};
use std::{fmt::Debug, fs};
use tokio::io::{AsyncRead, AsyncReadExt};
use tracing::{error, info, warn};
use tracing::{error, info, instrument, warn};
use url::Url;
pub mod hdfs;
pub mod http;
pub mod object_storage;
/// POOL_MAX_IDLE_PER_HOST is the max idle connections per host.
const POOL_MAX_IDLE_PER_HOST: usize = 1024;
/// KEEP_ALIVE_INTERVAL is the keep alive interval for TCP connection.
const KEEP_ALIVE_INTERVAL: Duration = Duration::from_secs(60);
/// HTTP2_KEEP_ALIVE_INTERVAL is the interval for HTTP2 keep alive.
const HTTP2_KEEP_ALIVE_INTERVAL: Duration = Duration::from_secs(300);
/// HTTP2_KEEP_ALIVE_TIMEOUT is the timeout for HTTP2 keep alive.
const HTTP2_KEEP_ALIVE_TIMEOUT: Duration = Duration::from_secs(20);
/// MAX_RETRY_TIMES is the max retry times for the request.
const MAX_RETRY_TIMES: u32 = 1;
/// NAME is the name of the package.
pub const NAME: &str = "backend";
@ -73,9 +57,6 @@ pub struct HeadRequest {
/// object_storage is the object storage related information.
pub object_storage: Option<ObjectStorage>,
/// hdfs is the hdfs related information.
pub hdfs: Option<Hdfs>,
}
/// HeadResponse is the head response for backend.
@ -125,9 +106,6 @@ pub struct GetRequest {
/// the object storage related information.
pub object_storage: Option<ObjectStorage>,
/// hdfs is the hdfs related information.
pub hdfs: Option<Hdfs>,
}
/// GetResponse is the get response for backend.
@ -166,7 +144,7 @@ where
}
/// The File Entry of a directory, including some relevant file metadata.
#[derive(Debug, PartialEq, Eq, Hash, Clone)]
#[derive(Debug, PartialEq, Eq)]
pub struct DirEntry {
/// url is the url of the entry.
pub url: String,
@ -197,7 +175,7 @@ pub struct BackendFactory {
/// backends is the backends of the factory, including the plugin backends and
/// the builtin backends.
backends: HashMap<String, Box<dyn Backend + Send + Sync>>,
/// libraries are used to store the plugin's dynamic library, because when not saving the `Library`,
/// libraries is used to store the plugin's dynamic library, because when not saving the `Library`,
/// it will drop when out of scope, resulting in the null pointer error.
libraries: Vec<Library>,
}
@ -205,7 +183,7 @@ pub struct BackendFactory {
/// BackendFactory implements the factory of the backend. It supports loading builtin
/// backends and plugin backends.
///
/// The builtin backends are http, https, etc., which are implemented
/// The builtin backends are http, https, etc, which are implemented
/// by the HTTP struct.
///
/// The plugin backends are shared libraries, which are loaded
@ -226,26 +204,24 @@ pub struct BackendFactory {
/// https://github.com/dragonflyoss/client/tree/main/dragonfly-client-backend/examples/plugin/.
impl BackendFactory {
/// new returns a new BackendFactory.
#[instrument(skip_all)]
pub fn new(plugin_dir: Option<&Path>) -> Result<Self> {
let mut backend_factory = Self::default();
backend_factory.load_builtin_backends()?;
backend_factory.load_builtin_backends();
if let Some(plugin_dir) = plugin_dir {
backend_factory
.load_plugin_backends(plugin_dir)
.inspect_err(|err| {
.map_err(|err| {
error!("failed to load plugin backends: {}", err);
err
})?;
}
Ok(backend_factory)
}
/// unsupported_download_directory returns whether the scheme does not support directory download.
pub fn unsupported_download_directory(scheme: &str) -> bool {
scheme == http::HTTP_SCHEME || scheme == http::HTTPS_SCHEME
}
/// build returns the backend by the scheme of the url.
#[instrument(skip_all)]
pub fn build(&self, url: &str) -> Result<&(dyn Backend + Send + Sync)> {
let url = Url::parse(url).or_err(ErrorType::ParseError)?;
let scheme = url.scheme();
@ -256,24 +232,21 @@ impl BackendFactory {
}
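For orientation, here is a minimal sketch of caller code (assumed, not part of this diff) showing how the factory resolves a backend by URL scheme; the crate name dragonfly_client_backend is inferred from the repository path referenced in the doc comment above.
// Hypothetical caller: only BackendFactory::new, BackendFactory::build, and
// Backend::scheme from this diff are used. Passing None skips plugin loading.
fn pick_backend() -> dragonfly_client_core::Result<()> {
    let factory = dragonfly_client_backend::BackendFactory::new(None)?;
    // The URL scheme selects the backend: "http"/"https" map to the HTTP
    // backend, while "s3", "gs", "abs", "oss", "obs", "cos" (and, on the side
    // of this diff that includes it, "hdfs") map to the backends registered by
    // load_builtin_backends below.
    let backend = factory.build("https://example.com/file")?;
    assert_eq!(backend.scheme(), "https");
    Ok(())
}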
/// load_builtin_backends loads the builtin backends.
fn load_builtin_backends(&mut self) -> Result<()> {
self.backends.insert(
"http".to_string(),
Box::new(http::HTTP::new(http::HTTP_SCHEME)?),
);
#[instrument(skip_all)]
fn load_builtin_backends(&mut self) {
self.backends
.insert("http".to_string(), Box::new(http::HTTP::new("http")));
info!("load [http] builtin backend");
self.backends.insert(
"https".to_string(),
Box::new(http::HTTP::new(http::HTTPS_SCHEME)?),
);
info!("load [https] builtin backend");
self.backends
.insert("https".to_string(), Box::new(http::HTTP::new("https")));
info!("load [https] builtin backend ");
self.backends.insert(
"s3".to_string(),
Box::new(object_storage::ObjectStorage::new(
object_storage::Scheme::S3,
)?),
)),
);
info!("load [s3] builtin backend");
@ -281,7 +254,7 @@ impl BackendFactory {
"gs".to_string(),
Box::new(object_storage::ObjectStorage::new(
object_storage::Scheme::GCS,
)?),
)),
);
info!("load [gcs] builtin backend");
@ -289,7 +262,7 @@ impl BackendFactory {
"abs".to_string(),
Box::new(object_storage::ObjectStorage::new(
object_storage::Scheme::ABS,
)?),
)),
);
info!("load [abs] builtin backend");
@ -297,15 +270,15 @@ impl BackendFactory {
"oss".to_string(),
Box::new(object_storage::ObjectStorage::new(
object_storage::Scheme::OSS,
)?),
)),
);
info!("load [oss] builtin backend");
info!("load [oss] builtin backend ");
self.backends.insert(
"obs".to_string(),
Box::new(object_storage::ObjectStorage::new(
object_storage::Scheme::OBS,
)?),
)),
);
info!("load [obs] builtin backend");
@ -313,24 +286,19 @@ impl BackendFactory {
"cos".to_string(),
Box::new(object_storage::ObjectStorage::new(
object_storage::Scheme::COS,
)?),
)),
);
info!("load [cos] builtin backend");
self.backends
.insert("hdfs".to_string(), Box::new(hdfs::Hdfs::new()));
info!("load [hdfs] builtin backend");
Ok(())
}
/// load_plugin_backends loads the plugin backends.
#[instrument(skip_all)]
fn load_plugin_backends(&mut self, plugin_dir: &Path) -> Result<()> {
let backend_plugin_dir = plugin_dir.join(NAME);
if !backend_plugin_dir.exists() {
warn!(
"skip loading plugin backends, because the plugin directory {} does not exist",
backend_plugin_dir.display()
plugin_dir.display()
);
return Ok(());
}
@ -379,9 +347,7 @@ mod tests {
#[test]
fn should_load_builtin_backends() {
let factory = BackendFactory::new(None).unwrap();
let expected_backends = vec![
"http", "https", "s3", "gs", "abs", "oss", "obs", "cos", "hdfs",
];
let expected_backends = vec!["http", "https", "s3", "gs", "abs", "oss", "obs", "cos"];
for backend in expected_backends {
assert!(factory.backends.contains_key(backend));
}
@ -412,7 +378,7 @@ mod tests {
let plugin_dir = dir.path().join("non_existent_plugin_dir");
let factory = BackendFactory::new(Some(&plugin_dir)).unwrap();
assert_eq!(factory.backends.len(), 9);
assert_eq!(factory.backends.len(), 8);
}
#[test]
@ -430,15 +396,9 @@ mod tests {
let result = BackendFactory::new(Some(&plugin_dir));
assert!(result.is_err());
let err_msg = format!("{}", result.err().unwrap());
assert!(
err_msg.starts_with("PluginError cause:"),
"error message should start with 'PluginError cause:'"
);
assert!(
err_msg.contains(&lib_path.display().to_string()),
"error message should contain library path"
assert_eq!(
format!("{}", result.err().unwrap()),
format!("PluginError cause: {}: file too short", lib_path.display()),
);
}

View File

@ -17,14 +17,14 @@
use dragonfly_api::common;
use dragonfly_client_core::error::BackendError;
use dragonfly_client_core::{Error as ClientError, Result as ClientResult};
use opendal::{layers::TimeoutLayer, raw::HttpClient, Metakey, Operator};
use opendal::{raw::HttpClient, Metakey, Operator};
use percent_encoding::percent_decode_str;
use std::fmt;
use std::result::Result;
use std::str::FromStr;
use std::time::Duration;
use tokio_util::io::StreamReader;
use tracing::{debug, error, instrument};
use tracing::{error, info, instrument};
use url::Url;
/// Scheme is the scheme of the object storage.
@ -68,7 +68,7 @@ impl fmt::Display for Scheme {
impl FromStr for Scheme {
type Err = String;
/// from_str parses a scheme string.
/// from_str parses an scheme string.
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"s3" => Ok(Scheme::S3),
@ -169,32 +169,18 @@ macro_rules! make_need_fields_message {
pub struct ObjectStorage {
/// scheme is the scheme of the object storage.
scheme: Scheme,
/// client is the reqwest client.
client: reqwest::Client,
}
/// ObjectStorage implements the ObjectStorage trait.
impl ObjectStorage {
/// Returns ObjectStorage that implements the Backend trait.
pub fn new(scheme: Scheme) -> ClientResult<ObjectStorage> {
// Initialize the reqwest client.
let client = reqwest::Client::builder()
.gzip(true)
.brotli(true)
.zstd(true)
.deflate(true)
.pool_max_idle_per_host(super::POOL_MAX_IDLE_PER_HOST)
.tcp_keepalive(super::KEEP_ALIVE_INTERVAL)
.http2_keep_alive_timeout(super::HTTP2_KEEP_ALIVE_TIMEOUT)
.http2_keep_alive_interval(super::HTTP2_KEEP_ALIVE_INTERVAL)
.http2_keep_alive_while_idle(true)
.build()?;
Ok(Self { scheme, client })
#[instrument(skip_all)]
pub fn new(scheme: Scheme) -> ObjectStorage {
Self { scheme }
}
/// operator initializes the operator with the parsed URL and object storage.
#[instrument(skip_all)]
pub fn operator(
&self,
parsed_url: &super::object_storage::ParsedURL,
@ -203,11 +189,11 @@ impl ObjectStorage {
) -> ClientResult<Operator> {
// If download backend is object storage, object_storage parameter is required.
let Some(object_storage) = object_storage else {
return Err(ClientError::BackendError(Box::new(BackendError {
return Err(ClientError::BackendError(BackendError {
message: format!("{} need object_storage parameter", self.scheme),
status_code: None,
header: None,
})));
}));
};
match self.scheme {
@ -221,19 +207,23 @@ impl ObjectStorage {
}
/// s3_operator initializes the S3 operator with the parsed URL and object storage.
#[instrument(skip_all)]
pub fn s3_operator(
&self,
parsed_url: &super::object_storage::ParsedURL,
object_storage: common::v2::ObjectStorage,
timeout: Duration,
) -> ClientResult<Operator> {
// Create a reqwest http client.
let client = reqwest::Client::builder().timeout(timeout).build()?;
// S3 requires the access key id and the secret access key.
let (Some(access_key_id), Some(access_key_secret), Some(region)) = (
&object_storage.access_key_id,
&object_storage.access_key_secret,
&object_storage.region,
) else {
return Err(ClientError::BackendError(Box::new(BackendError {
return Err(ClientError::BackendError(BackendError {
message: format!(
"{} {}",
self.scheme,
@ -245,7 +235,7 @@ impl ObjectStorage {
),
status_code: None,
header: None,
})));
}));
};
// Initialize the S3 operator with the object storage.
@ -253,7 +243,7 @@ impl ObjectStorage {
builder = builder
.access_key_id(access_key_id)
.secret_access_key(access_key_secret)
.http_client(HttpClient::with(self.client.clone()))
.http_client(HttpClient::with(client))
.bucket(&parsed_url.bucket)
.region(region);
@ -267,25 +257,27 @@ impl ObjectStorage {
builder = builder.session_token(session_token);
}
Ok(Operator::new(builder)?
.finish()
.layer(TimeoutLayer::new().with_timeout(timeout)))
Ok(Operator::new(builder)?.finish())
}
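To make the credential requirement enforced above concrete, here is a hedged sketch of building an S3 operator in the style of the tests at the end of this file; it assumes ObjectStorage, Scheme, and ParsedURL are in scope and that common::v2::ObjectStorage (a prost-generated message) implements Default. The credential values are placeholders.
// Hypothetical usage. On one side of this diff ObjectStorage::new returns a
// Result, on the other it returns the struct directly; the Result form is
// shown here.
fn build_s3_operator_example() -> ClientResult<()> {
    // S3 requires the access key id, the secret access key, and the region,
    // as checked by s3_operator above.
    let object_storage = common::v2::ObjectStorage {
        access_key_id: Some("my-access-key-id".to_string()),
        access_key_secret: Some("my-access-key-secret".to_string()),
        region: Some("us-east-1".to_string()),
        ..Default::default()
    };
    let url: Url = "s3://test-bucket/file".parse().unwrap();
    let parsed_url: ParsedURL = url.try_into()?;
    let _operator = ObjectStorage::new(Scheme::S3)?.operator(
        &parsed_url,
        Some(object_storage),
        Duration::from_secs(3),
    )?;
    Ok(())
}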
/// gcs_operator initializes the GCS operator with the parsed URL and object storage.
#[instrument(skip_all)]
pub fn gcs_operator(
&self,
parsed_url: &super::object_storage::ParsedURL,
object_storage: common::v2::ObjectStorage,
timeout: Duration,
) -> ClientResult<Operator> {
// Create a reqwest http client.
let client = reqwest::Client::builder().timeout(timeout).build()?;
// Initialize the GCS operator with the object storage.
let mut builder = opendal::services::Gcs::default();
builder = builder
.http_client(HttpClient::with(self.client.clone()))
.http_client(HttpClient::with(client))
.bucket(&parsed_url.bucket);
// Configure the credentials using the local path to the credential file if provided.
// Configure the credentials using the local path to the crendential file if provided.
// Otherwise, configure using the Application Default Credentials (ADC).
if let Some(credential_path) = object_storage.credential_path.as_deref() {
builder = builder.credential_path(credential_path);
@ -301,25 +293,27 @@ impl ObjectStorage {
builder = builder.predefined_acl(predefined_acl);
}
Ok(Operator::new(builder)?
.finish()
.layer(TimeoutLayer::new().with_timeout(timeout)))
Ok(Operator::new(builder)?.finish())
}
/// abs_operator initializes the ABS operator with the parsed URL and object storage.
#[instrument(skip_all)]
pub fn abs_operator(
&self,
parsed_url: &super::object_storage::ParsedURL,
object_storage: common::v2::ObjectStorage,
timeout: Duration,
) -> ClientResult<Operator> {
// Create a reqwest http client.
let client = reqwest::Client::builder().timeout(timeout).build()?;
// ABS requires the account name and the account key.
let (Some(access_key_id), Some(access_key_secret), Some(endpoint)) = (
&object_storage.access_key_id,
&object_storage.access_key_secret,
&object_storage.endpoint,
) else {
return Err(ClientError::BackendError(Box::new(BackendError {
return Err(ClientError::BackendError(BackendError {
message: format!(
"{} {}",
self.scheme,
@ -331,7 +325,7 @@ impl ObjectStorage {
),
status_code: None,
header: None,
})));
}));
};
// Initialize the ABS operator with the object storage.
@ -339,29 +333,31 @@ impl ObjectStorage {
builder = builder
.account_name(access_key_id)
.account_key(access_key_secret)
.http_client(HttpClient::with(self.client.clone()))
.http_client(HttpClient::with(client))
.container(&parsed_url.bucket)
.endpoint(endpoint);
Ok(Operator::new(builder)?
.finish()
.layer(TimeoutLayer::new().with_timeout(timeout)))
Ok(Operator::new(builder)?.finish())
}
/// oss_operator initializes the OSS operator with the parsed URL and object storage.
#[instrument(skip_all)]
pub fn oss_operator(
&self,
parsed_url: &super::object_storage::ParsedURL,
object_storage: common::v2::ObjectStorage,
timeout: Duration,
) -> ClientResult<Operator> {
// Create a reqwest http client.
let client = reqwest::Client::builder().timeout(timeout).build()?;
// OSS requires the access key id, access key secret, and endpoint.
let (Some(access_key_id), Some(access_key_secret), Some(endpoint)) = (
&object_storage.access_key_id,
&object_storage.access_key_secret,
&object_storage.endpoint,
) else {
return Err(ClientError::BackendError(Box::new(BackendError {
return Err(ClientError::BackendError(BackendError {
message: format!(
"{} {}",
self.scheme,
@ -373,7 +369,7 @@ impl ObjectStorage {
),
status_code: None,
header: None,
})));
}));
};
// Initialize the OSS operator with the object storage.
@ -382,29 +378,31 @@ impl ObjectStorage {
.access_key_id(access_key_id)
.access_key_secret(access_key_secret)
.endpoint(endpoint)
.http_client(HttpClient::with(self.client.clone()))
.http_client(HttpClient::with(client))
.root("/")
.bucket(&parsed_url.bucket);
Ok(Operator::new(builder)?
.finish()
.layer(TimeoutLayer::new().with_timeout(timeout)))
Ok(Operator::new(builder)?.finish())
}
/// obs_operator initializes the OBS operator with the parsed URL and object storage.
#[instrument(skip_all)]
pub fn obs_operator(
&self,
parsed_url: &super::object_storage::ParsedURL,
object_storage: common::v2::ObjectStorage,
timeout: Duration,
) -> ClientResult<Operator> {
// Create a reqwest http client.
let client = reqwest::Client::builder().timeout(timeout).build()?;
// OBS requires the endpoint, access key id, and access key secret.
let (Some(access_key_id), Some(access_key_secret), Some(endpoint)) = (
&object_storage.access_key_id,
&object_storage.access_key_secret,
&object_storage.endpoint,
) else {
return Err(ClientError::BackendError(Box::new(BackendError {
return Err(ClientError::BackendError(BackendError {
message: format!(
"{} {}",
self.scheme,
@ -416,7 +414,7 @@ impl ObjectStorage {
),
status_code: None,
header: None,
})));
}));
};
// Initialize the OBS operator with the object storage.
@ -425,12 +423,10 @@ impl ObjectStorage {
.access_key_id(access_key_id)
.secret_access_key(access_key_secret)
.endpoint(endpoint)
.http_client(HttpClient::with(self.client.clone()))
.http_client(HttpClient::with(client))
.bucket(&parsed_url.bucket);
Ok(Operator::new(builder)?
.finish()
.layer(TimeoutLayer::new().with_timeout(timeout)))
Ok(Operator::new(builder)?.finish())
}
/// cos_operator initializes the COS operator with the parsed URL and object storage.
@ -440,13 +436,16 @@ impl ObjectStorage {
object_storage: common::v2::ObjectStorage,
timeout: Duration,
) -> ClientResult<Operator> {
// Create a reqwest http client.
let client = reqwest::Client::builder().timeout(timeout).build()?;
// COS requires the access key id, the access key secret, and the endpoint.
let (Some(access_key_id), Some(access_key_secret), Some(endpoint)) = (
&object_storage.access_key_id,
&object_storage.access_key_secret,
&object_storage.endpoint,
) else {
return Err(ClientError::BackendError(Box::new(BackendError {
return Err(ClientError::BackendError(BackendError {
message: format!(
"{} {}",
self.scheme,
@ -458,7 +457,7 @@ impl ObjectStorage {
),
status_code: None,
header: None,
})));
}));
};
// Initialize the COS operator with the object storage.
@ -467,12 +466,10 @@ impl ObjectStorage {
.secret_id(access_key_id)
.secret_key(access_key_secret)
.endpoint(endpoint)
.http_client(HttpClient::with(self.client.clone()))
.http_client(HttpClient::with(client))
.bucket(&parsed_url.bucket);
Ok(Operator::new(builder)?
.finish()
.layer(TimeoutLayer::new().with_timeout(timeout)))
Ok(Operator::new(builder)?.finish())
}
}
@ -480,6 +477,7 @@ impl ObjectStorage {
#[tonic::async_trait]
impl crate::Backend for ObjectStorage {
/// scheme returns the scheme of the object storage.
#[instrument(skip_all)]
fn scheme(&self) -> String {
self.scheme.to_string()
}
@ -487,7 +485,7 @@ impl crate::Backend for ObjectStorage {
/// head gets the header of the request.
#[instrument(skip_all)]
async fn head(&self, request: super::HeadRequest) -> ClientResult<super::HeadResponse> {
debug!(
info!(
"head request {} {}: {:?}",
request.task_id, request.url, request.http_header
);
@ -497,11 +495,12 @@ impl crate::Backend for ObjectStorage {
.url
.parse()
.map_err(|_| ClientError::InvalidURI(request.url.clone()))?;
let parsed_url: super::object_storage::ParsedURL = url.try_into().inspect_err(|err| {
let parsed_url: super::object_storage::ParsedURL = url.try_into().map_err(|err| {
error!(
"parse head request url failed {} {}: {}",
request.task_id, request.url, err
);
err
})?;
// Initialize the operator with the parsed URL, object storage, and timeout.
@ -519,11 +518,11 @@ impl crate::Backend for ObjectStorage {
"list request failed {} {}: {}",
request.task_id, request.url, err
);
ClientError::BackendError(Box::new(BackendError {
ClientError::BackendError(BackendError {
message: err.to_string(),
status_code: None,
header: None,
}))
})
})?
.into_iter()
.map(|entry| {
@ -545,14 +544,14 @@ impl crate::Backend for ObjectStorage {
"stat request failed {} {}: {}",
request.task_id, request.url, err
);
ClientError::BackendError(Box::new(BackendError {
ClientError::BackendError(BackendError {
message: err.to_string(),
status_code: None,
header: None,
}))
})
})?;
debug!(
info!(
"head response {} {}: {}",
request.task_id,
request.url,
@ -569,13 +568,13 @@ impl crate::Backend for ObjectStorage {
})
}
/// get returns content of requested file.
/// Returns content of requested file.
#[instrument(skip_all)]
async fn get(
&self,
request: super::GetRequest,
) -> ClientResult<super::GetResponse<super::Body>> {
debug!(
info!(
"get request {} {}: {:?}",
request.piece_id, request.url, request.http_header
);
@ -585,11 +584,12 @@ impl crate::Backend for ObjectStorage {
.url
.parse()
.map_err(|_| ClientError::InvalidURI(request.url.clone()))?;
let parsed_url: super::object_storage::ParsedURL = url.try_into().inspect_err(|err| {
let parsed_url: super::object_storage::ParsedURL = url.try_into().map_err(|err| {
error!(
"parse get request url failed {} {}: {}",
request.piece_id, request.url, err
);
err
})?;
// Initialize the operator with the parsed URL, object storage, and timeout.
@ -602,11 +602,11 @@ impl crate::Backend for ObjectStorage {
"get request failed {} {}: {}",
request.piece_id, request.url, err
);
ClientError::BackendError(Box::new(BackendError {
ClientError::BackendError(BackendError {
message: err.to_string(),
status_code: None,
header: None,
}))
})
})?;
let stream = match request.range {
@ -618,22 +618,22 @@ impl crate::Backend for ObjectStorage {
"get request failed {} {}: {}",
request.piece_id, request.url, err
);
ClientError::BackendError(Box::new(BackendError {
ClientError::BackendError(BackendError {
message: err.to_string(),
status_code: None,
header: None,
}))
})
})?,
None => operator_reader.into_bytes_stream(..).await.map_err(|err| {
error!(
"get request failed {} {}: {}",
request.piece_id, request.url, err
);
ClientError::BackendError(Box::new(BackendError {
ClientError::BackendError(BackendError {
message: err.to_string(),
status_code: None,
header: None,
}))
})
})?,
};
@ -798,7 +798,7 @@ mod tests {
let url: Url = format!("{}://test-bucket/file", scheme).parse().unwrap();
let parsed_url: ParsedURL = url.try_into().unwrap();
let result = ObjectStorage::new(scheme).unwrap().operator(
let result = ObjectStorage::new(scheme).operator(
&parsed_url,
Some(object_storage),
Duration::from_secs(3),
@ -847,7 +847,7 @@ mod tests {
let url: Url = "s3://test-bucket/file".parse().unwrap();
let parsed_url: ParsedURL = url.try_into().unwrap();
let result = ObjectStorage::new(Scheme::S3).unwrap().operator(
let result = ObjectStorage::new(Scheme::S3).operator(
&parsed_url,
Some(object_storage),
Duration::from_secs(3),
@ -900,7 +900,7 @@ mod tests {
let url: Url = "gs://test-bucket/file".parse().unwrap();
let parsed_url: ParsedURL = url.try_into().unwrap();
let result = ObjectStorage::new(Scheme::GCS).unwrap().operator(
let result = ObjectStorage::new(Scheme::GCS).operator(
&parsed_url,
Some(object_storage),
Duration::from_secs(3),
@ -916,11 +916,8 @@ mod tests {
let url: Url = "s3://test-bucket/file".parse().unwrap();
let parsed_url: ParsedURL = url.try_into().unwrap();
let result = ObjectStorage::new(Scheme::S3).unwrap().operator(
&parsed_url,
None,
Duration::from_secs(3),
);
let result =
ObjectStorage::new(Scheme::S3).operator(&parsed_url, None, Duration::from_secs(3));
assert!(result.is_err());
assert_eq!(
@ -987,7 +984,7 @@ mod tests {
let url: Url = "s3://test-bucket/file".parse().unwrap();
let parsed_url: ParsedURL = url.try_into().unwrap();
let result = ObjectStorage::new(Scheme::S3).unwrap().operator(
let result = ObjectStorage::new(Scheme::S3).operator(
&parsed_url,
Some(object_storage),
Duration::from_secs(3),
@ -1056,7 +1053,7 @@ mod tests {
let url: Url = "abs://test-bucket/file".parse().unwrap();
let parsed_url: ParsedURL = url.try_into().unwrap();
let result = ObjectStorage::new(Scheme::ABS).unwrap().operator(
let result = ObjectStorage::new(Scheme::ABS).operator(
&parsed_url,
Some(object_storage),
Duration::from_secs(3),
@ -1125,7 +1122,7 @@ mod tests {
let url: Url = "oss://test-bucket/file".parse().unwrap();
let parsed_url: ParsedURL = url.try_into().unwrap();
let result = ObjectStorage::new(Scheme::OSS).unwrap().operator(
let result = ObjectStorage::new(Scheme::OSS).operator(
&parsed_url,
Some(object_storage),
Duration::from_secs(3),
@ -1194,7 +1191,7 @@ mod tests {
let url: Url = "obs://test-bucket/file".parse().unwrap();
let parsed_url: ParsedURL = url.try_into().unwrap();
let result = ObjectStorage::new(Scheme::OBS).unwrap().operator(
let result = ObjectStorage::new(Scheme::OBS).operator(
&parsed_url,
Some(object_storage),
Duration::from_secs(3),
@ -1263,7 +1260,7 @@ mod tests {
let url: Url = "cos://test-bucket/file".parse().unwrap();
let parsed_url: ParsedURL = url.try_into().unwrap();
let result = ObjectStorage::new(Scheme::COS).unwrap().operator(
let result = ObjectStorage::new(Scheme::COS).operator(
&parsed_url,
Some(object_storage),
Duration::from_secs(3),

View File

@ -13,7 +13,6 @@ build = "build.rs"
[dependencies]
dragonfly-client-core.workspace = true
dragonfly-client-util.workspace = true
local-ip-address.workspace = true
clap.workspace = true
regex.workspace = true
serde.workspace = true
@ -22,16 +21,13 @@ validator.workspace = true
humantime.workspace = true
serde_yaml.workspace = true
tokio.workspace = true
tempfile.workspace = true
serde_json.workspace = true
bytesize.workspace = true
bytesize-serde.workspace = true
tonic.workspace = true
rustls-pki-types.workspace = true
rcgen.workspace = true
reqwest.workspace = true
home = "0.5.11"
home = "0.5.4"
local-ip-address = "0.6.3"
hostname = "^0.4"
humantime-serde = "1.1.1"
serde_regex = "1.1.0"
http-serde = "2.1.1"

View File

@ -61,8 +61,9 @@ fn get_commit_from_git() -> Option<Commit> {
fn main() {
// Set the environment variables for the build platform.
let target = env::var("TARGET").unwrap_or_default();
if let Ok(target) = env::var("TARGET") {
println!("cargo:rustc-env=BUILD_PLATFORM={}", target);
}
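// For context (assumed consumer code, not part of this diff): a variable
// exported with cargo:rustc-env can be read at compile time by the crate
// being built, for example:
//
//     const BUILD_PLATFORM: Option<&str> = option_env!("BUILD_PLATFORM");
//
// option_env! yields None when build.rs did not set the variable.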
// Set the environment variables for the build time.
if let Ok(build_time) = SystemTime::now().duration_since(UNIX_EPOCH) {

View File

@ -19,10 +19,6 @@ use std::path::PathBuf;
/// NAME is the name of dfcache.
pub const NAME: &str = "dfcache";
// DEFAULT_OUTPUT_FILE_MODE defines the default file mode for output files when downloading with dfcache
// using the `--transfer-from-dfdaemon=true` option.
pub const DEFAULT_OUTPUT_FILE_MODE: u32 = 0o644;
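As a hedged illustration (standard library only; not necessarily how dfcache itself applies the constant), a mode like this can be set on an output file on Unix as follows. The same constant appears for dfget later in this diff.
use std::fs;
use std::os::unix::fs::PermissionsExt;
use std::path::Path;
// 0o644: owner read/write; group and others read-only.
fn apply_output_file_mode(path: &Path) -> std::io::Result<()> {
    fs::set_permissions(path, fs::Permissions::from_mode(0o644))
}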
/// default_dfcache_log_dir is the default log directory for dfcache.
#[inline]
pub fn default_dfcache_log_dir() -> PathBuf {

File diff suppressed because it is too large

View File

@ -19,10 +19,6 @@ use std::path::PathBuf;
/// NAME is the name of dfget.
pub const NAME: &str = "dfget";
// DEFAULT_OUTPUT_FILE_MODE defines the default file mode for output files when downloading with dfget
// using the `--transfer-from-dfdaemon=true` option.
pub const DEFAULT_OUTPUT_FILE_MODE: u32 = 0o644;
/// default_dfget_log_dir is the default log directory for dfget.
pub fn default_dfget_log_dir() -> PathBuf {
crate::default_log_dir().join(NAME)

View File

@ -173,7 +173,7 @@ pub struct CRIO {
pub registries: Vec<CRIORegistry>,
}
/// PodmanRegistry is the registry configuration for podman.
/// CRIORegistry is the registry configuration for cri-o.
#[derive(Debug, Clone, Default, Validate, Deserialize, Serialize, PartialEq, Eq)]
#[serde(default, rename_all = "camelCase")]
pub struct PodmanRegistry {
@ -352,62 +352,6 @@ impl Config {
#[cfg(test)]
mod tests {
use super::*;
use std::path::Path;
#[test]
fn test_default_dfinit_config_path() {
let expected = crate::default_config_dir().join("dfinit.yaml");
assert_eq!(default_dfinit_config_path(), expected);
}
#[test]
fn test_default_dfinit_log_dir() {
let expected = crate::default_log_dir().join(NAME);
assert_eq!(default_dfinit_log_dir(), expected);
}
#[test]
fn test_container_runtime_default_paths() {
assert_eq!(
default_container_runtime_containerd_config_path(),
Path::new("/etc/containerd/config.toml")
);
assert_eq!(
default_container_runtime_docker_config_path(),
Path::new("/etc/docker/daemon.json")
);
assert_eq!(
default_container_runtime_crio_config_path(),
Path::new("/etc/containers/registries.conf")
);
assert_eq!(
default_container_runtime_podman_config_path(),
Path::new("/etc/containers/registries.conf")
);
}
#[test]
fn test_default_unqualified_search_registries() {
let crio_registries = default_container_runtime_crio_unqualified_search_registries();
assert_eq!(
crio_registries,
vec![
"registry.fedoraproject.org",
"registry.access.redhat.com",
"docker.io"
]
);
let podman_registries = default_container_runtime_podman_unqualified_search_registries();
assert_eq!(
podman_registries,
vec![
"registry.fedoraproject.org",
"registry.access.redhat.com",
"docker.io"
]
);
}
#[test]
fn serialize_container_runtime() {
@ -529,43 +473,4 @@ containerRuntime:
panic!("failed to deserialize");
}
}
#[test]
fn deserialize_container_runtime_podman_correctly() {
let raw_data = r#"
proxy:
addr: "hello"
containerRuntime:
podman:
configPath: "test_path"
unqualifiedSearchRegistries:
- "reg1"
- "reg2"
registries:
- prefix: "prefix1"
location: "location1"
- prefix: "prefix2"
location: "location2"
"#;
let cfg: Config = serde_yaml::from_str(raw_data).expect("failed to deserialize");
if let Some(ContainerRuntimeConfig::Podman(c)) = cfg.container_runtime.config {
assert_eq!(PathBuf::from("test_path"), c.config_path);
assert_eq!(vec!["reg1", "reg2"], c.unqualified_search_registries);
assert_eq!(
vec![
PodmanRegistry {
location: "location1".to_string(),
prefix: "prefix1".to_string()
},
PodmanRegistry {
location: "location2".to_string(),
prefix: "prefix2".to_string()
},
],
c.registries
);
} else {
panic!("failed to deserialize");
}
}
}

View File

@ -0,0 +1,25 @@
/*
* Copyright 2024 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::path::PathBuf;
/// NAME is the name of dfstore.
pub const NAME: &str = "dfstore";
/// default_dfstore_log_dir is the default log directory for dfstore.
pub fn default_dfstore_log_dir() -> PathBuf {
crate::default_log_dir().join(NAME)
}

View File

@ -21,6 +21,7 @@ pub mod dfcache;
pub mod dfdaemon;
pub mod dfget;
pub mod dfinit;
pub mod dfstore;
/// SERVICE_NAME is the name of the service.
pub const SERVICE_NAME: &str = "dragonfly";
@ -104,7 +105,7 @@ pub fn default_lock_dir() -> PathBuf {
/// default_plugin_dir is the default plugin directory for client.
pub fn default_plugin_dir() -> PathBuf {
#[cfg(target_os = "linux")]
return PathBuf::from("/usr/local/lib/dragonfly/plugins/");
return PathBuf::from("/var/lib/dragonfly/plugins/");
#[cfg(target_os = "macos")]
return home::home_dir().unwrap().join(".dragonfly").join("plugins");
@ -148,20 +149,3 @@ impl clap::builder::TypedValueParser for VersionValueParser {
Ok(false)
}
}
#[cfg(test)]
mod tests {
use super::*;
use clap::{builder::TypedValueParser, Command};
use std::ffi::OsStr;
#[test]
fn version_value_parser_references_non_real_values() {
let parser = VersionValueParser;
let cmd = Command::new("test_app");
let value = OsStr::new("false");
let result = parser.parse_ref(&cmd, None, value);
assert!(result.is_ok());
assert!(!result.unwrap());
}
}

View File

@ -11,7 +11,6 @@ edition.workspace = true
[dependencies]
reqwest.workspace = true
reqwest-middleware.workspace = true
thiserror.workspace = true
tonic.workspace = true
tonic-reflection.workspace = true
@ -22,3 +21,4 @@ hyper-util.workspace = true
opendal.workspace = true
url.workspace = true
headers.workspace = true
libloading = "0.8.5"

View File

@ -171,10 +171,10 @@ pub struct BackendError {
pub header: Option<reqwest::header::HeaderMap>,
}
/// DownloadFromParentFailed is the error when the download from parent is failed.
/// DownloadFromRemotePeerFailed is the error when the download from remote peer is failed.
#[derive(Debug, thiserror::Error)]
#[error("download piece {piece_number} from parent {parent_id} failed")]
pub struct DownloadFromParentFailed {
#[error("download piece {piece_number} from remote peer {parent_id} failed")]
pub struct DownloadFromRemotePeerFailed {
/// piece_number is the number of the piece.
pub piece_number: u32,

View File

@ -21,7 +21,7 @@ pub use errors::ErrorType;
pub use errors::ExternalError;
pub use errors::OrErr;
pub use errors::{BackendError, DownloadFromParentFailed};
pub use errors::{BackendError, DownloadFromRemotePeerFailed};
/// DFError is the error for dragonfly.
#[derive(thiserror::Error, Debug)]
@ -42,10 +42,6 @@ pub enum DFError {
#[error{"hashring {0} is failed"}]
HashRing(String),
/// NoSpace is the error when there is no space left on device.
#[error("no space left on device: {0}")]
NoSpace(String),
/// HostNotFound is the error when the host is not found.
#[error{"host {0} not found"}]
HostNotFound(String),
@ -62,10 +58,6 @@ pub enum DFError {
#[error{"piece {0} state is failed"}]
PieceStateIsFailed(String),
/// DownloadPieceFinished is the error when the download piece finished timeout.
#[error{"download piece {0} finished timeout"}]
DownloadPieceFinished(String),
/// WaitForPieceFinishedTimeout is the error when the wait for piece finished timeout.
#[error{"wait for piece {0} finished timeout"}]
WaitForPieceFinishedTimeout(String),
@ -78,9 +70,9 @@ pub enum DFError {
#[error{"available schedulers not found"}]
AvailableSchedulersNotFound,
/// DownloadFromParentFailed is the error when the download from parent is failed.
/// DownloadFromRemotePeerFailed is the error when the download from remote peer is failed.
#[error(transparent)]
DownloadFromParentFailed(DownloadFromParentFailed),
DownloadFromRemotePeerFailed(DownloadFromRemotePeerFailed),
/// ColumnFamilyNotFound is the error when the column family is not found.
#[error{"column family {0} not found"}]
@ -134,11 +126,6 @@ pub enum DFError {
#[error("invalid parameter")]
InvalidParameter,
/// Infallible is the error for infallible.
#[error(transparent)]
Infallible(#[from] std::convert::Infallible),
/// Utf8 is the error for utf8.
#[error(transparent)]
Utf8(#[from] std::str::Utf8Error),
@ -186,10 +173,6 @@ pub enum DFError {
#[error(transparent)]
ReqwestError(#[from] reqwest::Error),
/// ReqwestMiddlewareError is the error for reqwest middleware.
#[error(transparent)]
ReqwestMiddlewareError(#[from] reqwest_middleware::Error),
/// OpenDALError is the error for opendal.
#[error(transparent)]
OpenDALError(#[from] opendal::Error),
@ -200,7 +183,7 @@ pub enum DFError {
/// BackendError is the error for backend.
#[error(transparent)]
BackendError(Box<BackendError>),
BackendError(BackendError),
/// HyperUtilClientLegacyError is the error for hyper util client legacy.
#[error(transparent)]

View File

@ -23,6 +23,6 @@ tokio.workspace = true
anyhow.workspace = true
tracing.workspace = true
toml_edit.workspace = true
toml.workspace = true
url.workspace = true
tempfile.workspace = true
serde_json.workspace = true

View File

@ -64,8 +64,12 @@ struct Args {
)]
log_max_files: usize,
#[arg(long, default_value_t = false, help = "Specify whether to print log")]
console: bool,
#[arg(
long = "verbose",
default_value_t = false,
help = "Specify whether to print log"
)]
verbose: bool,
#[arg(
short = 'V',
@ -90,23 +94,22 @@ async fn main() -> Result<(), anyhow::Error> {
args.log_level,
args.log_max_files,
None,
None,
None,
None,
None,
false,
args.console,
false,
args.verbose,
);
// Load config.
let config = dfinit::Config::load(&args.config).inspect_err(|err| {
let config = dfinit::Config::load(&args.config).map_err(|err| {
error!("failed to load config: {}", err);
err
})?;
// Handle features of the container runtime.
let container_runtime = container_runtime::ContainerRuntime::new(&config);
container_runtime.run().await.inspect_err(|err| {
container_runtime.run().await.map_err(|err| {
error!("failed to run container runtime: {}", err);
err
})?;
Ok(())

View File

@ -66,9 +66,6 @@ impl Containerd {
.and_then(|config_path| config_path.as_str())
.filter(|config_path| !config_path.is_empty())
{
// Rebind config_path to the first entry if multiple paths are present
let config_path = config_path.split(':').next().unwrap_or(config_path);
info!(
"containerd supports config_path mode, config_path: {}",
config_path.to_string()
@ -179,74 +176,3 @@ impl Containerd {
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use tempfile::TempDir;
use tokio::fs;
#[tokio::test]
async fn test_containerd_config_with_existing_config_path() {
let temp_dir = TempDir::new().unwrap();
let config_path = temp_dir.path().join("config.toml");
let certs_dir = temp_dir.path().join("certs.d");
let certs_dir_str = certs_dir.to_str().unwrap();
// Create initial containerd config with config_path
let initial_config = format!(
r#"
[plugins]
[plugins."io.containerd.grpc.v1.cri"]
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = "{}"
"#,
certs_dir_str
);
fs::write(&config_path, initial_config).await.unwrap();
// Create Containerd instance
let containerd = Containerd::new(
dfinit::Containerd {
config_path: config_path.clone(),
registries: vec![ContainerdRegistry {
host_namespace: "docker.io".into(),
server_addr: "https://registry.example.com".into(),
skip_verify: Some(true),
ca: Some(vec!["test-ca-cert".into()]),
capabilities: vec!["pull".into(), "resolve".into()],
}],
},
dfinit::Proxy {
addr: "http://127.0.0.1:65001".into(),
},
);
// Run containerd configuration
let result = containerd.run().await;
if let Err(e) = &result {
println!("Error: {:?}", e);
if let Ok(contents) = fs::read_to_string(&config_path).await {
println!("Current config file contents:\n{}", contents);
}
}
assert!(result.is_ok());
// Verify the hosts.toml file content
let hosts_file_path = certs_dir.join("docker.io").join("hosts.toml");
let contents = fs::read_to_string(&hosts_file_path).await.unwrap();
let expected_contents = r#"server = "https://registry.example.com"
[host."http://127.0.0.1:65001"]
capabilities = ["pull", "resolve"]
skip_verify = true
ca = ["test-ca-cert"]
[host."http://127.0.0.1:65001".header]
X-Dragonfly-Registry = "https://registry.example.com"
"#;
assert_eq!(contents.trim(), expected_contents.trim());
}
}

View File

@ -111,54 +111,3 @@ impl CRIO {
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[tokio::test]
async fn test_crio_config() {
use tempfile::NamedTempFile;
let crio_config_file = NamedTempFile::new().unwrap();
let crio = CRIO::new(
dfinit::CRIO {
config_path: crio_config_file.path().to_path_buf(),
registries: vec![dfinit::CRIORegistry {
prefix: "registry.example.com".into(),
location: "registry.example.com".into(),
}],
unqualified_search_registries: vec!["registry.example.com".into()],
},
dfinit::Proxy {
addr: "http://127.0.0.1:65001".into(),
},
);
let result = crio.run().await;
assert!(result.is_ok());
// get the contents of the file
let contents = fs::read_to_string(crio_config_file.path().to_path_buf())
.await
.unwrap();
let expected_contents = r#"unqualified-search-registries = ["registry.example.com"]
[[registry]]
prefix = "registry.example.com"
location = "registry.example.com"
[[registry.mirror]]
insecure = true
location = "127.0.0.1:65001"
"#;
// assert that the contents of the file are as expected
assert_eq!(contents, expected_contents);
// clean up
fs::remove_file(crio_config_file.path().to_path_buf())
.await
.unwrap();
}
}

View File

@ -15,14 +15,8 @@
*/
use dragonfly_client_config::dfinit;
use dragonfly_client_core::{
error::{ErrorType, OrErr},
Error, Result,
};
use serde_json::{json, Value};
use tokio::{self, fs};
use dragonfly_client_core::{Error, Result};
use tracing::{info, instrument};
use url::Url;
/// Docker represents the docker runtime manager.
#[derive(Debug, Clone)]
@ -46,6 +40,8 @@ impl Docker {
}
}
/// TODO: Implement the run method for Docker.
///
/// run runs the docker runtime to initialize
/// runtime environment for the dfdaemon.
#[instrument(skip_all)]
@ -54,200 +50,6 @@ impl Docker {
"docker feature is enabled, proxy_addr: {}, config_path: {:?}",
self.proxy_config.addr, self.config.config_path,
);
// Parse proxy address to get host and port.
let proxy_url = Url::parse(&self.proxy_config.addr).or_err(ErrorType::ParseError)?;
let proxy_host = proxy_url
.host_str()
.ok_or(Error::Unknown("host not found".to_string()))?;
let proxy_port = proxy_url
.port_or_known_default()
.ok_or(Error::Unknown("port not found".to_string()))?;
let proxy_location = format!("{}:{}", proxy_host, proxy_port);
// Prepare proxies configuration.
let mut proxies_map = serde_json::Map::new();
proxies_map.insert(
"http-proxy".to_string(),
json!(format!("http://{}", proxy_location)),
);
proxies_map.insert(
"https-proxy".to_string(),
json!(format!("http://{}", proxy_location)),
);
let config_path = &self.config.config_path;
let mut docker_config: serde_json::Map<String, Value> = if config_path.exists() {
let contents = fs::read_to_string(config_path).await?;
if contents.trim().is_empty() {
serde_json::Map::new()
} else {
serde_json::from_str(&contents).or_err(ErrorType::ParseError)?
}
} else {
serde_json::Map::new()
};
// Insert or update proxies configuration.
docker_config.insert("proxies".to_string(), Value::Object(proxies_map));
// Create config directory if it doesn't exist.
let config_dir = config_path
.parent()
.ok_or(Error::Unknown("invalid config path".to_string()))?;
fs::create_dir_all(config_dir).await?;
// Write configuration to file.
fs::write(
config_path,
serde_json::to_string_pretty(&Value::Object(docker_config))
.or_err(ErrorType::SerializeError)?,
)
.await?;
Ok(())
}
}
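For reference, with a proxy address such as http://127.0.0.1:65001 the run() above leaves a daemon.json shaped roughly like the following (illustrative only; any existing top-level keys in the file are preserved and only the proxies entry is inserted or replaced):
{
  "proxies": {
    "http-proxy": "http://127.0.0.1:65001",
    "https-proxy": "http://127.0.0.1:65001"
  }
}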
#[cfg(test)]
mod tests {
use super::*;
use tempfile::NamedTempFile;
use tokio::fs;
#[tokio::test]
async fn test_docker_config_empty() {
let docker_config_file = NamedTempFile::new().unwrap();
let docker = Docker::new(
dfinit::Docker {
config_path: docker_config_file.path().to_path_buf(),
},
dfinit::Proxy {
addr: "http://127.0.0.1:5000".into(),
},
);
let result = docker.run().await;
println!("{:?}", result);
assert!(result.is_ok());
// Read and verify configuration.
let contents = fs::read_to_string(docker_config_file.path()).await.unwrap();
let config: serde_json::Value = serde_json::from_str(&contents).unwrap();
// Verify proxies configuration.
assert_eq!(config["proxies"]["http-proxy"], "http://127.0.0.1:5000");
assert_eq!(config["proxies"]["https-proxy"], "http://127.0.0.1:5000");
}
#[tokio::test]
async fn test_docker_config_existing() {
let docker_config_file = NamedTempFile::new().unwrap();
let initial_config = r#"
{
"log-driver": "json-file",
"experimental": true
}
"#;
fs::write(docker_config_file.path(), initial_config)
.await
.unwrap();
let docker = Docker::new(
dfinit::Docker {
config_path: docker_config_file.path().to_path_buf(),
},
dfinit::Proxy {
addr: "http://127.0.0.1:5000".into(),
},
);
let result = docker.run().await;
assert!(result.is_ok());
// Read and verify configuration.
let contents = fs::read_to_string(docker_config_file.path()).await.unwrap();
let config: serde_json::Value = serde_json::from_str(&contents).unwrap();
// Verify existing configurations.
assert_eq!(config["log-driver"], "json-file");
assert_eq!(config["experimental"], true);
// Verify proxies configuration.
assert_eq!(config["proxies"]["http-proxy"], "http://127.0.0.1:5000");
assert_eq!(config["proxies"]["https-proxy"], "http://127.0.0.1:5000");
}
#[tokio::test]
async fn test_docker_config_invalid_json() {
let docker_config_file = NamedTempFile::new().unwrap();
let invalid_config = r#"
{
"log-driver": "json-file",
"experimental": true,
}
"#;
fs::write(docker_config_file.path(), invalid_config)
.await
.unwrap();
let docker = Docker::new(
dfinit::Docker {
config_path: docker_config_file.path().to_path_buf(),
},
dfinit::Proxy {
addr: "http://127.0.0.1:5000".into(),
},
);
let result = docker.run().await;
assert!(result.is_err());
if let Err(e) = result {
assert_eq!(
format!("{}", e),
"ParseError cause: trailing comma at line 5 column 9"
);
}
}
#[tokio::test]
async fn test_docker_config_proxies_existing() {
let docker_config_file = NamedTempFile::new().unwrap();
let existing_proxies = r#"
{
"proxies": {
"http-proxy": "http://old-proxy:3128",
"https-proxy": "https://old-proxy:3129",
"no-proxy": "old-no-proxy"
},
"log-driver": "json-file"
}
"#;
fs::write(docker_config_file.path(), existing_proxies)
.await
.unwrap();
let docker = Docker::new(
dfinit::Docker {
config_path: docker_config_file.path().to_path_buf(),
},
dfinit::Proxy {
addr: "http://127.0.0.1:5000".into(),
},
);
let result = docker.run().await;
assert!(result.is_ok());
// Read and verify configuration.
let contents = fs::read_to_string(docker_config_file.path()).await.unwrap();
let config: serde_json::Value = serde_json::from_str(&contents).unwrap();
// Verify existing configurations.
assert_eq!(config["log-driver"], "json-file");
// Verify proxies configuration.
assert_eq!(config["proxies"]["http-proxy"], "http://127.0.0.1:5000");
assert_eq!(config["proxies"]["https-proxy"], "http://127.0.0.1:5000");
Err(Error::Unimplemented)
}
}

View File

@ -50,6 +50,8 @@ impl ContainerRuntime {
/// run runs the container runtime to initialize runtime environment for the dfdaemon.
#[instrument(skip_all)]
pub async fn run(&self) -> Result<()> {
// If containerd is enabled, override the default containerd
// configuration.
match &self.engine {
None => Ok(()),
Some(Engine::Containerd(containerd)) => containerd.run().await,

View File

@ -22,22 +22,12 @@ tracing.workspace = true
prost-wkt-types.workspace = true
tokio.workspace = true
tokio-util.workspace = true
sha2.workspace = true
crc32fast.workspace = true
fs2.workspace = true
bytes.workspace = true
bytesize.workspace = true
num_cpus = "1.17"
base16ct.workspace = true
num_cpus = "1.0"
bincode = "1.3.3"
walkdir = "2.5.0"
rayon = "1.10.0"
[dev-dependencies]
tempfile.workspace = true
criterion = "0.5"
[[bench]]
name = "cache"
harness = false
[[bench]]
name = "lru_cache"
harness = false
tempdir = "0.3"

View File

@ -1,468 +0,0 @@
/*
* Copyright 2025 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use bytes::Bytes;
use bytesize::ByteSize;
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};
use dragonfly_client_config::dfdaemon::{Config, Storage};
use dragonfly_client_storage::{cache::Cache, metadata::Piece};
use std::sync::Arc;
use tokio::io::AsyncReadExt;
use tokio::runtime::Runtime;
// Number of pieces to write/read in each benchmark.
const PIECE_COUNT: usize = 100;
fn create_config(capacity: ByteSize) -> Config {
Config {
storage: Storage {
cache_capacity: capacity,
..Default::default()
},
..Default::default()
}
}
fn create_piece(length: u64) -> Piece {
Piece {
number: 0,
offset: 0,
length,
digest: String::new(),
parent_id: None,
uploading_count: 0,
uploaded_count: 0,
updated_at: chrono::Utc::now().naive_utc(),
created_at: chrono::Utc::now().naive_utc(),
finished_at: None,
}
}
pub fn put_task(c: &mut Criterion) {
let rt: Runtime = Runtime::new().unwrap();
let mut group = c.benchmark_group("Put Task");
group.bench_with_input(
BenchmarkId::new("Put Task", "10MB"),
&ByteSize::mb(10),
|b, size| {
b.iter_batched(
|| rt.block_on(async { Cache::new(Arc::new(create_config(ByteSize::gb(2)))) }),
|mut cache| {
rt.block_on(async {
cache.put_task("task", black_box(size.as_u64())).await;
});
},
criterion::BatchSize::SmallInput,
);
},
);
group.bench_with_input(
BenchmarkId::new("Put Task", "100MB"),
&ByteSize::mb(100),
|b, size| {
b.iter_batched(
|| rt.block_on(async { Cache::new(Arc::new(create_config(ByteSize::gb(2)))) }),
|mut cache| {
rt.block_on(async {
cache.put_task("task", black_box(size.as_u64())).await;
});
},
criterion::BatchSize::SmallInput,
);
},
);
group.bench_with_input(
BenchmarkId::new("Put Task", "1GB"),
&ByteSize::gb(1),
|b, size| {
b.iter_batched(
|| rt.block_on(async { Cache::new(Arc::new(create_config(ByteSize::gb(2)))) }),
|mut cache| {
rt.block_on(async {
cache.put_task("task", black_box(size.as_u64())).await;
});
},
criterion::BatchSize::SmallInput,
);
},
);
group.finish();
}
pub fn delete_task(c: &mut Criterion) {
let rt: Runtime = Runtime::new().unwrap();
let mut group = c.benchmark_group("Delete Task");
group.bench_with_input(
BenchmarkId::new("Delete Task", "10MB"),
&ByteSize::mb(10),
|b, size| {
b.iter_batched(
|| {
let mut cache =
rt.block_on(async { Cache::new(Arc::new(create_config(ByteSize::gb(2)))) });
rt.block_on(async {
cache.put_task("task", black_box(size.as_u64())).await;
});
cache
},
|mut cache| {
rt.block_on(async {
cache.delete_task("task").await.unwrap();
});
},
criterion::BatchSize::SmallInput,
);
},
);
group.bench_with_input(
BenchmarkId::new("Delete Task", "100MB"),
&ByteSize::mb(100),
|b, size| {
b.iter_batched(
|| {
let mut cache =
rt.block_on(async { Cache::new(Arc::new(create_config(ByteSize::gb(2)))) });
rt.block_on(async {
cache.put_task("task", black_box(size.as_u64())).await;
});
cache
},
|mut cache| {
rt.block_on(async {
cache.delete_task("task").await.unwrap();
});
},
criterion::BatchSize::SmallInput,
);
},
);
group.bench_with_input(
BenchmarkId::new("Delete Task", "1GB"),
&ByteSize::gb(1),
|b, size| {
b.iter_batched(
|| {
let mut cache =
rt.block_on(async { Cache::new(Arc::new(create_config(ByteSize::gb(2)))) });
rt.block_on(async {
cache.put_task("task", black_box(size.as_u64())).await;
});
cache
},
|mut cache| {
rt.block_on(async {
cache.delete_task("task").await.unwrap();
});
},
criterion::BatchSize::SmallInput,
);
},
);
group.finish();
}
pub fn write_piece(c: &mut Criterion) {
let rt = Runtime::new().unwrap();
let mut group = c.benchmark_group("Write Piece");
group.bench_with_input(
BenchmarkId::new("Write Piece", "4MB"),
&vec![1u8; ByteSize::mb(4).as_u64() as usize],
|b, data| {
b.iter_batched(
|| {
let mut cache = rt.block_on(async {
Cache::new(Arc::new(create_config(
ByteSize::mb(4) * PIECE_COUNT as u64,
)))
});
rt.block_on(async {
cache
.put_task("task", (ByteSize::mb(4) * PIECE_COUNT as u64).as_u64())
.await;
});
cache
},
|cache| {
rt.block_on(async {
for i in 0..PIECE_COUNT {
cache
.write_piece(
"task",
&format!("piece{}", i),
Bytes::copy_from_slice(data),
)
.await
.unwrap();
}
});
},
criterion::BatchSize::SmallInput,
);
},
);
group.bench_with_input(
BenchmarkId::new("Write Piece", "10MB"),
&vec![1u8; ByteSize::mb(10).as_u64() as usize],
|b, data| {
b.iter_batched(
|| {
let mut cache = rt.block_on(async {
Cache::new(Arc::new(create_config(
ByteSize::mb(10) * PIECE_COUNT as u64,
)))
});
rt.block_on(async {
cache
.put_task("task", (ByteSize::mb(10) * PIECE_COUNT as u64).as_u64())
.await;
});
cache
},
|cache| {
rt.block_on(async {
for i in 0..PIECE_COUNT {
cache
.write_piece(
"task",
&format!("piece{}", i),
Bytes::copy_from_slice(data),
)
.await
.unwrap();
}
});
},
criterion::BatchSize::SmallInput,
);
},
);
group.bench_with_input(
BenchmarkId::new("Write Piece", "16MB"),
&vec![1u8; ByteSize::mb(16).as_u64() as usize],
|b, data| {
b.iter_batched(
|| {
let mut cache = rt.block_on(async {
Cache::new(Arc::new(create_config(
ByteSize::mb(16) * PIECE_COUNT as u64,
)))
});
rt.block_on(async {
cache
.put_task("task", (ByteSize::mb(16) * PIECE_COUNT as u64).as_u64())
.await;
});
cache
},
|cache| {
rt.block_on(async {
for i in 0..PIECE_COUNT {
cache
.write_piece(
"task",
&format!("piece{}", i),
Bytes::copy_from_slice(data),
)
.await
.unwrap();
}
});
},
criterion::BatchSize::SmallInput,
);
},
);
group.finish();
}
pub fn read_piece(c: &mut Criterion) {
let rt = Runtime::new().unwrap();
let mut group = c.benchmark_group("Read Piece");
group.bench_with_input(
BenchmarkId::new("Read Piece", "4MB"),
&vec![1u8; ByteSize::mb(4).as_u64() as usize],
|b, data| {
b.iter_batched(
|| {
let mut cache = rt.block_on(async {
Cache::new(Arc::new(create_config(
ByteSize::mb(4) * PIECE_COUNT as u64,
)))
});
rt.block_on(async {
cache
.put_task("task", (ByteSize::mb(4) * PIECE_COUNT as u64).as_u64())
.await;
for i in 0..PIECE_COUNT {
cache
.write_piece(
"task",
&format!("piece{}", i),
Bytes::copy_from_slice(data),
)
.await
.unwrap();
}
});
cache
},
|cache| {
rt.block_on(async {
for i in 0..PIECE_COUNT {
let mut reader = cache
.read_piece(
"task",
&format!("piece{}", i),
create_piece(data.len() as u64),
None,
)
.await
.unwrap();
let mut buffer = Vec::new();
reader.read_to_end(&mut buffer).await.unwrap();
}
});
},
criterion::BatchSize::SmallInput,
);
},
);
group.bench_with_input(
BenchmarkId::new("Read Piece", "10MB"),
&vec![1u8; ByteSize::mb(10).as_u64() as usize],
|b, data| {
b.iter_batched(
|| {
let mut cache = rt.block_on(async {
Cache::new(Arc::new(create_config(
ByteSize::mb(10) * PIECE_COUNT as u64,
)))
});
rt.block_on(async {
cache
.put_task("task", (ByteSize::mb(10) * PIECE_COUNT as u64).as_u64())
.await;
for i in 0..PIECE_COUNT {
cache
.write_piece(
"task",
&format!("piece{}", i),
Bytes::copy_from_slice(data),
)
.await
.unwrap();
}
});
cache
},
|cache| {
rt.block_on(async {
for i in 0..PIECE_COUNT {
let mut reader = cache
.read_piece(
"task",
&format!("piece{}", i),
create_piece(data.len() as u64),
None,
)
.await
.unwrap();
let mut buffer = Vec::new();
reader.read_to_end(&mut buffer).await.unwrap();
}
});
},
criterion::BatchSize::SmallInput,
);
},
);
group.bench_with_input(
BenchmarkId::new("Read Piece", "16MB"),
&vec![1u8; ByteSize::mb(16).as_u64() as usize],
|b, data| {
b.iter_batched(
|| {
let mut cache = rt.block_on(async {
Cache::new(Arc::new(create_config(
ByteSize::mb(16) * PIECE_COUNT as u64,
)))
});
rt.block_on(async {
cache
.put_task("task", (ByteSize::mb(16) * PIECE_COUNT as u64).as_u64())
.await;
for i in 0..PIECE_COUNT {
cache
.write_piece(
"task",
&format!("piece{}", i),
Bytes::copy_from_slice(data),
)
.await
.unwrap();
}
});
cache
},
|cache| {
rt.block_on(async {
for i in 0..PIECE_COUNT {
let mut reader = cache
.read_piece(
"task",
&format!("piece{}", i),
create_piece(data.len() as u64),
None,
)
.await
.unwrap();
let mut buffer = Vec::new();
reader.read_to_end(&mut buffer).await.unwrap();
}
});
},
criterion::BatchSize::SmallInput,
);
},
);
group.finish();
}
criterion_group!(benches, put_task, delete_task, write_piece, read_piece,);
criterion_main!(benches);

View File

@ -1,448 +0,0 @@
/*
* Copyright 2025 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use bytesize::ByteSize;
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};
use dragonfly_client_storage::cache::lru_cache::LruCache;
// Number of operations to perform in each benchmark
const OPERATION_COUNT: usize = 1000;
pub fn lru_cache_put(c: &mut Criterion) {
let mut group = c.benchmark_group("Lru Cache Put");
group.bench_with_input(
BenchmarkId::new("Lru Cache Put", "4MB"),
&ByteSize::mb(4),
|b, size| {
b.iter_batched(
|| LruCache::new(OPERATION_COUNT),
|mut cache| {
for i in 0..OPERATION_COUNT {
cache.put(format!("key{}", i), size.as_u64());
}
},
criterion::BatchSize::SmallInput,
);
},
);
group.bench_with_input(
BenchmarkId::new("Lru Cache Put", "10MB"),
&ByteSize::mb(10),
|b, size| {
b.iter_batched(
|| LruCache::new(OPERATION_COUNT),
|mut cache| {
for i in 0..OPERATION_COUNT {
cache.put(format!("key{}", i), size.as_u64());
}
},
criterion::BatchSize::SmallInput,
);
},
);
group.bench_with_input(
BenchmarkId::new("Lru Cache Put", "16MB"),
&ByteSize::mb(16),
|b, size| {
b.iter_batched(
|| LruCache::new(OPERATION_COUNT),
|mut cache| {
for i in 0..OPERATION_COUNT {
cache.put(format!("key{}", i), size.as_u64());
}
},
criterion::BatchSize::SmallInput,
);
},
);
group.finish();
}
pub fn lru_cache_get(c: &mut Criterion) {
let mut group = c.benchmark_group("Lru Cache Get");
group.bench_with_input(
BenchmarkId::new("Lru Cache Get", "4MB"),
&ByteSize::mb(4),
|b, size| {
b.iter_batched(
|| {
let mut cache = LruCache::new(OPERATION_COUNT);
for i in 0..OPERATION_COUNT {
cache.put(format!("key{}", i), size.as_u64());
}
cache
},
|mut cache| {
for i in 0..OPERATION_COUNT {
black_box(cache.get(&format!("key{}", i)));
}
},
criterion::BatchSize::SmallInput,
);
},
);
group.bench_with_input(
BenchmarkId::new("Lru Cache Get", "10MB"),
&ByteSize::mb(10),
|b, size| {
b.iter_batched(
|| {
let mut cache = LruCache::new(OPERATION_COUNT);
for i in 0..OPERATION_COUNT {
cache.put(format!("key{}", i), size.as_u64());
}
cache
},
|mut cache| {
for i in 0..OPERATION_COUNT {
black_box(cache.get(&format!("key{}", i)));
}
},
criterion::BatchSize::SmallInput,
);
},
);
group.bench_with_input(
BenchmarkId::new("Lru Cache Get", "16MB"),
&ByteSize::mb(16),
|b, size| {
b.iter_batched(
|| {
let mut cache = LruCache::new(OPERATION_COUNT);
for i in 0..OPERATION_COUNT {
cache.put(format!("key{}", i), size.as_u64());
}
cache
},
|mut cache| {
for i in 0..OPERATION_COUNT {
black_box(cache.get(&format!("key{}", i)));
}
},
criterion::BatchSize::SmallInput,
);
},
);
group.finish();
}
pub fn lru_cache_peek(c: &mut Criterion) {
let mut group = c.benchmark_group("Lru Cache Peek");
group.bench_with_input(
BenchmarkId::new("Lru Cache Peek", "4MB"),
&ByteSize::mb(4),
|b, size| {
b.iter_batched(
|| {
let mut cache = LruCache::new(OPERATION_COUNT);
for i in 0..OPERATION_COUNT {
cache.put(format!("key{}", i), size.as_u64());
}
cache
},
|cache| {
for i in 0..OPERATION_COUNT {
black_box(cache.peek(&format!("key{}", i)));
}
},
criterion::BatchSize::SmallInput,
);
},
);
group.bench_with_input(
BenchmarkId::new("Lru Cache Peek", "10MB"),
&ByteSize::mb(10),
|b, size| {
b.iter_batched(
|| {
let mut cache = LruCache::new(OPERATION_COUNT);
for i in 0..OPERATION_COUNT {
cache.put(format!("key{}", i), size.as_u64());
}
cache
},
|cache| {
for i in 0..OPERATION_COUNT {
black_box(cache.peek(&format!("key{}", i)));
}
},
criterion::BatchSize::SmallInput,
);
},
);
group.bench_with_input(
BenchmarkId::new("Lru Cache Peek", "16MB"),
&ByteSize::mb(16),
|b, size| {
b.iter_batched(
|| {
let mut cache = LruCache::new(OPERATION_COUNT);
for i in 0..OPERATION_COUNT {
cache.put(format!("key{}", i), size.as_u64());
}
cache
},
|cache| {
for i in 0..OPERATION_COUNT {
black_box(cache.peek(&format!("key{}", i)));
}
},
criterion::BatchSize::SmallInput,
);
},
);
group.finish();
}
pub fn lru_cache_contains(c: &mut Criterion) {
let mut group = c.benchmark_group("Lru Cache Contains");
group.bench_with_input(
BenchmarkId::new("Lru Cache Contains", "4MB"),
&ByteSize::mb(4),
|b, size| {
b.iter_batched(
|| {
let mut cache = LruCache::new(OPERATION_COUNT);
for i in 0..OPERATION_COUNT {
cache.put(format!("key{}", i), size.as_u64());
}
cache
},
|cache| {
for i in 0..OPERATION_COUNT {
black_box(cache.contains(&format!("key{}", i)));
}
},
criterion::BatchSize::SmallInput,
);
},
);
group.bench_with_input(
BenchmarkId::new("Lru Cache Contains", "10MB"),
&ByteSize::mb(10),
|b, size| {
b.iter_batched(
|| {
let mut cache = LruCache::new(OPERATION_COUNT);
for i in 0..OPERATION_COUNT {
cache.put(format!("key{}", i), size.as_u64());
}
cache
},
|cache| {
for i in 0..OPERATION_COUNT {
black_box(cache.contains(&format!("key{}", i)));
}
},
criterion::BatchSize::SmallInput,
);
},
);
group.bench_with_input(
BenchmarkId::new("Lru Cache Contains", "16MB"),
&ByteSize::mb(16),
|b, size| {
b.iter_batched(
|| {
let mut cache = LruCache::new(OPERATION_COUNT);
for i in 0..OPERATION_COUNT {
cache.put(format!("key{}", i), size.as_u64());
}
cache
},
|cache| {
for i in 0..OPERATION_COUNT {
black_box(cache.contains(&format!("key{}", i)));
}
},
criterion::BatchSize::SmallInput,
);
},
);
group.finish();
}
pub fn lru_cache_pop(c: &mut Criterion) {
let mut group = c.benchmark_group("Lru Cache Pop");
group.bench_with_input(
BenchmarkId::new("Lru Cache Pop", "4MB"),
&ByteSize::mb(4),
|b, size| {
b.iter_batched(
|| {
let mut cache = LruCache::new(OPERATION_COUNT);
for i in 0..OPERATION_COUNT {
cache.put(format!("key{}", i), size.as_u64());
}
cache
},
|mut cache| {
for i in 0..OPERATION_COUNT {
black_box(cache.pop(&format!("key{}", i)));
}
},
criterion::BatchSize::SmallInput,
);
},
);
group.bench_with_input(
BenchmarkId::new("Lru Cache Pop", "10MB"),
&ByteSize::mb(10),
|b, size| {
b.iter_batched(
|| {
let mut cache = LruCache::new(OPERATION_COUNT);
for i in 0..OPERATION_COUNT {
cache.put(format!("key{}", i), size.as_u64());
}
cache
},
|mut cache| {
for i in 0..OPERATION_COUNT {
black_box(cache.pop(&format!("key{}", i)));
}
},
criterion::BatchSize::SmallInput,
);
},
);
group.bench_with_input(
BenchmarkId::new("Lru Cache Pop", "16MB"),
&ByteSize::mb(16),
|b, size| {
b.iter_batched(
|| {
let mut cache = LruCache::new(OPERATION_COUNT);
for i in 0..OPERATION_COUNT {
cache.put(format!("key{}", i), size.as_u64());
}
cache
},
|mut cache| {
for i in 0..OPERATION_COUNT {
black_box(cache.pop(&format!("key{}", i)));
}
},
criterion::BatchSize::SmallInput,
);
},
);
group.finish();
}
pub fn lru_cache_pop_lru(c: &mut Criterion) {
let mut group = c.benchmark_group("Lru Cache Pop Lru");
group.bench_with_input(
BenchmarkId::new("Lru Cache Pop Lru", "4MB"),
&ByteSize::mb(4),
|b, size| {
b.iter_batched(
|| {
let mut cache = LruCache::new(OPERATION_COUNT);
for i in 0..OPERATION_COUNT {
cache.put(format!("key{}", i), size.as_u64());
}
cache
},
|mut cache| {
while !cache.is_empty() {
black_box(cache.pop_lru());
}
},
criterion::BatchSize::SmallInput,
);
},
);
group.bench_with_input(
BenchmarkId::new("Lru Cache Pop Lru", "10MB"),
&ByteSize::mb(10),
|b, size| {
b.iter_batched(
|| {
let mut cache = LruCache::new(OPERATION_COUNT);
for i in 0..OPERATION_COUNT {
cache.put(format!("key{}", i), size.as_u64());
}
cache
},
|mut cache| {
while !cache.is_empty() {
black_box(cache.pop_lru());
}
},
criterion::BatchSize::SmallInput,
);
},
);
group.bench_with_input(
BenchmarkId::new("Lru Cache Pop Lru", "16MB"),
&ByteSize::mb(16),
|b, size| {
b.iter_batched(
|| {
let mut cache = LruCache::new(OPERATION_COUNT);
for i in 0..OPERATION_COUNT {
cache.put(format!("key{}", i), size.as_u64());
}
cache
},
|mut cache| {
while !cache.is_empty() {
black_box(cache.pop_lru());
}
},
criterion::BatchSize::SmallInput,
);
},
);
group.finish();
}
criterion_group!(
benches,
lru_cache_put,
lru_cache_get,
lru_cache_peek,
lru_cache_contains,
lru_cache_pop,
lru_cache_pop_lru,
);
criterion_main!(benches);
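
The six benchmark functions above register the same three payload sizes by hand. If that duplication ever becomes a maintenance burden, the per-size registration could be factored into a small helper; the sketch below is only an illustration and is not part of the original file (the helper name bench_sizes and the rewritten lru_cache_put_with_helper are assumptions), and it reuses the imports already at the top of this benchmark file.

fn bench_sizes<F>(
    group: &mut criterion::BenchmarkGroup<'_, criterion::measurement::WallTime>,
    name: &str,
    mut routine: F,
) where
    F: FnMut(&mut criterion::Bencher<'_>, &ByteSize),
{
    // Register one benchmark per payload size, mirroring the 4MB/10MB/16MB cases above.
    for size in [ByteSize::mb(4), ByteSize::mb(10), ByteSize::mb(16)] {
        group.bench_with_input(BenchmarkId::new(name, size.to_string()), &size, |b, size| {
            routine(b, size)
        });
    }
}

// Example: lru_cache_put expressed on top of the helper.
pub fn lru_cache_put_with_helper(c: &mut Criterion) {
    let mut group = c.benchmark_group("Lru Cache Put");
    bench_sizes(&mut group, "Lru Cache Put", |b, size| {
        b.iter_batched(
            || LruCache::new(OPERATION_COUNT),
            |mut cache| {
                for i in 0..OPERATION_COUNT {
                    cache.put(format!("key{}", i), size.as_u64());
                }
            },
            criterion::BatchSize::SmallInput,
        )
    });
    group.finish();
}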

View File

@ -1,509 +0,0 @@
/*
* Copyright 2025 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::{borrow::Borrow, collections::HashMap, hash::Hash, hash::Hasher};
/// KeyRef is a reference to the key.
#[derive(Debug, Clone, Copy)]
struct KeyRef<K> {
k: *const K,
}
/// KeyRef implements Hash for KeyRef.
impl<K: Hash> Hash for KeyRef<K> {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
unsafe {
let key = &*self.k;
key.hash(state)
}
}
}
/// KeyRef implements PartialEq for KeyRef.
impl<K: PartialEq> PartialEq for KeyRef<K> {
fn eq(&self, other: &Self) -> bool {
unsafe {
let key1 = &*self.k;
let key2 = &*other.k;
key1.eq(key2)
}
}
}
/// KeyRef implements Eq for KeyRef.
impl<K: Eq> Eq for KeyRef<K> {}
/// KeyWrapper is a wrapper for the key.
#[repr(transparent)]
struct KeyWrapper<K: ?Sized>(K);
/// KeyWrapper implements reference conversion.
impl<K: ?Sized> KeyWrapper<K> {
/// from_ref creates a new KeyWrapper from a reference to the key.
fn from_ref(key: &K) -> &Self {
unsafe { &*(key as *const K as *const KeyWrapper<K>) }
}
}
/// KeyWrapper implements Hash for KeyWrapper.
impl<K: ?Sized + Hash> Hash for KeyWrapper<K> {
fn hash<H: Hasher>(&self, state: &mut H) {
self.0.hash(state)
}
}
/// KeyWrapper implements PartialEq for KeyWrapper.
impl<K: ?Sized + PartialEq> PartialEq for KeyWrapper<K> {
#![allow(unknown_lints)]
#[allow(clippy::unconditional_recursion)]
fn eq(&self, other: &Self) -> bool {
self.0.eq(&other.0)
}
}
/// KeyWrapper implements Eq for KeyWrapper.
impl<K: ?Sized + Eq> Eq for KeyWrapper<K> {}
/// KeyWrapper implements Borrow for KeyWrapper.
impl<K, Q> Borrow<KeyWrapper<Q>> for KeyRef<K>
where
K: Borrow<Q>,
Q: ?Sized,
{
/// borrow borrows the key.
fn borrow(&self) -> &KeyWrapper<Q> {
unsafe {
let key = &*self.k;
KeyWrapper::from_ref(key.borrow())
}
}
}
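// Note: KeyRef holds a raw pointer to the key owned by the boxed Entry stored in the
// map, and KeyWrapper is a #[repr(transparent)] view over a borrowed key. The Borrow
// impl above is what lets the HashMap be queried with any Q where K: Borrow<Q> (for
// example &str against String keys) without cloning the key; the unsafe blocks are
// sound only because the map's Box keeps the pointed-to key alive.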
/// Entry is a cache entry.
struct Entry<K, V> {
key: K,
value: V,
prev: Option<*mut Entry<K, V>>,
next: Option<*mut Entry<K, V>>,
}
/// Entry implements Entry.
impl<K, V> Entry<K, V> {
/// new creates a new Entry.
fn new(key: K, value: V) -> Self {
Self {
key,
value,
prev: None,
next: None,
}
}
}
/// LruCache is a least recently used cache.
pub struct LruCache<K, V> {
capacity: usize,
map: HashMap<KeyRef<K>, Box<Entry<K, V>>>,
head: Option<*mut Entry<K, V>>,
tail: Option<*mut Entry<K, V>>,
_marker: std::marker::PhantomData<K>,
}
/// LruCache implements LruCache.
impl<K: Hash + Eq, V> LruCache<K, V> {
/// new creates a new LruCache.
pub fn new(capacity: usize) -> Self {
Self {
capacity,
map: HashMap::new(),
head: None,
tail: None,
_marker: std::marker::PhantomData,
}
}
/// get gets the value of the key.
pub fn get<'a, Q>(&'a mut self, k: &Q) -> Option<&'a V>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
if let Some(entry) = self.map.get_mut(KeyWrapper::from_ref(k)) {
let entry_ptr: *mut Entry<K, V> = &mut **entry;
self.detach(entry_ptr);
self.attach(entry_ptr);
Some(&unsafe { &*entry_ptr }.value)
} else {
None
}
}
/// put puts the key and value into the cache.
pub fn put(&mut self, key: K, mut value: V) -> Option<V> {
if let Some(existing_entry) = self.map.get_mut(KeyWrapper::from_ref(&key)) {
let entry = existing_entry.as_mut();
std::mem::swap(&mut entry.value, &mut value);
let entry_ptr: *mut Entry<K, V> = entry;
self.detach(entry_ptr);
self.attach(entry_ptr);
return Some(value);
}
let mut evicted_value = None;
if self.map.len() >= self.capacity {
if let Some(tail) = self.tail {
self.detach(tail);
unsafe {
if let Some(entry) = self.map.remove(KeyWrapper::from_ref(&(*tail).key)) {
evicted_value = Some(entry.value);
}
}
}
}
let new_entry = Box::new(Entry::new(key, value));
let key_ptr: *const K = &new_entry.key;
let entry_ptr = Box::into_raw(new_entry);
unsafe {
self.attach(entry_ptr);
self.map
.insert(KeyRef { k: key_ptr }, Box::from_raw(entry_ptr));
}
evicted_value
}
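    // Note: put briefly turns the boxed entry into a raw pointer so the same allocation can
    // be linked into the intrusive list (attach) and then handed back to the map via
    // Box::from_raw. The map's Box owns the allocation; head/tail/prev/next only alias it,
    // which is why detach and attach adjust pointers but never free memory.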
/// detach detaches the entry from the cache.
fn detach(&mut self, entry: *mut Entry<K, V>) {
unsafe {
let prev = (*entry).prev;
let next = (*entry).next;
match prev {
Some(prev) => (*prev).next = next,
None => self.head = next,
}
match next {
Some(next) => (*next).prev = prev,
None => self.tail = prev,
}
(*entry).prev = None;
(*entry).next = None;
}
}
/// attach attaches the entry to the cache.
fn attach(&mut self, entry: *mut Entry<K, V>) {
match self.head {
Some(head) => {
unsafe {
(*entry).next = Some(head);
(*head).prev = Some(entry);
}
self.head = Some(entry);
}
None => {
self.head = Some(entry);
self.tail = Some(entry);
}
}
}
/// contains checks whether the key exists in the cache.
pub fn contains<Q>(&self, k: &Q) -> bool
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
self.map.contains_key(KeyWrapper::from_ref(k))
}
/// peek peeks the value of the key. It does not move the key to the front of the cache.
pub fn peek<'a, Q>(&'a self, k: &Q) -> Option<&'a V>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
self.map
.get(KeyWrapper::from_ref(k))
.map(|entry| &entry.value)
}
/// pop_lru pops the least recently used value from the cache.
pub fn pop_lru(&mut self) -> Option<(K, V)> {
if self.is_empty() {
return None;
}
let tail = self.tail?;
self.detach(tail);
unsafe {
self.map
.remove(KeyWrapper::from_ref(&(*tail).key))
.map(|entry| (entry.key, entry.value))
}
}
/// pop removes and returns the value for a given key; if the key does not exist, it returns None.
pub fn pop<Q>(&mut self, k: &Q) -> Option<(K, V)>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
match self.map.remove(KeyWrapper::from_ref(k)) {
None => None,
Some(entry) => {
let entry_ptr = Box::into_raw(entry);
self.detach(entry_ptr);
unsafe {
let entry = Box::from_raw(entry_ptr);
Some((entry.key, entry.value))
}
}
}
}
/// is_empty checks whether the cache is empty.
pub fn is_empty(&self) -> bool {
self.map.is_empty()
}
}
unsafe impl<K: Send, V: Send> Send for LruCache<K, V> {}
unsafe impl<K: Sync, V: Sync> Sync for LruCache<K, V> {}
impl<K, V> Drop for LruCache<K, V> {
fn drop(&mut self) {
self.map.clear();
self.head = None;
self.tail = None;
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_new() {
let test_cases = vec![
// Normal capacity.
(5, 5),
// Minimum meaningful capacity.
(1, 1),
// Zero capacity.
(0, 0),
// Maximum capacity.
(usize::MAX, usize::MAX),
];
for (capacity, expected_capacity) in test_cases {
let cache: LruCache<String, i32> = LruCache::new(capacity);
assert!(cache.is_empty());
assert_eq!(cache.capacity, expected_capacity);
}
}
#[test]
fn test_get() {
let mut cache: LruCache<String, i32> = LruCache::new(3);
let test_cases = vec![
// Initial insertions.
("key1", 1, None),
("key2", 2, None),
("key3", 3, None),
// Update existing key.
("key2", 22, Some(2)),
// Eviction of oldest key.
("key4", 4, Some(1)),
];
for (key, value, expected_result) in test_cases {
let result = cache.put(key.to_string(), value);
assert_eq!(result, expected_result);
}
// Verify final cache state.
assert_eq!(cache.get(&"key1".to_string()), None);
assert_eq!(cache.get(&"key2".to_string()).copied(), Some(22));
assert_eq!(cache.get(&"key3".to_string()).copied(), Some(3));
assert_eq!(cache.get(&"key4".to_string()).copied(), Some(4));
}
#[test]
fn test_get_after_eviction() {
let mut cache = LruCache::new(3);
assert_eq!(cache.get(&"nonexistent".to_string()), None);
// Prepare cache with initial values.
for (key, value) in [("key1", 1), ("key2", 2), ("key3", 3)] {
cache.put(key.to_string(), value);
}
let test_cases = vec![
("key1", Some(1)),
("nonexistent", None),
("key1", Some(1)),
("key3", Some(3)),
];
for (key, expected_value) in test_cases {
assert_eq!(cache.get(&key.to_string()).copied(), expected_value);
}
// Test eviction after getting.
cache.put("key4".to_string(), 4);
assert_eq!(cache.get(&"key1".to_string()).copied(), Some(1));
assert_eq!(cache.get(&"key2".to_string()), None);
assert_eq!(cache.get(&"key3".to_string()).copied(), Some(3));
assert_eq!(cache.get(&"key4".to_string()).copied(), Some(4));
}
#[test]
fn test_put() {
let mut cache = LruCache::new(3);
let test_cases = vec![
// Initial insertions within capacity.
("key1", 1, None),
("key2", 2, None),
("key3", 3, None),
// Overflow capacity, should evict oldest.
("key4", 4, Some(1)),
("key5", 5, Some(2)),
// Update existing key.
("key4", 44, Some(4)),
];
for (key, value, expected_result) in test_cases {
let result = cache.put(key.to_string(), value);
assert_eq!(result, expected_result);
}
// Verify final cache state.
assert_eq!(cache.get(&"key1".to_string()), None);
assert_eq!(cache.get(&"key2".to_string()), None);
assert_eq!(cache.get(&"key3".to_string()).copied(), Some(3));
assert_eq!(cache.get(&"key4".to_string()).copied(), Some(44));
assert_eq!(cache.get(&"key5".to_string()).copied(), Some(5));
}
#[test]
fn test_peek() {
let mut cache: LruCache<String, i32> = LruCache::new(3);
assert_eq!(cache.peek(&"nonexistent".to_string()), None);
// Prepare cache with initial values.
for (key, value) in [("key1", 1), ("key2", 2), ("key3", 3)] {
cache.put(key.to_string(), value);
}
let test_cases = vec![
("nonexistent", None),
("key1", Some(1)),
("key2", Some(2)),
("key3", Some(3)),
];
for (key, expected_value) in test_cases {
assert_eq!(cache.peek(&key.to_string()).copied(), expected_value);
}
// Test eviction after peeking.
cache.put("key4".to_string(), 4);
assert_eq!(cache.peek(&"key1".to_string()), None);
assert_eq!(cache.peek(&"key2".to_string()).copied(), Some(2));
assert_eq!(cache.peek(&"key3".to_string()).copied(), Some(3));
assert_eq!(cache.peek(&"key4".to_string()).copied(), Some(4));
}
#[test]
fn test_contains() {
let mut cache: LruCache<String, i32> = LruCache::new(3);
assert!(!cache.contains(&"nonexistent".to_string()));
// Prepare cache with initial values.
for (key, value) in [("key1", 1), ("key2", 2), ("key3", 3)] {
cache.put(key.to_string(), value);
}
let test_cases = vec![
("nonexistent", false),
("key1", true),
("key2", true),
("key3", true),
];
for (key, expected_result) in test_cases {
assert_eq!(cache.contains(&key.to_string()), expected_result);
}
// Test eviction after contains.
cache.put("key4".to_string(), 4);
assert!(!cache.contains(&"key1".to_string()));
assert!(cache.contains(&"key2".to_string()));
assert!(cache.contains(&"key3".to_string()));
assert!(cache.contains(&"key4".to_string()));
}
#[test]
fn test_pop_lru() {
let mut cache: LruCache<String, i32> = LruCache::new(3);
assert_eq!(cache.pop_lru(), None);
for (key, value) in [("key1", 1), ("key2", 2), ("key3", 3)] {
cache.put(key.to_string(), value);
}
assert_eq!(cache.pop_lru(), Some(("key1".to_string(), 1)));
assert_eq!(cache.pop_lru(), Some(("key2".to_string(), 2)));
assert_eq!(cache.pop_lru(), Some(("key3".to_string(), 3)));
assert_eq!(cache.pop_lru(), None);
assert!(cache.is_empty());
}
#[test]
fn test_pop() {
let mut cache: LruCache<String, i32> = LruCache::new(3);
let test_cases = vec![
("key1".to_string(), Some(("key1".to_string(), 1))),
("key2".to_string(), Some(("key2".to_string(), 2))),
("key3".to_string(), Some(("key3".to_string(), 3))),
("key1".to_string(), None),
("key2".to_string(), None),
("key3".to_string(), None),
];
cache.put("key1".to_string(), 1);
cache.put("key2".to_string(), 2);
cache.put("key3".to_string(), 3);
for (key, expected) in test_cases {
assert_eq!(cache.pop(&key), expected);
}
assert!(cache.is_empty());
}
}
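
Putting the API together, here is a minimal usage sketch of the eviction and recency semantics exercised by the tests above. It is written as if it were another case inside the tests module (so LruCache resolves through its use super::*); it is an illustration, not part of the original file.

#[test]
fn sketch_lru_semantics() {
    // Capacity of two entries; the least recently used key is evicted on overflow.
    let mut cache: LruCache<String, u64> = LruCache::new(2);
    assert_eq!(cache.put("a".to_string(), 1), None);
    assert_eq!(cache.put("b".to_string(), 2), None);

    // get refreshes recency, so "b" becomes the least recently used entry.
    assert_eq!(cache.get("a"), Some(&1));

    // Inserting a third key evicts "b" and returns its value.
    assert_eq!(cache.put("c".to_string(), 3), Some(2));
    assert!(!cache.contains("b"));

    // peek does not change recency; pop removes a specific key; pop_lru removes the oldest.
    assert_eq!(cache.peek("a"), Some(&1));
    assert_eq!(cache.pop("a"), Some(("a".to_string(), 1)));
    assert_eq!(cache.pop_lru(), Some(("c".to_string(), 3)));
    assert!(cache.is_empty());
}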

View File

@ -1,989 +0,0 @@
/*
* Copyright 2025 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use bytes::Bytes;
use dragonfly_api::common::v2::Range;
use dragonfly_client_config::dfdaemon::Config;
use dragonfly_client_core::{Error, Result};
use lru_cache::LruCache;
use std::cmp::{max, min};
use std::collections::HashMap;
use std::io::Cursor;
use std::sync::Arc;
use tokio::io::{AsyncRead, BufReader};
use tokio::sync::RwLock;
use tracing::info;
pub mod lru_cache;
/// Task is the task content in the cache.
#[derive(Clone, Debug)]
struct Task {
/// content_length is the length of the task content.
content_length: u64,
/// pieces is the pieces content of the task.
pieces: Arc<RwLock<HashMap<String, Bytes>>>,
}
/// Task implements the task content in the cache.
impl Task {
/// new creates a new task.
fn new(content_length: u64) -> Self {
Self {
content_length,
pieces: Arc::new(RwLock::new(HashMap::new())),
}
}
/// write_piece writes the piece content to the task.
async fn write_piece(&self, id: &str, piece: Bytes) {
let mut pieces = self.pieces.write().await;
pieces.insert(id.to_string(), piece);
}
/// read_piece reads the piece content from the task.
async fn read_piece(&self, id: &str) -> Option<Bytes> {
let pieces = self.pieces.read().await;
pieces.get(id).cloned()
}
/// contains checks whether the piece exists in the task.
async fn contains(&self, id: &str) -> bool {
let pieces = self.pieces.read().await;
pieces.contains_key(id)
}
/// content_length returns the content length of the task.
fn content_length(&self) -> u64 {
self.content_length
}
}
/// Cache is the cache for storing piece content by LRU algorithm.
///
/// Cache storage:
/// 1. Users can preheat a task by caching it in memory (via CacheTask) or on disk (via Task).
///    For more details, refer to https://github.com/dragonflyoss/api/blob/main/proto/dfdaemon.proto#L174.
/// 2. A download that hits the memory cache is faster than reading from disk, because the first
///    disk read has no page cache to hit.
///
///```text
/// +--------+
/// │ Source │
/// +--------+
/// ^ ^ Preheat
/// │ │ |
/// +-----------------+ │ │ +----------------------------+
/// │ Other Peers │ │ │ │ Peer | │
/// │ │ │ │ │ v │
/// │ +----------+ │ │ │ │ +----------+ │
/// │ │ Cache |<--|----------|<-Miss--| Cache |--Hit-->|<----Download CacheTask
/// │ +----------+ │ │ │ +----------+ │
/// │ │ │ │ │
/// │ +----------+ │ │ │ +----------+ │
/// │ │ Disk |<--|----------|<-Miss--| Disk |--Hit-->|<----Download Task
/// │ +----------+ │ │ +----------+ │
/// │ │ │ ^ │
/// │ │ │ | │
/// +-----------------+ +----------------------------+
/// |
/// Preheat
///```
/// Cache is shared across tasks and evicts whole tasks by LRU when its configured byte capacity is exceeded.
#[derive(Clone)]
pub struct Cache {
/// config is the configuration of the dfdaemon.
config: Arc<Config>,
/// size is the size of the cache in bytes.
size: u64,
/// capacity is the maximum capacity of the cache in bytes.
capacity: u64,
/// tasks stores the tasks with their task id.
tasks: Arc<RwLock<LruCache<String, Task>>>,
}
/// Cache implements the cache for storing piece content by LRU algorithm.
impl Cache {
/// new creates a new cache with the specified capacity.
pub fn new(config: Arc<Config>) -> Self {
Cache {
config: config.clone(),
size: 0,
capacity: config.storage.cache_capacity.as_u64(),
// The LRU cache's entry capacity is set to usize::MAX so it never evicts tasks on its
// own. Eviction is driven by this Cache's byte capacity (cache size), which calls
// pop_lru to drop the least recently used task.
tasks: Arc::new(RwLock::new(LruCache::new(usize::MAX))),
}
}
/// read_piece reads the piece from the cache.
pub async fn read_piece(
&self,
task_id: &str,
piece_id: &str,
piece: super::metadata::Piece,
range: Option<Range>,
) -> Result<impl AsyncRead> {
let mut tasks = self.tasks.write().await;
let Some(task) = tasks.get(task_id) else {
return Err(Error::TaskNotFound(task_id.to_string()));
};
let Some(piece_content) = task.read_piece(piece_id).await else {
return Err(Error::PieceNotFound(piece_id.to_string()));
};
drop(tasks);
// Calculate the range of bytes to return based on the range provided.
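        // For example (illustration only): with piece.offset = 11, piece.length = 9 and
        // range = { start: 11, length: 4 }, target_offset = max(11, 11) - 11 = 0 and
        // target_length = min(11 + 9 - 1, 11 + 4 - 1) - 0 - 11 + 1 = 4, so bytes [0..4)
        // of the piece content are returned.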
let (target_offset, target_length) = if let Some(range) = range {
let target_offset = max(piece.offset, range.start) - piece.offset;
let target_length = min(
piece.offset + piece.length - 1,
range.start + range.length - 1,
) - target_offset
- piece.offset
+ 1;
(target_offset as usize, target_length as usize)
} else {
(0, piece.length as usize)
};
// Check if the target range is valid.
let begin = target_offset;
let end = target_offset + target_length;
if begin >= piece_content.len() || end > piece_content.len() {
return Err(Error::InvalidParameter);
}
let content = piece_content.slice(begin..end);
let reader =
BufReader::with_capacity(self.config.storage.read_buffer_size, Cursor::new(content));
Ok(reader)
}
/// write_piece writes the piece content to the cache.
pub async fn write_piece(&self, task_id: &str, piece_id: &str, content: Bytes) -> Result<()> {
let mut tasks = self.tasks.write().await;
let Some(task) = tasks.get(task_id) else {
return Err(Error::TaskNotFound(task_id.to_string()));
};
if task.contains(piece_id).await {
return Ok(());
}
task.write_piece(piece_id, content).await;
Ok(())
}
/// put_task puts a new task into the cache, constrained by the capacity of the cache.
pub async fn put_task(&mut self, task_id: &str, content_length: u64) {
// If the content length is 0, we don't cache the task.
if content_length == 0 {
return;
}
// If the content length is larger than the cache capacity, the task cannot be cached.
if content_length > self.capacity {
info!(
"task {} is too large and cannot be cached: {}",
task_id, content_length
);
return;
}
let mut tasks = self.tasks.write().await;
while self.size + content_length > self.capacity {
match tasks.pop_lru() {
Some((_, task)) => {
self.size -= task.content_length();
}
None => {
break;
}
}
}
let task = Task::new(content_length);
tasks.put(task_id.to_string(), task);
self.size += content_length;
}
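    /// delete_task deletes the task from the cache and reclaims the size it occupied.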
pub async fn delete_task(&mut self, task_id: &str) -> Result<()> {
let mut tasks = self.tasks.write().await;
let Some((_, task)) = tasks.pop(task_id) else {
return Err(Error::TaskNotFound(task_id.to_string()));
};
self.size -= task.content_length();
Ok(())
}
/// contains_task checks whether the task exists in the cache.
pub async fn contains_task(&self, id: &str) -> bool {
let tasks = self.tasks.read().await;
tasks.contains(id)
}
/// contains_piece checks whether the piece exists in the specified task.
pub async fn contains_piece(&self, task_id: &str, piece_id: &str) -> bool {
let tasks = self.tasks.read().await;
if let Some(task) = tasks.peek(task_id) {
task.contains(piece_id).await
} else {
false
}
}
}
#[cfg(test)]
mod tests {
use super::super::metadata::Piece;
use super::*;
use bytesize::ByteSize;
use dragonfly_api::common::v2::Range;
use dragonfly_client_config::dfdaemon::Storage;
use tokio::io::AsyncReadExt;
#[tokio::test]
async fn test_new() {
let test_cases = vec![
// Default configuration with 64MiB capacity.
(Config::default(), 0, ByteSize::mib(64).as_u64()),
// Custom configuration with 100MiB capacity.
(
Config {
storage: Storage {
cache_capacity: ByteSize::mib(100),
..Default::default()
},
..Default::default()
},
0,
ByteSize::mib(100).as_u64(),
),
// Zero capacity configuration.
(
Config {
storage: Storage {
cache_capacity: ByteSize::b(0),
..Default::default()
},
..Default::default()
},
0,
0,
),
];
for (config, expected_size, expected_capacity) in test_cases {
let cache = Cache::new(Arc::new(config));
assert_eq!(cache.size, expected_size);
assert_eq!(cache.capacity, expected_capacity);
}
}
#[tokio::test]
async fn test_contains_task() {
let config = Config {
storage: Storage {
cache_capacity: ByteSize::mib(10),
..Default::default()
},
..Default::default()
};
let cache = Cache::new(Arc::new(config));
let test_cases = vec![
// Test non-existent task.
("check", "non_existent", 0, false),
// Add and verify task.
("add", "task1", ByteSize::mib(1).as_u64(), true),
("check", "task1", 0, true),
// Remove and verify task.
("remove", "task1", 0, false),
("check", "task1", 0, false),
// Test multiple tasks.
("add", "task1", ByteSize::mib(1).as_u64(), true),
("add", "task2", ByteSize::mib(2).as_u64(), true),
("check", "task1", 0, true),
("check", "task2", 0, true),
("check", "task3", 0, false),
];
for (operation, task_id, content_length, expected_result) in test_cases {
match operation {
"check" => {
assert_eq!(cache.contains_task(task_id).await, expected_result);
}
"add" => {
let task = Task::new(content_length);
cache.tasks.write().await.put(task_id.to_string(), task);
assert_eq!(cache.contains_task(task_id).await, expected_result);
}
"remove" => {
cache.tasks.write().await.pop_lru();
assert_eq!(cache.contains_task(task_id).await, expected_result);
}
_ => panic!("Unknown operation."),
}
}
}
#[tokio::test]
async fn test_put_task() {
let config = Config {
storage: Storage {
cache_capacity: ByteSize::mib(10),
..Default::default()
},
..Default::default()
};
let mut cache = Cache::new(Arc::new(config));
let test_cases = vec![
// Empty task should not be cached.
("empty_task", 0, false),
// Task equal to capacity should be cached.
("equal_capacity", ByteSize::mib(10).as_u64(), true),
// Task exceeding capacity should not be cached.
("exceed_capacity", ByteSize::mib(10).as_u64() + 1, false),
// Normal sized task should be cached.
("normal_task", ByteSize::mib(1).as_u64(), true),
];
for (task_id, size, should_exist) in test_cases {
if size > 0 {
cache.put_task(task_id, size).await;
}
assert_eq!(cache.contains_task(task_id).await, should_exist);
}
}
#[tokio::test]
async fn test_put_task_lru() {
let config = Config {
storage: Storage {
cache_capacity: ByteSize::mib(5),
..Default::default()
},
..Default::default()
};
let mut cache = Cache::new(Arc::new(config));
let test_cases = vec![
// Add tasks until eviction triggers.
("lru_task_1", ByteSize::mib(2).as_u64(), true),
("lru_task_2", ByteSize::mib(2).as_u64(), true),
// Third task triggers eviction.
("lru_task_3", ByteSize::mib(2).as_u64(), true),
// Verify eviction results.
("lru_task_1", 0, false),
("lru_task_2", 0, true),
("lru_task_3", 0, true),
];
for (task_id, size, should_exist) in test_cases {
if size > 0 {
cache.put_task(task_id, size).await;
}
assert_eq!(cache.contains_task(task_id).await, should_exist);
}
}
#[tokio::test]
async fn test_delete_task() {
let config = Config {
storage: Storage {
cache_capacity: ByteSize::mib(10),
..Default::default()
},
..Default::default()
};
let mut cache = Cache::new(Arc::new(config));
cache.put_task("task1", ByteSize::mib(1).as_u64()).await;
cache.put_task("task2", ByteSize::mib(1).as_u64()).await;
cache.put_task("task3", ByteSize::mib(1).as_u64()).await;
let test_cases = vec![
("task1", true),
("task2", true),
("task3", true),
("nonexistent", false),
("", false),
("large_task", false),
];
for (task_id, exists) in test_cases {
assert_eq!(cache.contains_task(task_id).await, exists);
let result = cache.delete_task(task_id).await;
if exists {
assert!(result.is_ok());
} else {
assert!(result.is_err());
}
assert!(!cache.contains_task(task_id).await);
}
assert!(!cache.contains_task("task1").await);
assert!(!cache.contains_task("task2").await);
assert!(!cache.contains_task("task3").await);
assert!(!cache.contains_task("nonexistent").await);
assert!(!cache.contains_task("").await);
assert!(!cache.contains_task("large_task").await);
}
#[tokio::test]
async fn test_contains_piece() {
let config = Config {
storage: Storage {
cache_capacity: ByteSize::mib(10),
..Default::default()
},
..Default::default()
};
let mut cache = Cache::new(Arc::new(config));
let test_cases = vec![
// Check non-existent task.
("check", "non_existent", "piece1", "", false),
// Check empty piece ID in non-existent task.
("check", "non_existent", "", "", false),
// Add task and verify empty task behavior.
("add_task", "task1", "", "", true),
("check", "task1", "piece1", "", false),
// Add piece and verify existence.
("add_piece", "task1", "piece1", "test data", true),
("check", "task1", "piece1", "", true),
// Check empty piece ID in existing task.
("check", "task1", "", "", false),
// Check non-existent piece in existing task.
("check", "task1", "non_existent_piece", "", false),
// Test piece ID with special characters.
("add_piece", "task1", "piece#$%^&*", "test data", true),
("check", "task1", "piece#$%^&*", "", true),
];
for (operation, task_id, piece_id, content, expected_result) in test_cases {
match operation {
"check" => {
assert_eq!(
cache.contains_piece(task_id, piece_id).await,
expected_result
);
}
"add_task" => {
cache.put_task(task_id, 1000).await;
assert!(cache.contains_task(task_id).await);
}
"add_piece" => {
cache
.write_piece(task_id, piece_id, Bytes::from(content))
.await
.unwrap();
assert_eq!(
cache.contains_piece(task_id, piece_id).await,
expected_result
);
}
_ => panic!("Unknown operation."),
}
}
}
#[tokio::test]
async fn test_write_piece() {
let config = Config {
storage: Storage {
cache_capacity: ByteSize::mib(10),
..Default::default()
},
..Default::default()
};
let mut cache = Cache::new(Arc::new(config));
// Test writing to non-existent task.
let test_data = b"test data".to_vec();
let result = cache
.write_piece("non_existent", "piece1", Bytes::from(test_data))
.await;
assert!(matches!(result, Err(Error::TaskNotFound(_))));
// Create a task for testing.
cache.put_task("task1", ByteSize::mib(1).as_u64()).await;
assert!(cache.contains_task("task1").await);
let test_cases = vec![
("piece1", b"hello world".to_vec()),
("piece2", b"rust programming".to_vec()),
("piece3", b"dragonfly cache".to_vec()),
("piece4", b"unit testing".to_vec()),
("piece5", b"async await".to_vec()),
("piece6", b"error handling".to_vec()),
("piece7", vec![0u8; 1024]),
("piece8", vec![1u8; 2048]),
];
for (piece_id, content) in &test_cases {
let result = cache
.write_piece("task1", piece_id, Bytes::copy_from_slice(content))
.await;
assert!(result.is_ok());
assert!(cache.contains_piece("task1", piece_id).await);
let piece = Piece {
number: 0,
offset: 0,
length: content.len() as u64,
digest: "".to_string(),
parent_id: None,
uploading_count: 0,
uploaded_count: 0,
updated_at: chrono::Utc::now().naive_utc(),
created_at: chrono::Utc::now().naive_utc(),
finished_at: None,
};
let mut reader = cache
.read_piece("task1", piece_id, piece, None)
.await
.unwrap();
let mut buffer = Vec::new();
reader.read_to_end(&mut buffer).await.unwrap();
assert_eq!(buffer, *content);
}
// Test attempting to overwrite existing pieces.
// The write should succeed (return Ok) but content should not change.
for (piece_id, original_content) in &test_cases {
let new_content = format!("updated content for {}", piece_id);
let result = cache
.write_piece("task1", piece_id, Bytes::from(new_content))
.await;
assert!(result.is_ok());
// Verify content remains unchanged.
let piece = Piece {
number: 0,
offset: 0,
length: original_content.len() as u64,
digest: "".to_string(),
parent_id: None,
uploading_count: 0,
uploaded_count: 0,
updated_at: chrono::Utc::now().naive_utc(),
created_at: chrono::Utc::now().naive_utc(),
finished_at: None,
};
let mut reader = cache
.read_piece("task1", piece_id, piece, None)
.await
.unwrap();
let mut buffer = Vec::new();
reader.read_to_end(&mut buffer).await.unwrap();
assert_eq!(buffer, *original_content);
}
}
#[tokio::test]
async fn test_read_piece() {
let config = Config {
storage: Storage {
cache_capacity: ByteSize::mib(100),
..Default::default()
},
..Default::default()
};
let mut cache = Cache::new(Arc::new(config));
let piece = Piece {
number: 0,
offset: 0,
length: 11,
digest: "".to_string(),
parent_id: None,
uploading_count: 0,
uploaded_count: 0,
updated_at: chrono::Utc::now().naive_utc(),
created_at: chrono::Utc::now().naive_utc(),
finished_at: None,
};
let result = cache
.read_piece("non_existent", "piece1", piece.clone(), None)
.await;
assert!(matches!(result, Err(Error::TaskNotFound(_))));
cache.put_task("task1", ByteSize::mib(50).as_u64()).await;
let result = cache
.read_piece("task1", "non_existent", piece.clone(), None)
.await;
assert!(matches!(result, Err(Error::PieceNotFound(_))));
let test_pieces = vec![
// Small pieces for basic functionality testing.
(
"piece1",
b"hello world".to_vec(),
Piece {
number: 0,
offset: 0,
length: 11,
digest: "".to_string(),
parent_id: None,
uploading_count: 0,
uploaded_count: 0,
updated_at: chrono::Utc::now().naive_utc(),
created_at: chrono::Utc::now().naive_utc(),
finished_at: None,
},
vec![
(None, b"hello world".to_vec()),
(
Some(Range {
start: 0,
length: 5,
}),
b"hello".to_vec(),
),
],
),
(
"piece2",
b"rust lang".to_vec(),
Piece {
number: 1,
offset: 11,
length: 9,
digest: "".to_string(),
parent_id: None,
uploading_count: 0,
uploaded_count: 0,
updated_at: chrono::Utc::now().naive_utc(),
created_at: chrono::Utc::now().naive_utc(),
finished_at: None,
},
vec![
(None, b"rust lang".to_vec()),
(
Some(Range {
start: 11,
length: 4,
}),
b"rust".to_vec(),
),
],
),
(
"piece3",
b"unit test".to_vec(),
Piece {
number: 2,
offset: 20,
length: 9,
digest: "".to_string(),
parent_id: None,
uploading_count: 0,
uploaded_count: 0,
updated_at: chrono::Utc::now().naive_utc(),
created_at: chrono::Utc::now().naive_utc(),
finished_at: None,
},
vec![
(None, b"unit test".to_vec()),
(
Some(Range {
start: 20,
length: 4,
}),
b"unit".to_vec(),
),
],
),
// Large piece for boundary testing.
(
"large_piece",
{
let size = ByteSize::mib(50).as_u64();
(0..size).map(|i| (i % 256) as u8).collect()
},
Piece {
number: 2,
offset: 0,
length: ByteSize::mib(50).as_u64(),
digest: "".to_string(),
parent_id: None,
uploading_count: 0,
uploaded_count: 0,
updated_at: chrono::Utc::now().naive_utc(),
created_at: chrono::Utc::now().naive_utc(),
finished_at: None,
},
vec![
// Full read.
(
None,
(0..ByteSize::mib(50).as_u64())
.map(|i| (i % 256) as u8)
.collect(),
),
// Read first 1MiB.
(
Some(Range {
start: 0,
length: ByteSize::mib(1).as_u64(),
}),
(0..ByteSize::mib(1).as_u64())
.map(|i| (i % 256) as u8)
.collect(),
),
// Read last 1MiB.
(
Some(Range {
start: ByteSize::mib(49).as_u64(),
length: ByteSize::mib(1).as_u64(),
}),
(ByteSize::mib(49).as_u64()..ByteSize::mib(50).as_u64())
.map(|i| (i % 256) as u8)
.collect(),
),
],
),
];
// Write all pieces.
for (id, content, _, _) in &test_pieces {
cache
.write_piece("task1", id, Bytes::copy_from_slice(content))
.await
.unwrap();
}
// Test all pieces with their read ranges.
for (id, _, piece, ranges) in &test_pieces {
for (range, expected_content) in ranges {
let mut reader = cache
.read_piece("task1", id, piece.clone(), *range)
.await
.unwrap();
let mut buffer = Vec::new();
reader.read_to_end(&mut buffer).await.unwrap();
assert_eq!(&buffer, expected_content);
}
}
}
#[tokio::test]
async fn test_concurrent_read_same_piece() {
let config = Config {
storage: Storage {
cache_capacity: ByteSize::mib(10),
..Default::default()
},
..Default::default()
};
let mut cache = Cache::new(Arc::new(config));
cache.put_task("task1", ByteSize::mib(1).as_u64()).await;
let content = b"test data for concurrent read".to_vec();
cache
.write_piece("task1", "piece1", Bytes::from(content.clone()))
.await
.unwrap();
let cache_arc = Arc::new(cache);
let mut join_set = tokio::task::JoinSet::new();
// Spawn concurrent readers.
for i in 0..50 {
let cache_clone = cache_arc.clone();
let expected_content = content.clone();
join_set.spawn(async move {
let piece = Piece {
number: 0,
offset: 0,
length: expected_content.len() as u64,
digest: "".to_string(),
parent_id: None,
uploading_count: 0,
uploaded_count: 0,
updated_at: chrono::Utc::now().naive_utc(),
created_at: chrono::Utc::now().naive_utc(),
finished_at: None,
};
let range = if i % 2 == 0 {
None
} else {
Some(Range {
start: 0,
length: 5,
})
};
let mut reader = cache_clone
.read_piece("task1", "piece1", piece, range)
.await
.unwrap_or_else(|e| panic!("Reader {} failed: {:?}.", i, e));
let mut buffer = Vec::new();
reader.read_to_end(&mut buffer).await.unwrap();
if let Some(range) = range {
assert_eq!(buffer, &expected_content[..range.length as usize]);
} else {
assert_eq!(buffer, expected_content);
}
});
}
while let Some(result) = join_set.join_next().await {
assert!(result.is_ok());
}
}
#[tokio::test]
async fn test_concurrent_write_different_pieces() {
let config = Config {
storage: Storage {
cache_capacity: ByteSize::mib(10),
..Default::default()
},
..Default::default()
};
let mut cache = Cache::new(Arc::new(config));
cache.put_task("task1", ByteSize::mib(1).as_u64()).await;
let cache_arc = Arc::new(cache);
let mut join_set = tokio::task::JoinSet::new();
// Spawn concurrent writers.
for i in 0..50 {
let cache_clone = cache_arc.clone();
let content = format!("content for piece {}", i).into_bytes();
join_set.spawn(async move {
let piece_id = format!("piece{}", i);
let result = cache_clone
.write_piece("task1", &piece_id, Bytes::from(content.clone()))
.await;
assert!(result.is_ok());
let piece = Piece {
number: 0,
offset: 0,
length: content.len() as u64,
digest: "".to_string(),
parent_id: None,
uploading_count: 0,
uploaded_count: 0,
updated_at: chrono::Utc::now().naive_utc(),
created_at: chrono::Utc::now().naive_utc(),
finished_at: None,
};
let mut reader = cache_clone
.read_piece("task1", &piece_id, piece, None)
.await
.unwrap();
let mut buffer = Vec::new();
reader.read_to_end(&mut buffer).await.unwrap();
assert_eq!(buffer, content);
});
}
while let Some(result) = join_set.join_next().await {
assert!(result.is_ok());
}
}
#[tokio::test]
async fn test_concurrent_write_same_piece() {
let config = Config {
storage: Storage {
cache_capacity: ByteSize::mib(10),
..Default::default()
},
..Default::default()
};
let mut cache = Cache::new(Arc::new(config));
cache.put_task("task1", ByteSize::mib(1).as_u64()).await;
let original_content = b"original content".to_vec();
cache
.write_piece("task1", "piece1", Bytes::from(original_content.clone()))
.await
.unwrap();
let cache_arc = Arc::new(cache);
let mut join_set = tokio::task::JoinSet::new();
// Spawn concurrent writers.
for i in 0..50 {
let cache_clone = cache_arc.clone();
let new_content = format!("new content from writer {}", i).into_bytes();
join_set.spawn(async move {
let result = cache_clone
.write_piece("task1", "piece1", Bytes::from(new_content))
.await;
assert!(result.is_ok());
});
}
while let Some(result) = join_set.join_next().await {
assert!(result.is_ok());
}
let piece = Piece {
number: 0,
offset: 0,
length: original_content.len() as u64,
digest: "".to_string(),
parent_id: None,
uploading_count: 0,
uploaded_count: 0,
updated_at: chrono::Utc::now().naive_utc(),
created_at: chrono::Utc::now().naive_utc(),
finished_at: None,
};
let mut reader = cache_arc
.read_piece("task1", "piece1", piece, None)
.await
.unwrap();
let mut buffer = Vec::new();
reader.read_to_end(&mut buffer).await.unwrap();
assert_eq!(buffer, original_content);
}
}
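
For readers skimming the diff, here is a minimal end-to-end sketch of the memory-cache flow documented above: reserve a task, write a piece, then read it back. It is written as if it sat next to the unit tests (so it reuses their imports), elides error handling with unwrap(), and is an illustration rather than part of the original file.

#[tokio::test]
async fn sketch_put_write_read() {
    let config = Config {
        storage: Storage {
            cache_capacity: ByteSize::mib(10),
            ..Default::default()
        },
        ..Default::default()
    };
    let mut cache = Cache::new(Arc::new(config));

    // A task must be registered (and fit within the capacity) before pieces can be written.
    cache.put_task("task1", ByteSize::mib(1).as_u64()).await;

    cache
        .write_piece("task1", "piece1", Bytes::from_static(b"hello world"))
        .await
        .unwrap();
    assert!(cache.contains_piece("task1", "piece1").await);

    // read_piece returns an AsyncRead over the cached bytes; a Range could narrow it.
    let piece = Piece {
        number: 0,
        offset: 0,
        length: 11,
        digest: "".to_string(),
        parent_id: None,
        uploading_count: 0,
        uploaded_count: 0,
        updated_at: chrono::Utc::now().naive_utc(),
        created_at: chrono::Utc::now().naive_utc(),
        finished_at: None,
    };
    let mut reader = cache
        .read_piece("task1", "piece1", piece, None)
        .await
        .unwrap();
    let mut buffer = Vec::new();
    reader.read_to_end(&mut buffer).await.unwrap();
    assert_eq!(buffer, b"hello world".to_vec());
}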

File diff suppressed because it is too large

View File

@ -14,7 +14,6 @@
* limitations under the License.
*/
use chrono::NaiveDateTime;
use dragonfly_api::common::v2::Range;
use dragonfly_client_config::dfdaemon::Config;
use dragonfly_client_core::{Error, Result};
@ -25,17 +24,14 @@ use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use tokio::io::AsyncRead;
use tokio::time::sleep;
use tokio_util::either::Either;
use tracing::{debug, error, info, instrument, warn};
use tracing::{error, info, instrument};
pub mod cache;
pub mod content;
pub mod metadata;
pub mod storage_engine;
/// DEFAULT_WAIT_FOR_PIECE_FINISHED_INTERVAL is the default interval for waiting for the piece to be finished.
pub const DEFAULT_WAIT_FOR_PIECE_FINISHED_INTERVAL: Duration = Duration::from_millis(100);
pub const DEFAULT_WAIT_FOR_PIECE_FINISHED_INTERVAL: Duration = Duration::from_millis(500);
/// Storage is the storage of the task.
pub struct Storage {
@ -47,84 +43,54 @@ pub struct Storage {
/// content implements the content storage.
content: content::Content,
/// cache implements the cache storage.
cache: cache::Cache,
}
/// Storage implements the storage.
impl Storage {
/// new returns a new storage.
#[instrument(skip_all)]
pub async fn new(config: Arc<Config>, dir: &Path, log_dir: PathBuf) -> Result<Self> {
let metadata = metadata::Metadata::new(config.clone(), dir, &log_dir)?;
let content = content::Content::new(config.clone(), dir).await?;
let cache = cache::Cache::new(config.clone());
Ok(Storage {
config,
metadata,
content,
cache,
})
}
/// total_space returns the total space of the disk.
pub fn total_space(&self) -> Result<u64> {
self.content.total_space()
}
/// available_space returns the available space of the disk.
pub fn available_space(&self) -> Result<u64> {
self.content.available_space()
}
/// has_enough_space checks if the storage has enough space to store the content.
pub fn has_enough_space(&self, content_length: u64) -> Result<bool> {
self.content.has_enough_space(content_length)
}
/// hard_link_task hard links the task content to the destination.
/// hard_link_or_copy_task hard links or copies the task content to the destination.
#[instrument(skip_all)]
pub async fn hard_link_task(&self, task_id: &str, to: &Path) -> Result<()> {
self.content.hard_link_task(task_id, to).await
pub async fn hard_link_or_copy_task(
&self,
task: metadata::Task,
to: &Path,
range: Option<Range>,
) -> Result<()> {
self.content.hard_link_or_copy_task(task, to, range).await
}
/// copy_task copies the task content to the destination.
/// read_task_by_range returns the reader of the task by range.
#[instrument(skip_all)]
pub async fn copy_task(&self, id: &str, to: &Path) -> Result<()> {
self.content.copy_task(id, to).await
pub async fn read_task_by_range(
&self,
task_id: &str,
range: Range,
) -> Result<impl AsyncRead + 'static> {
self.content.read_task_by_range(task_id, range).await
}
/// is_same_dev_inode_as_task checks if the task content is on the same device inode as the
/// destination.
pub async fn is_same_dev_inode_as_task(&self, id: &str, to: &Path) -> Result<bool> {
self.content.is_same_dev_inode_as_task(id, to).await
}
/// prepare_download_task_started prepares the metadata of the task when the task downloads
/// started.
pub async fn prepare_download_task_started(&self, id: &str) -> Result<metadata::Task> {
self.metadata.download_task_started(id, None, None, None)
}
/// download_task_started updates the metadata of the task and create task content
/// when the task downloads started.
/// download_task_started updates the metadata of the task when the task downloads started.
#[instrument(skip_all)]
pub async fn download_task_started(
pub fn download_task_started(
&self,
id: &str,
piece_length: u64,
content_length: u64,
piece_length: Option<u64>,
content_length: Option<u64>,
response_header: Option<HeaderMap>,
) -> Result<metadata::Task> {
self.content.create_task(id, content_length).await?;
self.metadata.download_task_started(
id,
Some(piece_length),
Some(content_length),
response_header,
)
self.metadata
.download_task_started(id, piece_length, content_length, response_header)
}
/// download_task_finished updates the metadata of the task when the task downloads finished.
@ -163,12 +129,6 @@ impl Storage {
self.metadata.get_task(id)
}
/// is_task_exists returns whether the task exists.
#[instrument(skip_all)]
pub fn is_task_exists(&self, id: &str) -> Result<bool> {
self.metadata.is_task_exists(id)
}
/// get_tasks returns the task metadatas.
#[instrument(skip_all)]
pub fn get_tasks(&self) -> Result<Vec<metadata::Task>> {
@ -189,103 +149,66 @@ impl Storage {
self.content.delete_task(id).await.unwrap_or_else(|err| {
error!("delete task content failed: {}", err);
});
let mut cache = self.cache.clone();
cache.delete_task(id).await.unwrap_or_else(|err| {
info!("delete task from cache failed: {}", err);
});
}
/// hard_link_persistent_cache_task hard links the persistent cache task content to the destination.
/// hard_link_or_copy_persistent_cache_task hard links or copies the persistent cache task content to the destination.
#[instrument(skip_all)]
pub async fn hard_link_persistent_cache_task(&self, task_id: &str, to: &Path) -> Result<()> {
self.content
.hard_link_persistent_cache_task(task_id, to)
.await
}
/// copy_persistent_cache_task copies the persistent cache task content to the destination.
#[instrument(skip_all)]
pub async fn copy_persistent_cache_task(&self, id: &str, to: &Path) -> Result<()> {
self.content.copy_persistent_cache_task(id, to).await
}
/// is_same_dev_inode_as_persistent_cache_task checks if the persistent cache task content is on the same device inode as the
/// destination.
pub async fn is_same_dev_inode_as_persistent_cache_task(
pub async fn hard_link_or_copy_persistent_cache_task(
&self,
id: &str,
task: metadata::PersistentCacheTask,
to: &Path,
) -> Result<bool> {
) -> Result<()> {
self.content
.is_same_dev_inode_as_persistent_cache_task(id, to)
.hard_link_or_copy_persistent_cache_task(task, to)
.await
}
/// create_persistent_cache_task_started creates a new persistent cache task.
/// create_persistent_persistent_cache_task creates a new persistent cache task.
#[instrument(skip_all)]
pub async fn create_persistent_cache_task_started(
pub async fn create_persistent_persistent_cache_task(
&self,
id: &str,
ttl: Duration,
path: &Path,
piece_length: u64,
content_length: u64,
expected_digest: &str,
) -> Result<metadata::PersistentCacheTask> {
let metadata = self.metadata.create_persistent_cache_task_started(
let response = self.content.write_persistent_cache_task(id, path).await?;
let digest = Digest::new(Algorithm::Crc32, response.hash);
if expected_digest != digest.to_string() {
return Err(Error::DigestMismatch(
expected_digest.to_string(),
digest.to_string(),
));
}
self.metadata.create_persistent_persistent_cache_task(
id,
ttl,
piece_length,
content_length,
)?;
self.content
.create_persistent_cache_task(id, content_length)
.await?;
Ok(metadata)
digest.to_string().as_str(),
)
}
/// create_persistent_cache_task_finished updates the metadata of the persistent cache task
/// when the persistent cache task creates finished.
/// download_persistent_cache_task_started updates the metadata of the persistent cache task when the persistent cache task downloads started.
#[instrument(skip_all)]
pub async fn create_persistent_cache_task_finished(
&self,
id: &str,
) -> Result<metadata::PersistentCacheTask> {
self.metadata.create_persistent_cache_task_finished(id)
}
/// create_persistent_cache_task_failed deletes the persistent cache task when
/// the persistent cache task creates failed.
#[instrument(skip_all)]
pub async fn create_persistent_cache_task_failed(&self, id: &str) {
self.delete_persistent_cache_task(id).await;
}
/// download_persistent_cache_task_started updates the metadata of the persistent cache task
/// and creates the persistent cache task content when the persistent cache task downloads started.
#[instrument(skip_all)]
pub async fn download_persistent_cache_task_started(
pub fn download_persistent_cache_task_started(
&self,
id: &str,
ttl: Duration,
persistent: bool,
piece_length: u64,
content_length: u64,
created_at: NaiveDateTime,
) -> Result<metadata::PersistentCacheTask> {
let metadata = self.metadata.download_persistent_cache_task_started(
self.metadata.download_persistent_cache_task_started(
id,
ttl,
persistent,
piece_length,
content_length,
created_at,
)?;
self.content
.create_persistent_cache_task(id, content_length)
.await?;
Ok(metadata)
)
}
/// download_persistent_cache_task_finished updates the metadata of the persistent cache task when the persistent cache task downloads finished.
@ -324,18 +247,6 @@ impl Storage {
self.metadata.get_persistent_cache_task(id)
}
/// persist_persistent_cache_task persists the persistent cache task metadata.
#[instrument(skip_all)]
pub fn persist_persistent_cache_task(&self, id: &str) -> Result<metadata::PersistentCacheTask> {
self.metadata.persist_persistent_cache_task(id)
}
/// is_persistent_cache_task_exists returns whether the persistent cache task exists.
#[instrument(skip_all)]
pub fn is_persistent_cache_task_exists(&self, id: &str) -> Result<bool> {
self.metadata.is_persistent_cache_task_exists(id)
}
/// get_tasks returns the task metadatas.
#[instrument(skip_all)]
pub fn get_persistent_cache_tasks(&self) -> Result<Vec<metadata::PersistentCacheTask>> {
@ -351,10 +262,6 @@ impl Storage {
error!("delete persistent cache task metadata failed: {}", err);
});
self.metadata.delete_pieces(id).unwrap_or_else(|err| {
error!("delete persistent cache piece metadatas failed: {}", err);
});
self.content
.delete_persistent_cache_task(id)
.await
@ -363,88 +270,38 @@ impl Storage {
});
}
/// create_persistent_cache_piece creates a new persistent cache piece.
#[instrument(skip_all)]
pub async fn create_persistent_cache_piece<R: AsyncRead + Unpin + ?Sized>(
&self,
piece_id: &str,
task_id: &str,
number: u32,
offset: u64,
length: u64,
reader: &mut R,
) -> Result<metadata::Piece> {
let response = self
.content
.write_persistent_cache_piece(task_id, offset, length, reader)
.await?;
let digest = Digest::new(Algorithm::Crc32, response.hash);
self.metadata.create_persistent_cache_piece(
piece_id,
number,
offset,
length,
digest.to_string().as_str(),
)
}
/// download_piece_started updates the metadata of the piece and writes
/// the data of piece to file when the piece downloads started.
#[instrument(skip_all)]
pub async fn download_piece_started(
&self,
piece_id: &str,
task_id: &str,
number: u32,
) -> Result<metadata::Piece> {
// Wait for the piece to be finished.
match self.wait_for_piece_finished(piece_id).await {
match self.wait_for_piece_finished(task_id, number).await {
Ok(piece) => Ok(piece),
// If the piece is not found or waiting times out, create the piece metadata.
Err(_) => self.metadata.download_piece_started(piece_id, number),
Err(_) => self.metadata.download_piece_started(task_id, number),
}
}
/// download_piece_from_source_finished is used for downloading piece from source.
#[allow(clippy::too_many_arguments)]
#[instrument(skip_all)]
pub async fn download_piece_from_source_finished<R: AsyncRead + Unpin + ?Sized>(
&self,
piece_id: &str,
task_id: &str,
offset: u64,
length: u64,
reader: &mut R,
timeout: Duration,
) -> Result<metadata::Piece> {
tokio::select! {
piece = self.handle_downloaded_from_source_finished(piece_id, task_id, offset, length, reader) => {
piece
}
_ = sleep(timeout) => {
Err(Error::DownloadPieceFinished(piece_id.to_string()))
}
}
}
// handle_downloaded_from_source_finished handles the downloaded piece from source.
#[instrument(skip_all)]
async fn handle_downloaded_from_source_finished<R: AsyncRead + Unpin + ?Sized>(
&self,
piece_id: &str,
task_id: &str,
number: u32,
offset: u64,
length: u64,
reader: &mut R,
) -> Result<metadata::Piece> {
let response = self
.content
.write_piece(task_id, offset, length, reader)
.await?;
let response = self.content.write_piece(task_id, offset, reader).await?;
let digest = Digest::new(Algorithm::Crc32, response.hash);
self.metadata.download_piece_finished(
piece_id,
task_id,
number,
offset,
length,
digest.to_string().as_str(),
@ -452,48 +309,18 @@ impl Storage {
)
}
/// download_piece_from_parent_finished is used for downloading piece from parent.
#[allow(clippy::too_many_arguments)]
/// download_piece_from_remote_peer_finished is used for downloading piece from remote peer.
#[instrument(skip_all)]
pub async fn download_piece_from_parent_finished<R: AsyncRead + Unpin + ?Sized>(
pub async fn download_piece_from_remote_peer_finished<R: AsyncRead + Unpin + ?Sized>(
&self,
piece_id: &str,
task_id: &str,
number: u32,
offset: u64,
length: u64,
expected_digest: &str,
parent_id: &str,
reader: &mut R,
timeout: Duration,
) -> Result<metadata::Piece> {
tokio::select! {
piece = self.handle_downloaded_piece_from_parent_finished(piece_id, task_id, offset, length, expected_digest, parent_id, reader) => {
piece
}
_ = sleep(timeout) => {
Err(Error::DownloadPieceFinished(piece_id.to_string()))
}
}
}
// handle_downloaded_piece_from_parent_finished handles the downloaded piece from parent.
#[allow(clippy::too_many_arguments)]
#[instrument(skip_all)]
async fn handle_downloaded_piece_from_parent_finished<R: AsyncRead + Unpin + ?Sized>(
&self,
piece_id: &str,
task_id: &str,
offset: u64,
length: u64,
expected_digest: &str,
parent_id: &str,
reader: &mut R,
) -> Result<metadata::Piece> {
let response = self
.content
.write_piece(task_id, offset, length, reader)
.await?;
let response = self.content.write_piece(task_id, offset, reader).await?;
let length = response.length;
let digest = Digest::new(Algorithm::Crc32, response.hash);
@ -506,7 +333,8 @@ impl Storage {
}
self.metadata.download_piece_finished(
piece_id,
task_id,
number,
offset,
length,
digest.to_string().as_str(),
@ -516,8 +344,8 @@ impl Storage {
/// download_piece_failed updates the metadata of the piece when the piece downloads failed.
#[instrument(skip_all)]
pub fn download_piece_failed(&self, piece_id: &str) -> Result<()> {
self.metadata.download_piece_failed(piece_id)
pub fn download_piece_failed(&self, task_id: &str, number: u32) -> Result<()> {
self.metadata.download_piece_failed(task_id, number)
}
/// upload_piece updates the metadata of the piece and
@ -525,37 +353,26 @@ impl Storage {
#[instrument(skip_all)]
pub async fn upload_piece(
&self,
piece_id: &str,
task_id: &str,
number: u32,
range: Option<Range>,
) -> Result<impl AsyncRead> {
// Wait for the piece to be finished.
self.wait_for_piece_finished(piece_id).await?;
self.wait_for_piece_finished(task_id, number).await?;
// Start uploading the task.
self.metadata.upload_task_started(task_id)?;
// Get the piece metadata and return the content of the piece.
match self.metadata.get_piece(piece_id) {
Ok(Some(piece)) => {
if self.cache.contains_piece(task_id, piece_id).await {
match self
.cache
.read_piece(task_id, piece_id, piece.clone(), range)
.await
{
Ok(reader) => {
// Finish uploading the task.
self.metadata.upload_task_finished(task_id)?;
debug!("get piece from cache: {}", piece_id);
return Ok(Either::Left(reader));
}
Err(err) => {
// Start uploading the piece.
if let Err(err) = self.metadata.upload_piece_started(task_id, number) {
// Failed uploading the task.
self.metadata.upload_task_failed(task_id)?;
return Err(err);
}
}
}
// Get the piece metadata and return the content of the piece.
match self.metadata.get_piece(task_id, number) {
Ok(Some(piece)) => {
match self
.content
.read_piece(task_id, piece.offset, piece.length, range)
@ -564,11 +381,17 @@ impl Storage {
Ok(reader) => {
// Finish uploading the task.
self.metadata.upload_task_finished(task_id)?;
Ok(Either::Right(reader))
// Finish uploading the piece.
self.metadata.upload_piece_finished(task_id, number)?;
Ok(reader)
}
Err(err) => {
// Failed uploading the task.
self.metadata.upload_task_failed(task_id)?;
// Failed uploading the piece.
self.metadata.upload_piece_failed(task_id, number)?;
Err(err)
}
}
@ -576,238 +399,70 @@ impl Storage {
Ok(None) => {
// Failed uploading the task.
self.metadata.upload_task_failed(task_id)?;
Err(Error::PieceNotFound(piece_id.to_string()))
// Failed uploading the piece.
self.metadata.upload_piece_failed(task_id, number)?;
Err(Error::PieceNotFound(self.piece_id(task_id, number)))
}
Err(err) => {
// Failed uploading the task.
self.metadata.upload_task_failed(task_id)?;
// Failed uploading the piece.
self.metadata.upload_piece_failed(task_id, number)?;
Err(err)
}
}
}
/// get_piece returns the piece metadata.
pub fn get_piece(&self, piece_id: &str) -> Result<Option<metadata::Piece>> {
self.metadata.get_piece(piece_id)
}
/// is_piece_exists returns whether the piece exists.
#[instrument(skip_all)]
pub fn is_piece_exists(&self, piece_id: &str) -> Result<bool> {
self.metadata.is_piece_exists(piece_id)
pub fn get_piece(&self, task_id: &str, number: u32) -> Result<Option<metadata::Piece>> {
self.metadata.get_piece(task_id, number)
}
/// get_pieces returns the piece metadatas.
#[instrument(skip_all)]
pub fn get_pieces(&self, task_id: &str) -> Result<Vec<metadata::Piece>> {
self.metadata.get_pieces(task_id)
}
/// piece_id returns the piece id.
#[inline]
#[instrument(skip_all)]
pub fn piece_id(&self, task_id: &str, number: u32) -> String {
self.metadata.piece_id(task_id, number)
}
/// download_persistent_cache_piece_started updates the metadata of the persistent cache piece and writes
/// the data of piece to file when the persistent cache piece downloads started.
#[instrument(skip_all)]
pub async fn download_persistent_cache_piece_started(
&self,
piece_id: &str,
number: u32,
) -> Result<metadata::Piece> {
// Wait for the piece to be finished.
match self
.wait_for_persistent_cache_piece_finished(piece_id)
.await
{
Ok(piece) => Ok(piece),
// If the piece is not found or waiting times out, create the piece metadata.
Err(_) => self.metadata.download_piece_started(piece_id, number),
}
}
/// download_persistent_cache_piece_from_parent_finished is used for downloading persistent cache piece from parent.
#[allow(clippy::too_many_arguments)]
#[instrument(skip_all)]
pub async fn download_persistent_cache_piece_from_parent_finished<
R: AsyncRead + Unpin + ?Sized,
>(
&self,
piece_id: &str,
task_id: &str,
offset: u64,
length: u64,
expected_digest: &str,
parent_id: &str,
reader: &mut R,
) -> Result<metadata::Piece> {
let response = self
.content
.write_persistent_cache_piece(task_id, offset, length, reader)
.await?;
let length = response.length;
let digest = Digest::new(Algorithm::Crc32, response.hash);
// Check the digest of the piece.
if expected_digest != digest.to_string() {
return Err(Error::DigestMismatch(
expected_digest.to_string(),
digest.to_string(),
));
}
self.metadata.download_piece_finished(
piece_id,
offset,
length,
digest.to_string().as_str(),
Some(parent_id.to_string()),
)
}
/// download_persistent_cache_piece_failed updates the metadata of the persistent cache piece when the persistent cache piece downloads failed.
#[instrument(skip_all)]
pub fn download_persistent_cache_piece_failed(&self, piece_id: &str) -> Result<()> {
self.metadata.download_piece_failed(piece_id)
}
/// upload_persistent_cache_piece updates the metadata of the piece and then
/// returns the data of the piece.
#[instrument(skip_all)]
pub async fn upload_persistent_cache_piece(
&self,
piece_id: &str,
task_id: &str,
range: Option<Range>,
) -> Result<impl AsyncRead> {
// Wait for the persistent cache piece to be finished.
self.wait_for_persistent_cache_piece_finished(piece_id)
.await?;
// Start uploading the persistent cache task.
self.metadata
.upload_persistent_cache_task_started(task_id)?;
// Get the persistent cache piece metadata and return the content of the persistent cache piece.
match self.metadata.get_piece(piece_id) {
Ok(Some(piece)) => {
match self
.content
.read_persistent_cache_piece(task_id, piece.offset, piece.length, range)
.await
{
Ok(reader) => {
// Finish uploading the persistent cache task.
self.metadata
.upload_persistent_cache_task_finished(task_id)?;
Ok(reader)
}
Err(err) => {
// Failed uploading the persistent cache task.
self.metadata.upload_persistent_cache_task_failed(task_id)?;
Err(err)
}
}
}
Ok(None) => {
// Failed uploading the persistent cache task.
self.metadata.upload_persistent_cache_task_failed(task_id)?;
Err(Error::PieceNotFound(piece_id.to_string()))
}
Err(err) => {
// Failed uploading the persistent cache task.
self.metadata.upload_persistent_cache_task_failed(task_id)?;
Err(err)
}
}
}
/// get_persistent_cache_piece returns the persistent cache piece metadata.
#[instrument(skip_all)]
pub fn get_persistent_cache_piece(&self, piece_id: &str) -> Result<Option<metadata::Piece>> {
self.metadata.get_piece(piece_id)
}
/// is_persistent_cache_piece_exists returns whether the persistent cache piece exists.
#[instrument(skip_all)]
pub fn is_persistent_cache_piece_exists(&self, piece_id: &str) -> Result<bool> {
self.metadata.is_piece_exists(piece_id)
}
/// get_persistent_cache_pieces returns the persistent cache piece metadatas.
pub fn get_persistent_cache_pieces(&self, task_id: &str) -> Result<Vec<metadata::Piece>> {
self.metadata.get_pieces(task_id)
}
/// persistent_cache_piece_id returns the persistent cache piece id.
#[inline]
pub fn persistent_cache_piece_id(&self, task_id: &str, number: u32) -> String {
self.metadata.piece_id(task_id, number)
}
/// wait_for_piece_finished waits for the piece to be finished.
#[instrument(skip_all)]
async fn wait_for_piece_finished(&self, piece_id: &str) -> Result<metadata::Piece> {
// Total timeout for downloading a piece, combining the download time and the time to write to storage.
let wait_timeout = tokio::time::sleep(
self.config.download.piece_timeout + self.config.storage.write_piece_timeout,
);
tokio::pin!(wait_timeout);
async fn wait_for_piece_finished(&self, task_id: &str, number: u32) -> Result<metadata::Piece> {
// Initialize the timeout of piece.
let piece_timeout = tokio::time::sleep(self.config.download.piece_timeout);
tokio::pin!(piece_timeout);
// Initialize the interval of piece.
let mut wait_for_piece_count = 0;
let mut interval = tokio::time::interval(DEFAULT_WAIT_FOR_PIECE_FINISHED_INTERVAL);
loop {
tokio::select! {
_ = interval.tick() => {
let piece = self
.get_piece(piece_id)?
.ok_or_else(|| Error::PieceNotFound(piece_id.to_string()))?;
.get_piece(task_id, number)?
.ok_or_else(|| Error::PieceNotFound(self.piece_id(task_id, number)))?;
// If the piece is finished, return.
if piece.is_finished() {
debug!("wait piece finished success");
info!("wait piece finished success");
return Ok(piece);
}
}
_ = &mut wait_timeout => {
self.metadata.wait_for_piece_finished_failed(piece_id).unwrap_or_else(|err| error!("delete piece metadata failed: {}", err));
return Err(Error::WaitForPieceFinishedTimeout(piece_id.to_string()));
}
}
}
}
/// wait_for_persistent_cache_piece_finished waits for the persistent cache piece to be finished.
#[instrument(skip_all)]
async fn wait_for_persistent_cache_piece_finished(
&self,
piece_id: &str,
) -> Result<metadata::Piece> {
// Total timeout for downloading a piece, combining the download time and the time to write to storage.
let wait_timeout = tokio::time::sleep(
self.config.download.piece_timeout + self.config.storage.write_piece_timeout,
);
tokio::pin!(wait_timeout);
let mut interval = tokio::time::interval(DEFAULT_WAIT_FOR_PIECE_FINISHED_INTERVAL);
loop {
tokio::select! {
_ = interval.tick() => {
let piece = self
.get_persistent_cache_piece(piece_id)?
.ok_or_else(|| Error::PieceNotFound(piece_id.to_string()))?;
// If the piece is finished, return.
if piece.is_finished() {
debug!("wait piece finished success");
return Ok(piece);
if wait_for_piece_count > 0 {
info!("wait piece finished");
}
wait_for_piece_count += 1;
}
_ = &mut wait_timeout => {
self.metadata.wait_for_piece_finished_failed(piece_id).unwrap_or_else(|err| error!("delete piece metadata failed: {}", err));
return Err(Error::WaitForPieceFinishedTimeout(piece_id.to_string()));
_ = &mut piece_timeout => {
self.metadata.wait_for_piece_finished_failed(task_id, number).unwrap_or_else(|err| error!("delete piece metadata failed: {}", err));
return Err(Error::WaitForPieceFinishedTimeout(self.piece_id(task_id, number)));
}
}
}
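Both wait helpers above follow the same shape: poll the piece metadata on a fixed interval and give up once an overall timeout elapses, deleting the stale metadata on timeout. A minimal standalone sketch of that tokio::select! pattern, using an illustrative is_finished closure rather than the crate's own types:

use std::time::Duration;

/// Polls `is_finished` every `interval` until it returns true or `timeout` elapses.
async fn wait_until_finished<F: FnMut() -> bool>(
    mut is_finished: F,
    interval: Duration,
    timeout: Duration,
) -> Result<(), &'static str> {
    let timeout = tokio::time::sleep(timeout);
    tokio::pin!(timeout);

    let mut ticker = tokio::time::interval(interval);
    loop {
        tokio::select! {
            _ = ticker.tick() => {
                if is_finished() {
                    return Ok(());
                }
            }
            _ = &mut timeout => return Err("wait for piece finished timeout"),
        }
    }
}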

View File

@ -17,7 +17,8 @@
use chrono::{NaiveDateTime, Utc};
use dragonfly_client_config::dfdaemon::Config;
use dragonfly_client_core::{Error, Result};
use dragonfly_client_util::{digest, http::headermap_to_hashmap};
use dragonfly_client_util::http::reqwest_headermap_to_hashmap;
use rayon::prelude::*;
use reqwest::header::HeaderMap;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
@ -45,7 +46,7 @@ pub struct Task {
pub response_header: HashMap<String, String>,
/// uploading_count is the count of the task being uploaded by other peers.
pub uploading_count: i64,
pub uploading_count: u64,
/// uploaded_count is the number of times the task has been uploaded to other peers.
pub uploaded_count: u64,
@ -107,12 +108,15 @@ impl Task {
/// is_empty returns whether the task is empty.
pub fn is_empty(&self) -> bool {
match self.content_length() {
Some(content_length) => content_length == 0,
None => false,
if let Some(content_length) = self.content_length() {
if content_length == 0 {
return true;
}
}
false
}
/// piece_length returns the piece length of the task.
pub fn piece_length(&self) -> Option<u64> {
self.piece_length
@ -138,6 +142,9 @@ pub struct PersistentCacheTask {
/// ttl is the time to live of the persistent cache task.
pub ttl: Duration,
/// digest is the digest of the persistent cache task.
pub digest: String,
/// piece_length is the length of the piece.
pub piece_length: u64,
@ -184,7 +191,10 @@ impl PersistentCacheTask {
/// is_expired returns whether the persistent cache task is expired.
pub fn is_expired(&self) -> bool {
self.created_at + self.ttl < Utc::now().naive_utc()
// When the scheduler runs garbage collection, it triggers dfdaemon to evict the persistent cache task.
// But dfdaemon may not evict the persistent cache task in time, so twice the ttl is used as the
// expiration time to force-evict the persistent cache task.
self.created_at + self.ttl * 2 < Utc::now().naive_utc()
}
/// is_failed returns whether the persistent cache task downloads failed.
@ -199,7 +209,11 @@ impl PersistentCacheTask {
/// is_empty returns whether the persistent cache task is empty.
pub fn is_empty(&self) -> bool {
self.content_length == 0
if self.content_length == 0 {
return true;
}
false
}
/// is_persistent returns whether the persistent cache task is persistent.
@ -236,13 +250,14 @@ pub struct Piece {
/// parent_id is the parent id of the piece.
pub parent_id: Option<String>,
/// DEPRECATED: uploading_count is the count of the piece being uploaded by other peers.
pub uploading_count: i64,
/// uploading_count is the count of the piece being uploaded by other peers.
pub uploading_count: u64,
/// DEPRECATED: uploaded_count is the number of times the piece has been uploaded to other peers.
/// uploaded_count is the number of times the piece has been uploaded to other peers.
pub uploaded_count: u64,
/// updated_at is the time when the piece metadata is updated.
/// updated_at is the time when the piece metadata is updated. If the piece is downloaded
/// by other peers, it will also update updated_at.
pub updated_at: NaiveDateTime,
/// created_at is the time when the piece metadata is created.
@ -300,20 +315,6 @@ impl Piece {
None => None,
}
}
/// calculate_digest return the digest of the piece metadata, including the piece number,
/// offset, length and content digest. The digest is used to check the integrity of the
/// piece metadata.
pub fn calculate_digest(&self) -> String {
let mut hasher = crc32fast::Hasher::new();
hasher.update(&self.number.to_be_bytes());
hasher.update(&self.offset.to_be_bytes());
hasher.update(&self.length.to_be_bytes());
hasher.update(self.digest.as_bytes());
let encoded = hasher.finalize().to_string();
digest::Digest::new(digest::Algorithm::Crc32, encoded).to_string()
}
}
/// Metadata manages the metadata of [Task], [Piece] and [PersistentCacheTask].
@ -338,7 +339,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
// Convert the response header to hashmap.
let response_header = response_header
.as_ref()
.map(headermap_to_hashmap)
.map(reqwest_headermap_to_hashmap)
.unwrap_or_default();
let task = match self.db.get::<Task>(id.as_bytes())? {
@ -507,12 +508,6 @@ impl<E: StorageEngineOwned> Metadata<E> {
self.db.get(id.as_bytes())
}
/// is_task_exists checks if the task exists.
#[instrument(skip_all)]
pub fn is_task_exists(&self, id: &str) -> Result<bool> {
self.db.is_exist::<Task>(id.as_bytes())
}
/// get_tasks gets the task metadatas.
#[instrument(skip_all)]
pub fn get_tasks(&self) -> Result<Vec<Task>> {
@ -526,7 +521,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
.collect::<Result<Vec<Box<[u8]>>>>()?;
tasks
.iter()
.par_iter()
.map(|task| Task::deserialize_from(task))
.collect()
}
@ -538,14 +533,17 @@ impl<E: StorageEngineOwned> Metadata<E> {
self.db.delete::<Task>(id.as_bytes())
}
/// create_persistent_cache_task creates a new persistent cache task.
/// create_persistent_persistent_cache_task creates a new persistent cache task.
/// If importing the persistent cache task's content into the dfdaemon has finished,
/// the dfdaemon will create the persistent cache task metadata.
#[instrument(skip_all)]
pub fn create_persistent_cache_task_started(
pub fn create_persistent_persistent_cache_task(
&self,
id: &str,
ttl: Duration,
piece_length: u64,
content_length: u64,
digest: &str,
) -> Result<PersistentCacheTask> {
let task = PersistentCacheTask {
id: id.to_string(),
@ -553,8 +551,10 @@ impl<E: StorageEngineOwned> Metadata<E> {
ttl,
piece_length,
content_length,
digest: digest.to_string(),
updated_at: Utc::now().naive_utc(),
created_at: Utc::now().naive_utc(),
finished_at: Some(Utc::now().naive_utc()),
..Default::default()
};
@ -562,29 +562,6 @@ impl<E: StorageEngineOwned> Metadata<E> {
Ok(task)
}
/// create_persistent_cache_task_finished updates the metadata of the persistent cache task
/// when the persistent cache task finished.
#[instrument(skip_all)]
pub fn create_persistent_cache_task_finished(&self, id: &str) -> Result<PersistentCacheTask> {
let task = match self.db.get::<PersistentCacheTask>(id.as_bytes())? {
Some(mut task) => {
task.updated_at = Utc::now().naive_utc();
task.failed_at = None;
// If the persistent cache task is created by user, the finished_at has been set.
if task.finished_at.is_none() {
task.finished_at = Some(Utc::now().naive_utc());
}
task
}
None => return Err(Error::TaskNotFound(id.to_string())),
};
self.db.put(id.as_bytes(), &task)?;
Ok(task)
}
/// download_persistent_cache_task_started updates the metadata of the persistent cache task when
/// the persistent cache task downloads started. If the persistent cache task is downloaded by the scheduler
/// to create a persistent cache task, the persistent flag should be set to true.
@ -596,14 +573,10 @@ impl<E: StorageEngineOwned> Metadata<E> {
persistent: bool,
piece_length: u64,
content_length: u64,
created_at: NaiveDateTime,
) -> Result<PersistentCacheTask> {
let task = match self.db.get::<PersistentCacheTask>(id.as_bytes())? {
Some(mut task) => {
// If the task exists, update the task metadata.
task.ttl = ttl;
task.persistent = persistent;
task.piece_length = piece_length;
task.updated_at = Utc::now().naive_utc();
task.failed_at = None;
task
@ -615,7 +588,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
piece_length,
content_length,
updated_at: Utc::now().naive_utc(),
created_at,
created_at: Utc::now().naive_utc(),
..Default::default()
},
};
@ -711,34 +684,12 @@ impl<E: StorageEngineOwned> Metadata<E> {
Ok(task)
}
/// persist_persistent_cache_task persists the persistent cache task metadata.
#[instrument(skip_all)]
pub fn persist_persistent_cache_task(&self, id: &str) -> Result<PersistentCacheTask> {
let task = match self.db.get::<PersistentCacheTask>(id.as_bytes())? {
Some(mut task) => {
task.persistent = true;
task.updated_at = Utc::now().naive_utc();
task
}
None => return Err(Error::TaskNotFound(id.to_string())),
};
self.db.put(id.as_bytes(), &task)?;
Ok(task)
}
/// get_persistent_cache_task gets the persistent cache task metadata.
#[instrument(skip_all)]
pub fn get_persistent_cache_task(&self, id: &str) -> Result<Option<PersistentCacheTask>> {
self.db.get(id.as_bytes())
}
/// is_persistent_cache_task_exists checks if the persistent cache task exists.
#[instrument(skip_all)]
pub fn is_persistent_cache_task_exists(&self, id: &str) -> Result<bool> {
self.db.is_exist::<PersistentCacheTask>(id.as_bytes())
}
/// get_persistent_cache_tasks gets the persistent cache task metadatas.
#[instrument(skip_all)]
pub fn get_persistent_cache_tasks(&self) -> Result<Vec<PersistentCacheTask>> {
@ -753,40 +704,9 @@ impl<E: StorageEngineOwned> Metadata<E> {
self.db.delete::<PersistentCacheTask>(id.as_bytes())
}
/// create_persistent_cache_piece creates a new persistent cache piece whose content is
/// imported locally.
#[instrument(skip_all)]
pub fn create_persistent_cache_piece(
&self,
piece_id: &str,
number: u32,
offset: u64,
length: u64,
digest: &str,
) -> Result<Piece> {
// Construct the piece metadata.
let piece = Piece {
number,
offset,
length,
digest: digest.to_string(),
// Persistent cache piece does not have parent id, because the piece content is
// imported locally.
parent_id: None,
updated_at: Utc::now().naive_utc(),
created_at: Utc::now().naive_utc(),
finished_at: Some(Utc::now().naive_utc()),
..Default::default()
};
// Put the piece metadata.
self.db.put(piece_id.as_bytes(), &piece)?;
Ok(piece)
}
/// download_piece_started updates the metadata of the piece when the piece downloads started.
#[instrument(skip_all)]
pub fn download_piece_started(&self, piece_id: &str, number: u32) -> Result<Piece> {
pub fn download_piece_started(&self, task_id: &str, number: u32) -> Result<Piece> {
// Construct the piece metadata.
let piece = Piece {
number,
@ -796,7 +716,8 @@ impl<E: StorageEngineOwned> Metadata<E> {
};
// Put the piece metadata.
self.db.put(piece_id.as_bytes(), &piece)?;
self.db
.put(self.piece_id(task_id, number).as_bytes(), &piece)?;
Ok(piece)
}
@ -804,13 +725,16 @@ impl<E: StorageEngineOwned> Metadata<E> {
#[instrument(skip_all)]
pub fn download_piece_finished(
&self,
piece_id: &str,
task_id: &str,
number: u32,
offset: u64,
length: u64,
digest: &str,
parent_id: Option<String>,
) -> Result<Piece> {
let piece = match self.db.get::<Piece>(piece_id.as_bytes())? {
// Get the piece id.
let id = self.piece_id(task_id, number);
let piece = match self.db.get::<Piece>(id.as_bytes())? {
Some(mut piece) => {
piece.offset = offset;
piece.length = length;
@ -820,38 +744,87 @@ impl<E: StorageEngineOwned> Metadata<E> {
piece.finished_at = Some(Utc::now().naive_utc());
piece
}
None => return Err(Error::PieceNotFound(piece_id.to_string())),
None => return Err(Error::PieceNotFound(id)),
};
self.db.put(piece_id.as_bytes(), &piece)?;
self.db.put(id.as_bytes(), &piece)?;
Ok(piece)
}
/// download_piece_failed updates the metadata of the piece when the piece downloads failed.
#[instrument(skip_all)]
pub fn download_piece_failed(&self, piece_id: &str) -> Result<()> {
self.delete_piece(piece_id)
pub fn download_piece_failed(&self, task_id: &str, number: u32) -> Result<()> {
self.delete_piece(task_id, number)
}
/// wait_for_piece_finished_failed waits for the piece to be finished or failed.
#[instrument(skip_all)]
pub fn wait_for_piece_finished_failed(&self, piece_id: &str) -> Result<()> {
self.delete_piece(piece_id)
pub fn wait_for_piece_finished_failed(&self, task_id: &str, number: u32) -> Result<()> {
self.delete_piece(task_id, number)
}
/// upload_piece_started updates the metadata of the piece when piece uploads started.
#[instrument(skip_all)]
pub fn upload_piece_started(&self, task_id: &str, number: u32) -> Result<Piece> {
// Get the piece id.
let id = self.piece_id(task_id, number);
let piece = match self.db.get::<Piece>(id.as_bytes())? {
Some(mut piece) => {
piece.uploading_count += 1;
piece.updated_at = Utc::now().naive_utc();
piece
}
None => return Err(Error::PieceNotFound(id)),
};
self.db.put(id.as_bytes(), &piece)?;
Ok(piece)
}
/// upload_piece_finished updates the metadata of the piece when piece uploads finished.
#[instrument(skip_all)]
pub fn upload_piece_finished(&self, task_id: &str, number: u32) -> Result<Piece> {
// Get the piece id.
let id = self.piece_id(task_id, number);
let piece = match self.db.get::<Piece>(id.as_bytes())? {
Some(mut piece) => {
piece.uploading_count -= 1;
piece.uploaded_count += 1;
piece.updated_at = Utc::now().naive_utc();
piece
}
None => return Err(Error::PieceNotFound(id)),
};
self.db.put(id.as_bytes(), &piece)?;
Ok(piece)
}
/// upload_piece_failed updates the metadata of the piece when the piece uploads failed.
#[instrument(skip_all)]
pub fn upload_piece_failed(&self, task_id: &str, number: u32) -> Result<Piece> {
// Get the piece id.
let id = self.piece_id(task_id, number);
let piece = match self.db.get::<Piece>(id.as_bytes())? {
Some(mut piece) => {
piece.uploading_count -= 1;
piece.updated_at = Utc::now().naive_utc();
piece
}
None => return Err(Error::PieceNotFound(id)),
};
self.db.put(id.as_bytes(), &piece)?;
Ok(piece)
}
/// get_piece gets the piece metadata.
pub fn get_piece(&self, piece_id: &str) -> Result<Option<Piece>> {
self.db.get(piece_id.as_bytes())
}
/// is_piece_exists checks if the piece exists.
#[instrument(skip_all)]
pub fn is_piece_exists(&self, piece_id: &str) -> Result<bool> {
self.db.is_exist::<Piece>(piece_id.as_bytes())
pub fn get_piece(&self, task_id: &str, number: u32) -> Result<Option<Piece>> {
self.db.get(self.piece_id(task_id, number).as_bytes())
}
/// get_pieces gets the piece metadatas.
#[instrument(skip_all)]
pub fn get_pieces(&self, task_id: &str) -> Result<Vec<Piece>> {
let pieces = self
.db
@ -863,16 +836,17 @@ impl<E: StorageEngineOwned> Metadata<E> {
.collect::<Result<Vec<Box<[u8]>>>>()?;
pieces
.iter()
.par_iter()
.map(|piece| Piece::deserialize_from(piece))
.collect()
}
/// delete_piece deletes the piece metadata.
#[instrument(skip_all)]
pub fn delete_piece(&self, piece_id: &str) -> Result<()> {
info!("delete piece metadata {}", piece_id);
self.db.delete::<Piece>(piece_id.as_bytes())
pub fn delete_piece(&self, task_id: &str, number: u32) -> Result<()> {
info!("delete piece metadata {}", self.piece_id(task_id, number));
self.db
.delete::<Piece>(self.piece_id(task_id, number).as_bytes())
}
/// delete_pieces deletes the piece metadatas.
@ -888,7 +862,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
.collect::<Result<Vec<Box<[u8]>>>>()?;
let piece_ids_refs = piece_ids
.iter()
.par_iter()
.map(|id| {
let id_ref = id.as_ref();
info!(
@ -905,7 +879,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
}
/// piece_id returns the piece id.
#[inline]
#[instrument(skip_all)]
pub fn piece_id(&self, task_id: &str, number: u32) -> String {
format!("{}-{}", task_id, number)
}
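Because the composite key produced here is "<task_id>-<number>" and the task id is a 64-character hex digest (as in the tests below), all piece keys of one task share a 64-byte prefix, which is what the RocksDB engine's fixed_prefix(64) extractor relies on for prefix_iter and delete_pieces. A small illustration using the example digest from the tests:

fn piece_id(task_id: &str, number: u32) -> String {
    format!("{}-{}", task_id, number)
}

fn main() {
    let task_id = "d3c4e940ad06c47fc36ac67801e6f8e36cb400e2391708620bc7e865b102062c";
    assert_eq!(task_id.len(), 64);
    // Pieces 1 and 2 share the same 64-byte prefix, so a prefix scan over
    // the task id returns both of them.
    assert_eq!(piece_id(task_id, 1), format!("{}-1", task_id));
    assert!(piece_id(task_id, 2).starts_with(task_id));
}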
@ -938,25 +912,11 @@ impl Metadata<RocksdbStorageEngine> {
#[cfg(test)]
mod tests {
use super::*;
use tempfile::tempdir;
#[test]
fn test_calculate_digest() {
let piece = Piece {
number: 1,
offset: 0,
length: 1024,
digest: "crc32:1929153120".to_string(),
..Default::default()
};
let digest = piece.calculate_digest();
assert_eq!(digest, "crc32:3299754941");
}
use tempdir::TempDir;
#[test]
fn should_create_metadata() {
let dir = tempdir().unwrap();
let dir = TempDir::new("metadata").unwrap();
let log_dir = dir.path().join("log");
let metadata = Metadata::new(Arc::new(Config::default()), dir.path(), &log_dir).unwrap();
assert!(metadata.get_tasks().unwrap().is_empty());
@ -968,7 +928,7 @@ mod tests {
#[test]
fn test_task_lifecycle() {
let dir = tempdir().unwrap();
let dir = TempDir::new("metadata").unwrap();
let log_dir = dir.path().join("log");
let metadata = Metadata::new(Arc::new(Config::default()), dir.path(), &log_dir).unwrap();
let task_id = "d3c4e940ad06c47fc36ac67801e6f8e36cb400e2391708620bc7e865b102062c";
@ -997,20 +957,20 @@ mod tests {
// Test upload_task_started.
metadata.upload_task_started(task_id).unwrap();
let task = metadata.get_task(task_id).unwrap().unwrap();
assert_eq!(task.uploading_count, 1);
assert_eq!(task.uploading_count, 1,);
// Test upload_task_finished.
metadata.upload_task_finished(task_id).unwrap();
let task = metadata.get_task(task_id).unwrap().unwrap();
assert_eq!(task.uploading_count, 0);
assert_eq!(task.uploaded_count, 1);
assert_eq!(task.uploading_count, 0,);
assert_eq!(task.uploaded_count, 1,);
// Test upload_task_failed.
let task = metadata.upload_task_started(task_id).unwrap();
assert_eq!(task.uploading_count, 1);
let task = metadata.upload_task_failed(task_id).unwrap();
assert_eq!(task.uploading_count, 0);
assert_eq!(task.uploaded_count, 1);
assert_eq!(task.uploading_count, 0,);
assert_eq!(task.uploaded_count, 1,);
// Test get_tasks.
let task_id = "a535b115f18d96870f0422ac891f91dd162f2f391e4778fb84279701fcd02dd1";
@ -1028,49 +988,54 @@ mod tests {
#[test]
fn test_piece_lifecycle() {
let dir = tempdir().unwrap();
let dir = TempDir::new("metadata").unwrap();
let log_dir = dir.path().join("log");
let metadata = Metadata::new(Arc::new(Config::default()), dir.path(), &log_dir).unwrap();
let task_id = "d3c4e940ad06c47fc36ac67801e6f8e36cb400e2391708620bc7e865b102062c";
let piece_id = metadata.piece_id(task_id, 1);
// Test download_piece_started.
metadata
.download_piece_started(piece_id.as_str(), 1)
.unwrap();
let piece = metadata.get_piece(piece_id.as_str()).unwrap().unwrap();
assert_eq!(piece.number, 1);
metadata.download_piece_started(task_id, 1).unwrap();
let piece = metadata.get_piece(task_id, 1).unwrap().unwrap();
assert_eq!(piece.number, 1,);
// Test download_piece_finished.
metadata
.download_piece_finished(piece_id.as_str(), 0, 1024, "digest1", None)
.download_piece_finished(task_id, 1, 0, 1024, "digest1", None)
.unwrap();
let piece = metadata.get_piece(piece_id.as_str()).unwrap().unwrap();
assert_eq!(piece.length, 1024);
assert_eq!(piece.digest, "digest1");
let piece = metadata.get_piece(task_id, 1).unwrap().unwrap();
assert_eq!(piece.length, 1024,);
assert_eq!(piece.digest, "digest1",);
// Test get_pieces.
metadata
.download_piece_started(metadata.piece_id(task_id, 2).as_str(), 2)
.unwrap();
metadata
.download_piece_started(metadata.piece_id(task_id, 3).as_str(), 3)
.unwrap();
metadata.download_piece_started(task_id, 2).unwrap();
metadata.download_piece_started(task_id, 3).unwrap();
let pieces = metadata.get_pieces(task_id).unwrap();
assert_eq!(pieces.len(), 3);
assert_eq!(pieces.len(), 3, "should get 3 pieces in total");
// Test download_piece_failed.
let piece_id = metadata.piece_id(task_id, 2);
metadata
.download_piece_started(piece_id.as_str(), 2)
.unwrap();
metadata
.download_piece_started(metadata.piece_id(task_id, 3).as_str(), 3)
.unwrap();
metadata.download_piece_failed(piece_id.as_str()).unwrap();
let piece = metadata.get_piece(piece_id.as_str()).unwrap();
metadata.download_piece_started(task_id, 2).unwrap();
metadata.download_piece_started(task_id, 3).unwrap();
metadata.download_piece_failed(task_id, 2).unwrap();
let piece = metadata.get_piece(task_id, 2).unwrap();
assert!(piece.is_none());
// Test upload_piece_started.
metadata.upload_piece_started(task_id, 3).unwrap();
let piece = metadata.get_piece(task_id, 3).unwrap().unwrap();
assert_eq!(piece.uploading_count, 1,);
// Test upload_piece_finished.
metadata.upload_piece_finished(task_id, 3).unwrap();
let piece = metadata.get_piece(task_id, 3).unwrap().unwrap();
assert_eq!(piece.uploading_count, 0,);
assert_eq!(piece.uploaded_count, 1,);
// Test upload_piece_failed.
metadata.upload_piece_started(task_id, 3).unwrap();
metadata.upload_piece_failed(task_id, 3).unwrap();
let piece = metadata.get_piece(task_id, 3).unwrap().unwrap();
assert_eq!(piece.uploading_count, 0,);
// Test delete_pieces.
metadata.delete_pieces(task_id).unwrap();
let pieces = metadata.get_pieces(task_id).unwrap();

View File

@ -52,9 +52,6 @@ pub trait Operations {
/// get gets the object by key.
fn get<O: DatabaseObject>(&self, key: &[u8]) -> Result<Option<O>>;
/// is_exist checks if the object exists by key.
fn is_exist<O: DatabaseObject>(&self, key: &[u8]) -> Result<bool>;
/// put puts the object by key.
fn put<O: DatabaseObject>(&self, key: &[u8], value: &O) -> Result<()>;
@ -65,7 +62,6 @@ pub trait Operations {
fn iter<O: DatabaseObject>(&self) -> Result<impl Iterator<Item = Result<(Box<[u8]>, O)>>>;
/// iter_raw iterates all objects without serialization.
#[allow(clippy::type_complexity)]
fn iter_raw<O: DatabaseObject>(
&self,
) -> Result<impl Iterator<Item = Result<(Box<[u8]>, Box<[u8]>)>>>;
@ -77,7 +73,6 @@ pub trait Operations {
) -> Result<impl Iterator<Item = Result<(Box<[u8]>, O)>>>;
/// prefix_iter_raw iterates all objects with prefix without serialization.
#[allow(clippy::type_complexity)]
fn prefix_iter_raw<O: DatabaseObject>(
&self,
prefix: &[u8],

View File

@ -19,12 +19,12 @@ use dragonfly_client_core::{
error::{ErrorType, OrErr},
Error, Result,
};
use rocksdb::WriteOptions;
use rocksdb::{ReadOptions, WriteOptions};
use std::{
ops::Deref,
path::{Path, PathBuf},
};
use tracing::{info, warn};
use tracing::{info, instrument, warn};
/// RocksdbStorageEngine is a storage engine based on rocksdb.
pub struct RocksdbStorageEngine {
@ -54,11 +54,11 @@ impl RocksdbStorageEngine {
// DEFAULT_MAX_BACKGROUND_JOBS is the default max background jobs for rocksdb, default is 2.
const DEFAULT_MAX_BACKGROUND_JOBS: i32 = 2;
/// DEFAULT_BLOCK_SIZE is the default block size for rocksdb, default is 64KB.
const DEFAULT_BLOCK_SIZE: usize = 64 * 1024;
/// DEFAULT_BLOCK_SIZE is the default block size for rocksdb, default is 128KB.
const DEFAULT_BLOCK_SIZE: usize = 128 * 1024;
/// DEFAULT_CACHE_SIZE is the default cache size for rocksdb, default is 1GB.
const DEFAULT_CACHE_SIZE: usize = 1024 * 1024 * 1024;
/// DEFAULT_CACHE_SIZE is the default cache size for rocksdb, default is 512MB.
const DEFAULT_CACHE_SIZE: usize = 512 * 1024 * 1024;
/// DEFAULT_LOG_MAX_SIZE is the default max log size for rocksdb, default is 64MB.
const DEFAULT_LOG_MAX_SIZE: usize = 64 * 1024 * 1024;
@ -67,27 +67,23 @@ impl RocksdbStorageEngine {
const DEFAULT_LOG_MAX_FILES: usize = 10;
/// open opens a rocksdb storage engine with the given directory and column families.
#[instrument(skip_all)]
pub fn open(dir: &Path, log_dir: &PathBuf, cf_names: &[&str], keep: bool) -> Result<Self> {
info!("initializing metadata directory: {:?} {:?}", dir, cf_names);
// Initialize rocksdb options.
let mut options = rocksdb::Options::default();
options.create_if_missing(true);
options.create_missing_column_families(true);
// Optimize compression.
options.set_compression_type(rocksdb::DBCompressionType::Lz4);
options.set_bottommost_compression_type(rocksdb::DBCompressionType::Zstd);
// Improved parallelism.
options.increase_parallelism(num_cpus::get() as i32);
options.set_compression_type(rocksdb::DBCompressionType::Lz4);
options.set_max_background_jobs(std::cmp::max(
num_cpus::get() as i32,
num_cpus::get() as i32 / 2,
Self::DEFAULT_MAX_BACKGROUND_JOBS,
));
// Set rocksdb log options.
options.set_db_log_dir(log_dir);
options.set_log_level(rocksdb::LogLevel::Info);
options.set_log_level(rocksdb::LogLevel::Debug);
options.set_max_log_file_size(Self::DEFAULT_LOG_MAX_SIZE);
options.set_keep_log_file_num(Self::DEFAULT_LOG_MAX_FILES);
@ -95,14 +91,12 @@ impl RocksdbStorageEngine {
let mut block_options = rocksdb::BlockBasedOptions::default();
block_options.set_block_cache(&rocksdb::Cache::new_lru_cache(Self::DEFAULT_CACHE_SIZE));
block_options.set_block_size(Self::DEFAULT_BLOCK_SIZE);
block_options.set_cache_index_and_filter_blocks(true);
block_options.set_pin_l0_filter_and_index_blocks_in_cache(true);
options.set_block_based_table_factory(&block_options);
// Initialize column family options.
let mut cf_options = rocksdb::Options::default();
cf_options.set_prefix_extractor(rocksdb::SliceTransform::create_fixed_prefix(64));
cf_options.set_memtable_prefix_bloom_ratio(0.25);
cf_options.set_memtable_prefix_bloom_ratio(0.2);
cf_options.optimize_level_style_compaction(Self::DEFAULT_MEMTABLE_MEMORY_BUDGET);
// Initialize column families.
@ -116,7 +110,9 @@ impl RocksdbStorageEngine {
// If the storage is kept, open the db and drop the unused column families.
// Otherwise, destroy the db.
if !keep {
if keep {
drop_unused_cfs(&dir);
} else {
rocksdb::DB::destroy(&options, &dir).unwrap_or_else(|err| {
warn!("destroy {:?} failed: {}", dir, err);
});
@ -134,33 +130,36 @@ impl RocksdbStorageEngine {
/// RocksdbStorageEngine implements the storage engine operations.
impl Operations for RocksdbStorageEngine {
/// get gets the object by key.
#[instrument(skip_all)]
fn get<O: DatabaseObject>(&self, key: &[u8]) -> Result<Option<O>> {
let cf = cf_handle::<O>(self)?;
let value = self.get_cf(cf, key).or_err(ErrorType::StorageError)?;
let mut options = ReadOptions::default();
options.fill_cache(false);
let value = self
.get_cf_opt(cf, key, &options)
.or_err(ErrorType::StorageError)?;
match value {
Some(value) => Ok(Some(O::deserialize_from(&value)?)),
None => Ok(None),
}
}
/// is_exist checks if the object exists by key.
fn is_exist<O: DatabaseObject>(&self, key: &[u8]) -> Result<bool> {
let cf = cf_handle::<O>(self)?;
Ok(self
.get_cf(cf, key)
.or_err(ErrorType::StorageError)?
.is_some())
}
/// put puts the object by key.
#[instrument(skip_all)]
fn put<O: DatabaseObject>(&self, key: &[u8], value: &O) -> Result<()> {
let cf = cf_handle::<O>(self)?;
self.put_cf(cf, key, value.serialized()?)
let serialized = value.serialized()?;
let mut options = WriteOptions::default();
options.set_sync(true);
self.put_cf_opt(cf, key, serialized, &options)
.or_err(ErrorType::StorageError)?;
Ok(())
}
/// delete deletes the object by key.
#[instrument(skip_all)]
fn delete<O: DatabaseObject>(&self, key: &[u8]) -> Result<()> {
let cf = cf_handle::<O>(self)?;
let mut options = WriteOptions::default();
@ -172,6 +171,7 @@ impl Operations for RocksdbStorageEngine {
}
/// iter iterates all objects.
#[instrument(skip_all)]
fn iter<O: DatabaseObject>(&self) -> Result<impl Iterator<Item = Result<(Box<[u8]>, O)>>> {
let cf = cf_handle::<O>(self)?;
let iter = self.iterator_cf(cf, rocksdb::IteratorMode::Start);
@ -182,6 +182,7 @@ impl Operations for RocksdbStorageEngine {
}
/// iter_raw iterates all objects without serialization.
#[instrument(skip_all)]
fn iter_raw<O: DatabaseObject>(
&self,
) -> Result<impl Iterator<Item = Result<(Box<[u8]>, Box<[u8]>)>>> {
@ -195,6 +196,7 @@ impl Operations for RocksdbStorageEngine {
}
/// prefix_iter iterates all objects with prefix.
#[instrument(skip_all)]
fn prefix_iter<O: DatabaseObject>(
&self,
prefix: &[u8],
@ -208,6 +210,7 @@ impl Operations for RocksdbStorageEngine {
}
/// prefix_iter_raw iterates all objects with prefix without serialization.
#[instrument(skip_all)]
fn prefix_iter_raw<O: DatabaseObject>(
&self,
prefix: &[u8],
@ -220,6 +223,7 @@ impl Operations for RocksdbStorageEngine {
}
/// batch_delete deletes objects by keys.
#[instrument(skip_all)]
fn batch_delete<O: DatabaseObject>(&self, keys: Vec<&[u8]>) -> Result<()> {
let cf = cf_handle::<O>(self)?;
let mut batch = rocksdb::WriteBatch::default();
@ -236,7 +240,7 @@ impl Operations for RocksdbStorageEngine {
}
/// RocksdbStorageEngine implements the rocksdb of the storage engine.
impl StorageEngine<'_> for RocksdbStorageEngine {}
impl<'db> StorageEngine<'db> for RocksdbStorageEngine {}
/// cf_handle returns the column family handle for the given object.
fn cf_handle<T>(db: &rocksdb::DB) -> Result<&rocksdb::ColumnFamily>
@ -248,398 +252,23 @@ where
.ok_or_else(|| Error::ColumnFamilyNotFound(cf_name.to_string()))
}
#[cfg(test)]
mod tests {
use super::*;
use serde::{Deserialize, Serialize};
use tempfile::tempdir;
/// drop_unused_cfs drops the unused column families.
fn drop_unused_cfs(dir: &Path) {
let old_cf_names = vec!["task", "piece", "cache_task"];
let unused_cf_names = vec!["cache_task"];
#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
struct Object {
id: String,
value: i32,
let mut db = match rocksdb::DB::open_cf(&rocksdb::Options::default(), dir, old_cf_names) {
Ok(db) => db,
Err(err) => {
warn!("open cf failed: {}", err);
return;
}
impl DatabaseObject for Object {
const NAMESPACE: &'static str = "object";
}
fn create_test_engine() -> RocksdbStorageEngine {
let temp_dir = tempdir().unwrap();
let log_dir = temp_dir.path().to_path_buf();
RocksdbStorageEngine::open(temp_dir.path(), &log_dir, &[Object::NAMESPACE], false).unwrap()
}
#[test]
fn test_put_and_get() {
let engine = create_test_engine();
let object = Object {
id: "1".to_string(),
value: 42,
};
engine.put::<Object>(object.id.as_bytes(), &object).unwrap();
let retrieved_object = engine.get::<Object>(object.id.as_bytes()).unwrap().unwrap();
assert_eq!(object, retrieved_object);
}
#[test]
fn test_is_exist() {
let engine = create_test_engine();
let object = Object {
id: "2".to_string(),
value: 100,
};
assert!(!engine.is_exist::<Object>(object.id.as_bytes()).unwrap());
engine.put::<Object>(object.id.as_bytes(), &object).unwrap();
assert!(engine.is_exist::<Object>(object.id.as_bytes()).unwrap());
}
#[test]
fn test_delete() {
let engine = create_test_engine();
let object = Object {
id: "3".to_string(),
value: 200,
};
engine.put::<Object>(object.id.as_bytes(), &object).unwrap();
assert!(engine.is_exist::<Object>(object.id.as_bytes()).unwrap());
engine.delete::<Object>(object.id.as_bytes()).unwrap();
assert!(!engine.is_exist::<Object>(object.id.as_bytes()).unwrap());
}
#[test]
fn test_batch_delete() {
let engine = create_test_engine();
let objects = vec![
Object {
id: "1".to_string(),
value: 1,
},
Object {
id: "2".to_string(),
value: 2,
},
Object {
id: "3".to_string(),
value: 3,
},
];
for object in &objects {
engine.put::<Object>(object.id.as_bytes(), object).unwrap();
assert!(engine.is_exist::<Object>(object.id.as_bytes()).unwrap());
}
let ids: Vec<&[u8]> = objects.iter().map(|object| object.id.as_bytes()).collect();
engine.batch_delete::<Object>(ids).unwrap();
for object in &objects {
assert!(!engine.is_exist::<Object>(object.id.as_bytes()).unwrap());
}
}
#[test]
fn test_iter() {
let engine = create_test_engine();
let objects = vec![
Object {
id: "1".to_string(),
value: 10,
},
Object {
id: "2".to_string(),
value: 20,
},
Object {
id: "3".to_string(),
value: 30,
},
];
for object in &objects {
engine.put::<Object>(object.id.as_bytes(), object).unwrap();
}
let retrieved_objects = engine
.iter::<Object>()
.unwrap()
.collect::<Result<Vec<_>>>()
.unwrap();
assert_eq!(retrieved_objects.len(), objects.len());
for object in &objects {
let found = retrieved_objects
.iter()
.any(|(_, v)| v.id == object.id && v.value == object.value);
assert!(found, "could not find object with id {:?}", object.id);
}
}
#[test]
fn test_prefix_iter() {
let engine = create_test_engine();
// RocksDB prefix extractor is configured with fixed_prefix(64) in the open method.
let prefix_a = [b'a'; 64];
let prefix_b = [b'b'; 64];
// Create test keys with 64-byte identical prefixes.
let key_a1 = [&prefix_a[..], b"_suffix1"].concat();
let key_a2 = [&prefix_a[..], b"_suffix2"].concat();
let key_b1 = [&prefix_b[..], b"_suffix1"].concat();
let key_b2 = [&prefix_b[..], b"_suffix2"].concat();
let objects_with_prefix_a = vec![
(
key_a1.clone(),
Object {
id: "prefix_id_a1".to_string(),
value: 100,
},
),
(
key_a2.clone(),
Object {
id: "prefix_id_a2".to_string(),
value: 200,
},
),
];
let objects_with_prefix_b = vec![
(
key_b1.clone(),
Object {
id: "prefix_id_b1".to_string(),
value: 300,
},
),
(
key_b2.clone(),
Object {
id: "prefix_id_b2".to_string(),
value: 400,
},
),
];
for (key, obj) in &objects_with_prefix_a {
engine.put::<Object>(key, obj).unwrap();
}
for (key, obj) in &objects_with_prefix_b {
engine.put::<Object>(key, obj).unwrap();
}
let retrieved_objects = engine
.prefix_iter::<Object>(&prefix_a)
.unwrap()
.collect::<Result<Vec<_>>>()
.unwrap();
assert_eq!(
retrieved_objects.len(),
objects_with_prefix_a.len(),
"expected {} objects with prefix 'a', but got {}",
objects_with_prefix_a.len(),
retrieved_objects.len()
);
// Verify each object with prefix is correctly retrieved.
for (key, object) in &objects_with_prefix_a {
let found = retrieved_objects
.iter()
.any(|(_, v)| v.id == object.id && v.value == object.value);
assert!(found, "could not find object with key {:?}", key);
}
// Verify objects with different prefix are not retrieved.
for (key, object) in &objects_with_prefix_b {
let found = retrieved_objects
.iter()
.any(|(_, v)| v.id == object.id && v.value == object.value);
assert!(!found, "found object with different prefix: {:?}", key);
}
}
#[test]
fn test_iter_raw() {
let engine = create_test_engine();
let objects = vec![
Object {
id: "1".to_string(),
value: 10,
},
Object {
id: "2".to_string(),
value: 20,
},
Object {
id: "3".to_string(),
value: 30,
},
];
for object in &objects {
engine.put::<Object>(object.id.as_bytes(), object).unwrap();
}
let retrieved_objects = engine
.iter_raw::<Object>()
.unwrap()
.collect::<Result<Vec<_>>>()
.unwrap();
assert_eq!(retrieved_objects.len(), objects.len());
// Verify each object can be deserialized from the raw bytes.
for object in &objects {
let found = retrieved_objects
.iter()
.any(|(_, v)| match Object::deserialize_from(v) {
Ok(deserialized) => {
deserialized.id == object.id && deserialized.value == object.value
}
Err(_) => false,
});
assert!(
found,
"could not find or deserialize object with key {:?}",
object.id
);
}
}
#[test]
fn test_prefix_iter_raw() {
let engine = create_test_engine();
// RocksDB prefix extractor is configured with fixed_prefix(64) in the open method.
let prefix_a = [b'a'; 64];
let prefix_b = [b'b'; 64];
// Create test keys with 64-byte identical prefixes.
let key_a1 = [&prefix_a[..], b"_raw_suffix1"].concat();
let key_a2 = [&prefix_a[..], b"_raw_suffix2"].concat();
let key_b1 = [&prefix_b[..], b"_raw_suffix1"].concat();
let key_b2 = [&prefix_b[..], b"_raw_suffix2"].concat();
let objects_with_prefix_a = vec![
(
key_a1.clone(),
Object {
id: "raw_prefix_id_a1".to_string(),
value: 100,
},
),
(
key_a2.clone(),
Object {
id: "raw_prefix_id_a2".to_string(),
value: 200,
},
),
];
let objects_with_prefix_b = vec![
(
key_b1.clone(),
Object {
id: "raw_prefix_id_b1".to_string(),
value: 300,
},
),
(
key_b2.clone(),
Object {
id: "raw_prefix_id_b2".to_string(),
value: 400,
},
),
];
for (key, obj) in &objects_with_prefix_a {
engine.put::<Object>(key, obj).unwrap();
}
for (key, obj) in &objects_with_prefix_b {
engine.put::<Object>(key, obj).unwrap();
}
let retrieved_objects = engine
.prefix_iter_raw::<Object>(&prefix_a)
.unwrap()
.collect::<Result<Vec<_>>>()
.unwrap();
assert_eq!(
retrieved_objects.len(),
objects_with_prefix_a.len(),
"expected {} raw objects with prefix 'a', but got {}",
objects_with_prefix_a.len(),
retrieved_objects.len()
);
// Verify each object with prefix can be deserialized from raw bytes.
for (_, object) in &objects_with_prefix_a {
let found = retrieved_objects
.iter()
.any(|(_, v)| match Object::deserialize_from(v) {
Ok(deserialized) => {
deserialized.id == object.id && deserialized.value == object.value
}
Err(_) => false,
});
assert!(
found,
"could not find or deserialize object with key {:?}",
object.id
);
}
// Verify objects with different prefix are not retrieved.
for (key, _) in &objects_with_prefix_b {
let found = retrieved_objects
.iter()
.any(|(k, _)| k.as_ref() == key.as_slice());
assert!(!found, "found object with different prefix: {:?}", key);
}
}
#[test]
fn test_column_family_not_found() {
let engine = create_test_engine();
// Define a new type with a different namespace that hasn't been registered.
#[derive(Debug, Serialize, Deserialize, PartialEq)]
struct UnregisteredObject {
data: String,
}
impl DatabaseObject for UnregisteredObject {
const NAMESPACE: &'static str = "unregistered";
}
let key = b"unregistered";
let result = engine.get::<UnregisteredObject>(key);
assert!(result.is_err());
if let Err(err) = result {
assert!(format!("{:?}", err).contains("ColumnFamilyNotFound"));
for cf_name in unused_cf_names {
match db.drop_cf(cf_name) {
Ok(_) => info!("drop cf [{}] success", cf_name),
Err(err) => warn!("drop cf [{}] failed: {}", cf_name, err),
}
}
}

View File

@ -13,6 +13,7 @@ edition.workspace = true
dragonfly-client-core.workspace = true
dragonfly-api.workspace = true
reqwest.workspace = true
hyper.workspace = true
http-range-header.workspace = true
http.workspace = true
tracing.workspace = true
@ -23,17 +24,12 @@ rustls-pki-types.workspace = true
rustls-pemfile.workspace = true
sha2.workspace = true
uuid.workspace = true
sysinfo.workspace = true
hex.workspace = true
crc32fast.workspace = true
openssl.workspace = true
lazy_static.workspace = true
bytesize.workspace = true
lru.workspace = true
tokio.workspace = true
rustix = { version = "1.0.8", features = ["fs"] }
blake3.workspace = true
crc32fast.workspace = true
base16ct.workspace = true
base64 = "0.22.1"
pnet = "0.35.0"
[dev-dependencies]
tempfile.workspace = true

View File

@ -14,10 +14,10 @@
* limitations under the License.
*/
use dragonfly_client_core::{Error as ClientError, Result as ClientResult};
use dragonfly_client_core::Result as ClientResult;
use sha2::Digest as Sha2Digest;
use std::fmt;
use std::io::{self, Read};
use std::io::Read;
use std::path::Path;
use std::str::FromStr;
use tracing::instrument;
@ -31,6 +31,9 @@ pub enum Algorithm {
/// Crc32 is crc32 algorithm for generate digest.
Crc32,
/// Blake3 is blake3 algorithm for generate digest.
Blake3,
/// Sha256 is sha256 algorithm for generate digest.
Sha256,
@ -44,6 +47,7 @@ impl fmt::Display for Algorithm {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Algorithm::Crc32 => write!(f, "crc32"),
Algorithm::Blake3 => write!(f, "blake3"),
Algorithm::Sha256 => write!(f, "sha256"),
Algorithm::Sha512 => write!(f, "sha512"),
}
@ -58,6 +62,7 @@ impl FromStr for Algorithm {
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"crc32" => Ok(Algorithm::Crc32),
"blake3" => Ok(Algorithm::Blake3),
"sha256" => Ok(Algorithm::Sha256),
"sha512" => Ok(Algorithm::Sha512),
_ => Err(format!("invalid digest algorithm: {}", s)),
@ -112,36 +117,10 @@ impl FromStr for Digest {
}
let algorithm = match parts[0] {
"crc32" => {
if parts[1].len() != 10 {
return Err(format!(
"invalid crc32 digest length: {}, expected 10",
parts[1].len()
));
}
Algorithm::Crc32
}
"sha256" => {
if parts[1].len() != 64 {
return Err(format!(
"invalid sha256 digest length: {}, expected 64",
parts[1].len()
));
}
Algorithm::Sha256
}
"sha512" => {
if parts[1].len() != 128 {
return Err(format!(
"invalid sha512 digest length: {}, expected 128",
parts[1].len()
));
}
Algorithm::Sha512
}
"crc32" => Algorithm::Crc32,
"blake3" => Algorithm::Blake3,
"sha256" => Algorithm::Sha256,
"sha512" => Algorithm::Sha512,
_ => return Err(format!("invalid digest algorithm: {}", parts[0])),
};
@ -149,58 +128,48 @@ impl FromStr for Digest {
}
}
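A quick usage sketch for the parsing above, assuming it sits next to the Digest and Algorithm types in this module; the md5 string below is only an example of a rejected algorithm:

#[cfg(test)]
mod digest_parse_example {
    use super::*;

    #[test]
    fn parses_algorithm_and_encoded_value() {
        // "1475635037" is the decimal CRC-32 of b"test content" used in the tests below.
        let digest: Digest = "crc32:1475635037".parse().expect("valid digest string");
        assert_eq!(digest.encoded(), "1475635037");
        assert_eq!(digest.to_string(), "crc32:1475635037");

        // Unknown algorithms are rejected.
        assert!("md5:d41d8cd98f00b204e9800998ecf8427e"
            .parse::<Digest>()
            .is_err());
    }
}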
/// calculate_file_digest calculates the digest of a file.
/// calculate_file_hash calculates the hash of a file.
#[instrument(skip_all)]
pub fn calculate_file_digest(algorithm: Algorithm, path: &Path) -> ClientResult<Digest> {
pub fn calculate_file_hash(algorithm: Algorithm, path: &Path) -> ClientResult<Digest> {
let f = std::fs::File::open(path)?;
let mut reader = io::BufReader::new(f);
let mut reader = std::io::BufReader::new(f);
match algorithm {
Algorithm::Crc32 => {
let mut buffer = [0; 4096];
let mut hasher = crc32fast::Hasher::new();
let mut buffer = [0; 4096];
loop {
match reader.read(&mut buffer) {
Ok(0) => break,
Ok(n) => hasher.update(&buffer[..n]),
Err(ref err) if err.kind() == io::ErrorKind::Interrupted => continue,
Err(err) => return Err(err.into()),
};
let count = reader.read(&mut buffer)?;
if count == 0 {
break;
}
Ok(Digest::new(algorithm, hasher.finalize().to_string()))
hasher.update(&buffer[..count]);
}
Ok(Digest::new(
algorithm,
base16ct::lower::encode_string(&hasher.finalize().to_be_bytes()),
))
}
Algorithm::Blake3 => {
let mut hasher = blake3::Hasher::new();
std::io::copy(&mut reader, &mut hasher)?;
Ok(Digest::new(
algorithm,
base16ct::lower::encode_string(hasher.finalize().as_bytes()),
))
}
Algorithm::Sha256 => {
let mut hasher = sha2::Sha256::new();
io::copy(&mut reader, &mut hasher)?;
std::io::copy(&mut reader, &mut hasher)?;
Ok(Digest::new(algorithm, hex::encode(hasher.finalize())))
}
Algorithm::Sha512 => {
let mut hasher = sha2::Sha512::new();
io::copy(&mut reader, &mut hasher)?;
std::io::copy(&mut reader, &mut hasher)?;
Ok(Digest::new(algorithm, hex::encode(hasher.finalize())))
}
}
}
/// verify_file_digest verifies the digest of a file against an expected digest.
pub fn verify_file_digest(expected_digest: Digest, file_path: &Path) -> ClientResult<()> {
let digest = match calculate_file_digest(expected_digest.algorithm(), file_path) {
Ok(digest) => digest,
Err(err) => {
return Err(err);
}
};
if digest.to_string() != expected_digest.to_string() {
return Err(ClientError::DigestMismatch(
expected_digest.to_string(),
digest.to_string(),
));
}
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
@ -210,6 +179,7 @@ mod tests {
#[test]
fn test_algorithm_display() {
assert_eq!(Algorithm::Crc32.to_string(), "crc32");
assert_eq!(Algorithm::Blake3.to_string(), "blake3");
assert_eq!(Algorithm::Sha256.to_string(), "sha256");
assert_eq!(Algorithm::Sha512.to_string(), "sha512");
}
@ -217,6 +187,7 @@ mod tests {
#[test]
fn test_algorithm_from_str() {
assert_eq!("crc32".parse::<Algorithm>(), Ok(Algorithm::Crc32));
assert_eq!("blake3".parse::<Algorithm>(), Ok(Algorithm::Blake3));
assert_eq!("sha256".parse::<Algorithm>(), Ok(Algorithm::Sha256));
assert_eq!("sha512".parse::<Algorithm>(), Ok(Algorithm::Sha512));
assert!("invalid".parse::<Algorithm>().is_err());
@ -229,50 +200,31 @@ mod tests {
}
#[test]
fn test_calculate_file_digest() {
fn test_calculate_file_hash() {
let content = b"test content";
let temp_file = tempfile::NamedTempFile::new().expect("failed to create temp file");
let path = temp_file.path();
let mut file = File::create(path).expect("failed to create file");
file.write_all(content).expect("failed to write to file");
let expected_blake3 = "ead3df8af4aece7792496936f83b6b6d191a7f256585ce6b6028db161278017e";
let digest =
calculate_file_hash(Algorithm::Blake3, path).expect("failed to calculate Blake3 hash");
assert_eq!(digest.encoded(), expected_blake3);
let expected_sha256 = "6ae8a75555209fd6c44157c0aed8016e763ff435a19cf186f76863140143ff72";
let digest = calculate_file_digest(Algorithm::Sha256, path)
.expect("failed to calculate Sha256 hash");
let digest =
calculate_file_hash(Algorithm::Sha256, path).expect("failed to calculate Sha256 hash");
assert_eq!(digest.encoded(), expected_sha256);
let expected_sha512 = "0cbf4caef38047bba9a24e621a961484e5d2a92176a859e7eb27df343dd34eb98d538a6c5f4da1ce302ec250b821cc001e46cc97a704988297185a4df7e99602";
let digest = calculate_file_digest(Algorithm::Sha512, path)
.expect("failed to calculate Sha512 hash");
let digest =
calculate_file_hash(Algorithm::Sha512, path).expect("failed to calculate Sha512 hash");
assert_eq!(digest.encoded(), expected_sha512);
let expected_crc32 = "1475635037";
let expected_crc32 = "57f4675d";
let digest =
calculate_file_digest(Algorithm::Crc32, path).expect("failed to calculate Crc32 hash");
calculate_file_hash(Algorithm::Crc32, path).expect("failed to calculate Crc32 hash");
assert_eq!(digest.encoded(), expected_crc32);
}
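The two expected CRC-32 values in this hunk are the same checksum in different encodings: one side prints the u32 in decimal, the other prints its big-endian bytes as lowercase hex, and 1475635037 equals 0x57f4675d. A one-line check:

fn main() {
    assert_eq!(format!("{:08x}", 1475635037u32), "57f4675d");
}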
#[test]
fn test_verify_file_digest() {
let content = b"test content";
let temp_file = tempfile::NamedTempFile::new().expect("failed to create temp file");
let path = temp_file.path();
let mut file = File::create(path).expect("failed to create file");
file.write_all(content).expect("failed to write to file");
let expected_sha256_digest = Digest::new(
Algorithm::Sha256,
"6ae8a75555209fd6c44157c0aed8016e763ff435a19cf186f76863140143ff72".to_string(),
);
assert!(verify_file_digest(expected_sha256_digest, path).is_ok());
let expected_sha512_digest = Digest::new(
Algorithm::Sha512,
"0cbf4caef38047bba9a24e621a961484e5d2a92176a859e7eb27df343dd34eb98d538a6c5f4da1ce302ec250b821cc001e46cc97a704988297185a4df7e99602".to_string(),
);
assert!(verify_file_digest(expected_sha512_digest, path).is_ok());
let expected_crc32_digest = Digest::new(Algorithm::Crc32, "1475635037".to_string());
assert!(verify_file_digest(expected_crc32_digest, path).is_ok());
}
}

View File

@ -1,54 +0,0 @@
/*
* Copyright 2025 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use dragonfly_client_core::Result;
use tokio::fs;
/// fallocate allocates the space for the file and fills it with zeros; it only takes effect on Linux and is a no-op elsewhere.
#[allow(unused_variables)]
pub async fn fallocate(f: &fs::File, length: u64) -> Result<()> {
// No allocation needed for zero length. Avoids potential fallocate errors.
if length == 0 {
return Ok(());
}
#[cfg(target_os = "linux")]
{
use dragonfly_client_core::Error;
use rustix::fs::{fallocate, FallocateFlags};
use std::os::unix::io::AsFd;
use tokio::io;
// Set length (potential truncation).
f.set_len(length).await?;
let fd = f.as_fd();
let offset = 0;
let flags = FallocateFlags::KEEP_SIZE;
loop {
match fallocate(fd, flags, offset, length) {
Ok(_) => return Ok(()),
Err(rustix::io::Errno::INTR) => continue,
Err(err) => {
return Err(Error::IO(io::Error::from_raw_os_error(err.raw_os_error())))
}
}
}
}
#[cfg(not(target_os = "linux"))]
Ok(())
}
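A hedged usage sketch for the helper above: preallocating a task's content file before positioned piece writes land in it. The path and size are made up for illustration, and the function is assumed to live next to fallocate in this module:

async fn preallocate_content_file() -> dragonfly_client_core::Result<()> {
    // Create (or truncate) the content file, then reserve 64 MiB up front so
    // later piece writes do not grow the file incrementally.
    let f = tokio::fs::File::create("/tmp/example-task-content").await?;
    fallocate(&f, 64 * 1024 * 1024).await
}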

View File

@ -20,6 +20,7 @@ use dragonfly_client_core::{
Error, Result,
};
use http::header::{self, HeaderMap};
use tracing::instrument;
/// Credentials is the credentials for the basic auth.
pub struct Credentials {
@ -33,6 +34,7 @@ pub struct Credentials {
/// Credentials is the basic auth.
impl Credentials {
/// new returns a new Credentials.
#[instrument(skip_all)]
pub fn new(username: &str, password: &str) -> Credentials {
Self {
username: username.to_string(),

View File

@ -19,53 +19,102 @@ use dragonfly_client_core::{
error::{ErrorType, OrErr},
Error, Result,
};
use reqwest::header::{HeaderMap, HeaderName, HeaderValue};
use reqwest::header::{HeaderMap, HeaderValue};
use std::collections::HashMap;
use tracing::{error, instrument};
pub mod basic_auth;
/// headermap_to_hashmap converts a headermap to a hashmap.
pub fn headermap_to_hashmap(header: &HeaderMap<HeaderValue>) -> HashMap<String, String> {
let mut hashmap: HashMap<String, String> = HashMap::with_capacity(header.len());
/// reqwest_headermap_to_hashmap converts a reqwest headermap to a hashmap.
#[instrument(skip_all)]
pub fn reqwest_headermap_to_hashmap(header: &HeaderMap<HeaderValue>) -> HashMap<String, String> {
let mut hashmap: HashMap<String, String> = HashMap::new();
for (k, v) in header {
if let Ok(v) = v.to_str() {
hashmap.insert(k.to_string(), v.to_string());
}
let Some(v) = v.to_str().ok() else {
continue;
};
hashmap.entry(k.to_string()).or_insert(v.to_string());
}
hashmap
}
/// hashmap_to_headermap converts a hashmap to a headermap.
pub fn hashmap_to_headermap(header: &HashMap<String, String>) -> Result<HeaderMap<HeaderValue>> {
let mut headermap = HeaderMap::with_capacity(header.len());
for (k, v) in header {
let name = HeaderName::from_bytes(k.as_bytes()).or_err(ErrorType::ParseError)?;
let value = HeaderValue::from_bytes(v.as_bytes()).or_err(ErrorType::ParseError)?;
headermap.insert(name, value);
/// hashmap_to_reqwest_headermap converts a hashmap to a reqwest headermap.
#[instrument(skip_all)]
pub fn hashmap_to_reqwest_headermap(
header: &HashMap<String, String>,
) -> Result<HeaderMap<HeaderValue>> {
let header: HeaderMap = (header).try_into().or_err(ErrorType::ParseError)?;
Ok(header)
}
/// hashmap_to_hyper_header_map converts a hashmap to a hyper header map.
#[instrument(skip_all)]
pub fn hashmap_to_hyper_header_map(header: &HashMap<String, String>) -> Result<HeaderMap> {
let header: HeaderMap = (header).try_into().or_err(ErrorType::ParseError)?;
Ok(header)
}
/// TODO: Remove the conversion once both crates use the same http crate version.
/// Convert the Hyper header to the Reqwest header, because the http crate
/// versions differ: the Reqwest header depends on http crate version 0.2,
/// while the Hyper header depends on http crate version 0.1.
#[instrument(skip_all)]
pub fn hyper_headermap_to_reqwest_headermap(hyper_header: &HeaderMap) -> HeaderMap {
let mut reqwest_header = HeaderMap::new();
for (hyper_header_key, hyper_header_value) in hyper_header.iter() {
let reqwest_header_name: reqwest::header::HeaderName =
match hyper_header_key.to_string().parse() {
Ok(reqwest_header_name) => reqwest_header_name,
Err(err) => {
error!("parse header name error: {}", err);
continue;
}
};
let reqwest_header_value: reqwest::header::HeaderValue = match hyper_header_value.to_str() {
Ok(reqwest_header_value) => match reqwest_header_value.parse() {
Ok(reqwest_header_value) => reqwest_header_value,
Err(err) => {
error!("parse header value error: {}", err);
continue;
}
},
Err(err) => {
error!("parse header value error: {}", err);
continue;
}
};
reqwest_header.insert(reqwest_header_name, reqwest_header_value);
}
Ok(headermap)
reqwest_header
}
/// header_vec_to_hashmap converts a vector of header string to a hashmap.
#[instrument(skip_all)]
pub fn header_vec_to_hashmap(raw_header: Vec<String>) -> Result<HashMap<String, String>> {
let mut header = HashMap::with_capacity(raw_header.len());
let mut header = HashMap::new();
for h in raw_header {
if let Some((k, v)) = h.split_once(':') {
header.insert(k.trim().to_string(), v.trim().to_string());
}
let mut parts = h.splitn(2, ':');
let key = parts.next().unwrap().trim();
let value = parts.next().unwrap().trim();
header.insert(key.to_string(), value.to_string());
}
Ok(header)
}
/// header_vec_to_headermap converts a vector of header string to a reqwest headermap.
pub fn header_vec_to_headermap(raw_header: Vec<String>) -> Result<HeaderMap> {
hashmap_to_headermap(&header_vec_to_hashmap(raw_header)?)
/// header_vec_to_reqwest_headermap converts a vector of header string to a reqwest headermap.
#[instrument(skip_all)]
pub fn header_vec_to_reqwest_headermap(raw_header: Vec<String>) -> Result<HeaderMap> {
hashmap_to_reqwest_headermap(&header_vec_to_hashmap(raw_header)?)
}
/// get_range gets the range from http header.
#[instrument(skip_all)]
pub fn get_range(header: &HeaderMap, content_length: u64) -> Result<Option<Range>> {
match header.get(reqwest::header::RANGE) {
Some(range) => {
@ -79,6 +128,7 @@ pub fn get_range(header: &HeaderMap, content_length: u64) -> Result<Option<Range
/// parse_range_header parses a Range header string as per RFC 7233.
/// Supported Range headers: "Range": "bytes=100-200", "Range": "bytes=-50",
/// "Range": "bytes=150-", "Range": "bytes=0-0,-1".
#[instrument(skip_all)]
pub fn parse_range_header(range_header_value: &str, content_length: u64) -> Result<Range> {
let parsed_ranges =
http_range_header::parse_range_header(range_header_value).or_err(ErrorType::ParseError)?;
@ -102,28 +152,57 @@ mod tests {
use reqwest::header::{HeaderMap, HeaderValue};
#[test]
fn test_headermap_to_hashmap() {
fn test_reqwest_headermap_to_hashmap() {
let mut header = HeaderMap::new();
header.insert("Content-Type", HeaderValue::from_static("application/json"));
header.insert("Authorization", HeaderValue::from_static("Bearer token"));
let hashmap = headermap_to_hashmap(&header);
let hashmap = reqwest_headermap_to_hashmap(&header);
assert_eq!(hashmap.get("content-type").unwrap(), "application/json");
assert_eq!(hashmap.get("authorization").unwrap(), "Bearer token");
assert_eq!(hashmap.get("foo"), None);
}
#[test]
fn test_hashmap_to_headermap() {
fn test_hashmap_to_reqwest_headermap() {
let mut hashmap = HashMap::new();
hashmap.insert("Content-Type".to_string(), "application/json".to_string());
hashmap.insert("Authorization".to_string(), "Bearer token".to_string());
let header = hashmap_to_headermap(&hashmap).unwrap();
let header = hashmap_to_reqwest_headermap(&hashmap).unwrap();
assert_eq!(header.get("Content-Type").unwrap(), "application/json");
assert_eq!(header.get("Authorization").unwrap(), "Bearer token");
}
#[test]
fn test_hashmap_to_hyper_header_map() {
let mut hashmap = HashMap::new();
hashmap.insert("Content-Type".to_string(), "application/json".to_string());
hashmap.insert("Authorization".to_string(), "Bearer token".to_string());
let header = hashmap_to_hyper_header_map(&hashmap).unwrap();
assert_eq!(header.get("Content-Type").unwrap(), "application/json");
assert_eq!(header.get("Authorization").unwrap(), "Bearer token");
}
#[test]
fn test_hyper_headermap_to_reqwest_headermap() {
let mut hyper_header = HeaderMap::new();
hyper_header.insert("Content-Type", HeaderValue::from_static("application/json"));
hyper_header.insert("Authorization", HeaderValue::from_static("Bearer token"));
let reqwest_header = hyper_headermap_to_reqwest_headermap(&hyper_header);
assert_eq!(
reqwest_header.get("Content-Type").unwrap(),
"application/json"
);
assert_eq!(reqwest_header.get("Authorization").unwrap(), "Bearer token");
}
#[test]
fn test_header_vec_to_hashmap() {
let raw_header = vec![
@ -132,18 +211,20 @@ mod tests {
];
let hashmap = header_vec_to_hashmap(raw_header).unwrap();
assert_eq!(hashmap.get("Content-Type").unwrap(), "application/json");
assert_eq!(hashmap.get("Authorization").unwrap(), "Bearer token");
}
#[test]
fn test_header_vec_to_headermap() {
fn test_header_vec_to_reqwest_headermap() {
let raw_header = vec![
"Content-Type: application/json".to_string(),
"Authorization: Bearer token".to_string(),
];
let header = header_vec_to_headermap(raw_header).unwrap();
let header = header_vec_to_reqwest_headermap(raw_header).unwrap();
assert_eq!(header.get("Content-Type").unwrap(), "application/json");
assert_eq!(header.get("Authorization").unwrap(), "Bearer token");
}
@ -157,6 +238,7 @@ mod tests {
);
let range = get_range(&header, 200).unwrap().unwrap();
assert_eq!(range.start, 0);
assert_eq!(range.length, 101);
}
@ -164,6 +246,7 @@ mod tests {
#[test]
fn test_parse_range_header() {
let range = parse_range_header("bytes=0-100", 200).unwrap();
assert_eq!(range.start, 0);
assert_eq!(range.length, 101);
}
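
HTTP byte ranges per RFC 7233 are inclusive on both ends, which is why "bytes=0-100" above yields a length of 101. A tiny sketch of the arithmetic (a hypothetical helper, not part of this crate):

/// Hypothetical helper: map an inclusive byte range to (start, length).
fn inclusive_range(start: u64, end: u64) -> (u64, u64) {
    // "bytes=0-100" covers bytes 0 through 100, i.e. 101 bytes in total.
    (start, end - start + 1)
}

fn main() {
    assert_eq!(inclusive_range(0, 100), (0, 101));
    assert_eq!(inclusive_range(150, 199), (150, 50));
}
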

View File

@ -20,8 +20,8 @@ use dragonfly_client_core::{
Result,
};
use sha2::{Digest, Sha256};
use std::io::{self, Read};
use std::path::PathBuf;
use tracing::instrument;
use url::Url;
use uuid::Uuid;
@ -31,34 +31,6 @@ const SEED_PEER_SUFFIX: &str = "seed";
/// PERSISTENT_CACHE_TASK_SUFFIX is the suffix of the persistent cache task.
const PERSISTENT_CACHE_TASK_SUFFIX: &str = "persistent-cache-task";
/// TaskIDParameter is the parameter of the task id.
pub enum TaskIDParameter {
/// Content uses the content to generate the task id.
Content(String),
/// URLBased uses the url, piece_length, tag, application and filtered_query_params to generate
/// the task id.
URLBased {
url: String,
piece_length: Option<u64>,
tag: Option<String>,
application: Option<String>,
filtered_query_params: Vec<String>,
},
}
/// PersistentCacheTaskIDParameter is the parameter of the persistent cache task id.
pub enum PersistentCacheTaskIDParameter {
/// Content uses the content to generate the persistent cache task id.
Content(String),
/// FileContentBased uses the file path, piece_length, tag and application to generate the persistent cache task id.
FileContentBased {
path: PathBuf,
piece_length: Option<u64>,
tag: Option<String>,
application: Option<String>,
},
}
/// IDGenerator is used to generate the id for the resources.
#[derive(Debug)]
pub struct IDGenerator {
@ -75,6 +47,7 @@ pub struct IDGenerator {
/// IDGenerator implements the IDGenerator.
impl IDGenerator {
/// new creates a new IDGenerator.
#[instrument(skip_all)]
pub fn new(ip: String, hostname: String, is_seed_peer: bool) -> Self {
IDGenerator {
ip,
@ -84,7 +57,7 @@ impl IDGenerator {
}
/// host_id generates the host id.
#[inline]
#[instrument(skip_all)]
pub fn host_id(&self) -> String {
if self.is_seed_peer {
return format!("{}-{}-{}", self.ip, self.hostname, "seed");
@ -94,21 +67,16 @@ impl IDGenerator {
}
/// task_id generates the task id.
#[inline]
pub fn task_id(&self, parameter: TaskIDParameter) -> Result<String> {
match parameter {
TaskIDParameter::Content(content) => {
Ok(hex::encode(Sha256::digest(content.as_bytes())))
}
TaskIDParameter::URLBased {
url,
piece_length,
tag,
application,
filtered_query_params,
} => {
#[instrument(skip_all)]
pub fn task_id(
&self,
url: &str,
tag: Option<&str>,
application: Option<&str>,
filtered_query_params: Vec<String>,
) -> Result<String> {
// Filter the query parameters.
let url = Url::parse(url.as_str()).or_err(ErrorType::ParseError)?;
let url = Url::parse(url).or_err(ErrorType::ParseError)?;
let query = url
.query_pairs()
.filter(|(k, _)| !filtered_query_params.contains(&k.to_string()));
@ -143,50 +111,24 @@ impl IDGenerator {
hasher.update(application);
}
// Add the piece length to generate the task id.
if let Some(piece_length) = piece_length {
hasher.update(piece_length.to_string());
}
hasher.update(TaskType::Standard.as_str_name().as_bytes());
// Generate the task id.
Ok(hex::encode(hasher.finalize()))
}
}
}
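
To make the content-based branch above concrete, here is a standalone sketch (assuming the sha2 and hex crates) that reproduces the content-based expected value used in the test cases further below:

use sha2::{Digest, Sha256};

fn main() {
    // A content-based task id is the hex-encoded SHA-256 of the raw content.
    let task_id = hex::encode(Sha256::digest("This is a test file".as_bytes()));
    assert_eq!(
        task_id,
        "e2d0fe1585a63ec6009c8016ff8dda8b17719a637405a4e23c0ff81339148249"
    );
}
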
/// persistent_cache_task_id generates the persistent cache task id.
#[inline]
#[instrument(skip_all)]
pub fn persistent_cache_task_id(
&self,
parameter: PersistentCacheTaskIDParameter,
path: &PathBuf,
tag: Option<&str>,
application: Option<&str>,
) -> Result<String> {
let mut hasher = crc32fast::Hasher::new();
// Initialize the hasher.
let mut hasher = blake3::Hasher::new();
match parameter {
PersistentCacheTaskIDParameter::Content(content) => {
hasher.update(content.as_bytes());
Ok(hasher.finalize().to_string())
}
PersistentCacheTaskIDParameter::FileContentBased {
path,
piece_length,
tag,
application,
} => {
// Calculate the hash of the file.
let f = std::fs::File::open(path)?;
let mut buffer = [0; 4096];
let mut reader = io::BufReader::with_capacity(buffer.len(), f);
loop {
match reader.read(&mut buffer) {
Ok(0) => break,
Ok(n) => hasher.update(&buffer[..n]),
Err(ref err) if err.kind() == io::ErrorKind::Interrupted => continue,
Err(err) => return Err(err.into()),
};
}
let mut f = std::fs::File::open(path)?;
std::io::copy(&mut f, &mut hasher)?;
// Add the tag to generate the persistent cache task id.
if let Some(tag) = tag {
@ -198,21 +140,12 @@ impl IDGenerator {
hasher.update(application.as_bytes());
}
// Add the piece length to generate the persistent cache task id.
if let Some(piece_length) = piece_length {
hasher.update(piece_length.to_string().as_bytes());
}
hasher.update(TaskType::PersistentCache.as_str_name().as_bytes());
// Generate the task id by crc32.
Ok(hasher.finalize().to_string())
}
}
// Generate the persistent cache task id.
Ok(hasher.finalize().to_hex().to_string())
}
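
For the file-based branch, the chunked read loop above amounts to streaming the file through the hasher. A reduced standalone sketch (assuming the crc32fast crate, one of the two hashers that appear in this diff; the input path is hypothetical):

use std::io::{BufReader, ErrorKind, Read};
use std::path::Path;

fn crc32_of_file(path: &Path) -> std::io::Result<u32> {
    let file = std::fs::File::open(path)?;
    let mut reader = BufReader::with_capacity(4096, file);
    let mut hasher = crc32fast::Hasher::new();
    let mut buffer = [0u8; 4096];
    loop {
        match reader.read(&mut buffer) {
            Ok(0) => break,                       // end of file
            Ok(n) => hasher.update(&buffer[..n]), // feed the chunk into the hasher
            Err(ref err) if err.kind() == ErrorKind::Interrupted => continue,
            Err(err) => return Err(err),
        }
    }
    Ok(hasher.finalize())
}

fn main() -> std::io::Result<()> {
    let checksum = crc32_of_file(Path::new("/etc/hostname"))?; // hypothetical input path
    println!("crc32: {}", checksum);
    Ok(())
}
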
/// peer_id generates the peer id.
#[inline]
#[instrument(skip_all)]
pub fn peer_id(&self) -> String {
if self.is_seed_peer {
return format!(
@ -228,6 +161,7 @@ impl IDGenerator {
}
/// task_type generates the task type by the task id.
#[instrument(skip_all)]
pub fn task_type(&self, id: &str) -> TaskType {
if id.ends_with(PERSISTENT_CACHE_TASK_SUFFIX) {
return TaskType::PersistentCache;
@ -267,140 +201,81 @@ mod tests {
let test_cases = vec![
(
IDGenerator::new("127.0.0.1".to_string(), "localhost".to_string(), false),
TaskIDParameter::URLBased {
url: "https://example.com".to_string(),
piece_length: Some(1024_u64),
tag: Some("foo".to_string()),
application: Some("bar".to_string()),
filtered_query_params: vec![],
},
"27554d06dfc788c2c2c60e01960152ffbd4b145fc103fcb80b432b4dc238a6fe",
"https://example.com",
Some("foo"),
Some("bar"),
vec![],
"160fa7f001d9d2e893130894fbb60a5fb006e1d61bff82955f2946582bc9de1d",
),
(
IDGenerator::new("127.0.0.1".to_string(), "localhost".to_string(), false),
TaskIDParameter::URLBased {
url: "https://example.com".to_string(),
piece_length: None,
tag: Some("foo".to_string()),
application: Some("bar".to_string()),
filtered_query_params: vec![],
},
"06408fbf247ddaca478f8cb9565fe5591c28efd0994b8fea80a6a87d3203c5ca",
"https://example.com",
Some("foo"),
None,
vec![],
"2773851c628744fb7933003195db436ce397c1722920696c4274ff804d86920b",
),
(
IDGenerator::new("127.0.0.1".to_string(), "localhost".to_string(), false),
TaskIDParameter::URLBased {
url: "https://example.com".to_string(),
piece_length: None,
tag: Some("foo".to_string()),
application: None,
filtered_query_params: vec![],
},
"3c3f230ef9f191dd2821510346a7bc138e4894bee9aee184ba250a3040701d2a",
"https://example.com",
None,
Some("bar"),
vec![],
"63dee2822037636b0109876b58e95692233840753a882afa69b9b5ee82a6c57d",
),
(
IDGenerator::new("127.0.0.1".to_string(), "localhost".to_string(), false),
TaskIDParameter::URLBased {
url: "https://example.com".to_string(),
piece_length: None,
tag: None,
application: Some("bar".to_string()),
filtered_query_params: vec![],
},
"c9f9261b7305c24371244f9f149f5d4589ed601348fdf22d7f6f4b10658fdba2",
),
(
IDGenerator::new("127.0.0.1".to_string(), "localhost".to_string(), false),
TaskIDParameter::URLBased {
url: "https://example.com".to_string(),
piece_length: Some(1024_u64),
tag: None,
application: None,
filtered_query_params: vec![],
},
"9f7c9aafbc6f30f8f41a96ca77eeae80c5b60964b3034b0ee43ccf7b2f9e52b8",
),
(
IDGenerator::new("127.0.0.1".to_string(), "localhost".to_string(), false),
TaskIDParameter::URLBased {
url: "https://example.com?foo=foo&bar=bar".to_string(),
piece_length: None,
tag: None,
application: None,
filtered_query_params: vec!["foo".to_string(), "bar".to_string()],
},
"457b4328cde278e422c9e243f7bfd1e97f511fec43a80f535cf6b0ef6b086776",
),
(
IDGenerator::new("127.0.0.1".to_string(), "localhost".to_string(), false),
TaskIDParameter::Content("This is a test file".to_string()),
"e2d0fe1585a63ec6009c8016ff8dda8b17719a637405a4e23c0ff81339148249",
"https://example.com?foo=foo&bar=bar",
None,
None,
vec!["foo".to_string(), "bar".to_string()],
"100680ad546ce6a577f42f52df33b4cfdca756859e664b8d7de329b150d09ce9",
),
];
for (generator, parameter, expected_id) in test_cases {
let task_id = generator.task_id(parameter).unwrap();
for (generator, url, tag, application, filtered_query_params, expected_id) in test_cases {
let task_id = generator
.task_id(url, tag, application, filtered_query_params)
.unwrap();
assert_eq!(task_id, expected_id);
}
}
#[test]
fn should_generate_persistent_cache_task_id() {
let dir = tempdir().unwrap();
let file_path = dir.path().join("testfile");
let mut f = File::create(&file_path).unwrap();
f.write_all("This is a test file".as_bytes()).unwrap();
let test_cases = vec![
(
IDGenerator::new("127.0.0.1".to_string(), "localhost".to_string(), false),
PersistentCacheTaskIDParameter::FileContentBased {
path: file_path.clone(),
piece_length: Some(1024_u64),
tag: Some("tag1".to_string()),
application: Some("app1".to_string()),
},
"3490958009",
"This is a test file",
Some("tag1"),
Some("app1"),
"84ed9fca6c51c725c21ab005682509bc9f5a9e08779aa14039a1df41bd95bb9f",
),
(
IDGenerator::new("127.0.0.1".to_string(), "localhost".to_string(), false),
PersistentCacheTaskIDParameter::FileContentBased {
path: file_path.clone(),
piece_length: None,
tag: None,
application: Some("app1".to_string()),
},
"735741469",
"This is a test file",
None,
Some("app1"),
"c39ee7baea1df8276d16224b6bbe93d0abaedaa056e819bb1a6318e28cdde508",
),
(
IDGenerator::new("127.0.0.1".to_string(), "localhost".to_string(), false),
PersistentCacheTaskIDParameter::FileContentBased {
path: file_path.clone(),
piece_length: None,
tag: Some("tag1".to_string()),
application: None,
},
"3954905097",
),
(
IDGenerator::new("127.0.0.1".to_string(), "localhost".to_string(), false),
PersistentCacheTaskIDParameter::FileContentBased {
path: file_path.clone(),
piece_length: Some(1024_u64),
tag: None,
application: None,
},
"4162557545",
),
(
IDGenerator::new("127.0.0.1".to_string(), "localhost".to_string(), false),
PersistentCacheTaskIDParameter::Content("This is a test file".to_string()),
"107352521",
"This is a test file",
Some("tag1"),
None,
"de692dcd9b6eace344140ef2718033527ee0a2e436c03044a771902bd536ae7d",
),
];
for (generator, parameter, expected_id) in test_cases {
let task_id = generator.persistent_cache_task_id(parameter).unwrap();
for (generator, file_content, tag, application, expected_id) in test_cases {
let dir = tempdir().unwrap();
let file_path = dir.path().join("testfile");
let mut f = File::create(&file_path).unwrap();
f.write_all(file_content.as_bytes()).unwrap();
let task_id = generator
.persistent_cache_task_id(&file_path, tag, application)
.unwrap();
assert_eq!(task_id, expected_id);
}
}

View File

@ -15,8 +15,6 @@
*/
pub mod digest;
pub mod fs;
pub mod http;
pub mod id_generator;
pub mod net;
pub mod tls;

View File

@ -1,230 +0,0 @@
/*
* Copyright 2025 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use bytesize::ByteSize;
use pnet::datalink::{self, NetworkInterface};
use std::cmp::min;
use std::net::IpAddr;
use std::sync::Arc;
use std::time::Duration;
use sysinfo::Networks;
use tokio::sync::Mutex;
use tracing::{info, warn};
/// Interface represents a network interface with its information.
#[derive(Debug, Clone, Default)]
pub struct Interface {
/// name is the name of the network interface.
pub name: String,
/// bandwidth is the bandwidth of the network interface in bps.
pub bandwidth: u64,
// network_data_mutex is a mutex to protect access to network data.
network_data_mutex: Arc<Mutex<()>>,
}
/// NetworkData represents the network data for a specific interface,
#[derive(Debug, Clone, Default)]
pub struct NetworkData {
/// max_rx_bandwidth is the maximum receive bandwidth of the interface in bps.
pub max_rx_bandwidth: u64,
/// rx_bandwidth is the current receive bandwidth of the interface in bps.
pub rx_bandwidth: Option<u64>,
/// max_tx_bandwidth is the maximum transmit bandwidth of the interface in bps.
pub max_tx_bandwidth: u64,
/// tx_bandwidth is the current transmit bandwidth of the interface in bps.
pub tx_bandwidth: Option<u64>,
}
/// Interface methods provide functionality to get network interface information.
impl Interface {
/// DEFAULT_NETWORKS_REFRESH_INTERVAL is the default interval for refreshing network data.
const DEFAULT_NETWORKS_REFRESH_INTERVAL: Duration = Duration::from_secs(2);
/// new creates a new Interface instance based on the provided IP address and rate limit.
pub fn new(ip: IpAddr, rate_limit: ByteSize) -> Interface {
let rate_limit = Self::byte_size_to_bits(rate_limit); // convert to bps
let Some(interface) = Self::get_network_interface_by_ip(ip) else {
warn!(
"can not find interface for IP address {}, network interface unknown with bandwidth {} bps",
ip, rate_limit
);
return Interface {
name: "unknown".to_string(),
bandwidth: rate_limit,
network_data_mutex: Arc::new(Mutex::new(())),
};
};
match Self::get_speed(&interface.name) {
Some(speed) => {
let bandwidth = min(Self::megabits_to_bits(speed), rate_limit);
info!(
"network interface {} with bandwidth {} bps",
interface.name, bandwidth
);
Interface {
name: interface.name,
bandwidth,
network_data_mutex: Arc::new(Mutex::new(())),
}
}
None => {
warn!(
"can not get speed, network interface {} with bandwidth {} bps",
interface.name, rate_limit
);
Interface {
name: interface.name,
bandwidth: rate_limit,
network_data_mutex: Arc::new(Mutex::new(())),
}
}
}
}
/// get_network_data retrieves the network data for the interface.
pub async fn get_network_data(&self) -> NetworkData {
// Lock the mutex to ensure exclusive access to network data.
let _guard = self.network_data_mutex.lock().await;
// Initialize sysinfo network.
let mut networks = Networks::new_with_refreshed_list();
// Sleep to calculate the network traffic difference over
// the DEFAULT_NETWORKS_REFRESH_INTERVAL.
tokio::time::sleep(Self::DEFAULT_NETWORKS_REFRESH_INTERVAL).await;
// Refresh network information.
networks.refresh();
let Some(network_data) = networks.get(self.name.as_str()) else {
warn!("can not find network data for interface {}", self.name);
return NetworkData {
max_rx_bandwidth: self.bandwidth,
max_tx_bandwidth: self.bandwidth,
..Default::default()
};
};
// Calculate the receive bandwidth in bits per second.
let rx_bandwidth = (Self::bytes_to_bits(network_data.received()) as f64
/ Self::DEFAULT_NETWORKS_REFRESH_INTERVAL.as_secs_f64())
.round() as u64;
// Calculate the transmit bandwidth in bits per second.
let tx_bandwidth = (Self::bytes_to_bits(network_data.transmitted()) as f64
/ Self::DEFAULT_NETWORKS_REFRESH_INTERVAL.as_secs_f64())
.round() as u64;
NetworkData {
max_rx_bandwidth: self.bandwidth,
rx_bandwidth: Some(rx_bandwidth),
max_tx_bandwidth: self.bandwidth,
tx_bandwidth: Some(tx_bandwidth),
}
}
/// get_speed returns the speed of the network interface in Mbps.
pub fn get_speed(name: &str) -> Option<u64> {
#[cfg(target_os = "linux")]
{
let speed_path = format!("/sys/class/net/{}/speed", name);
std::fs::read_to_string(&speed_path)
.ok()
.and_then(|speed_str| speed_str.trim().parse::<u64>().ok())
}
#[cfg(not(target_os = "linux"))]
{
warn!("can not get interface {} speed on non-linux platform", name);
None
}
}
/// get_network_interface_by_ip returns the network interface that has the specified
/// IP address.
pub fn get_network_interface_by_ip(ip: IpAddr) -> Option<NetworkInterface> {
datalink::interfaces()
.into_iter()
.find(|interface| interface.ips.iter().any(|ip_net| ip_net.ip() == ip))
}
/// byte_size_to_bits converts a ByteSize to bits.
pub fn byte_size_to_bits(size: ByteSize) -> u64 {
size.as_u64() * 8
}
/// megabits_to_bits converts megabits to bits.
pub fn megabits_to_bits(size: u64) -> u64 {
size * 1_000_000 // 1 Mbit = 1,000,000 bits
}
/// bytes_to_bits converts bytes to bits.
pub fn bytes_to_bits(size: u64) -> u64 {
size * 8 // 1 byte = 8 bits
}
}
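
The rx/tx figures above are just bits transferred during the refresh window divided by the window length. A sketch of that arithmetic with plain numbers (no sysinfo dependency):

fn main() {
    // Suppose the interface reported 1_000_000 bytes received over a 2-second window.
    let received_bytes: u64 = 1_000_000;
    let interval_secs: f64 = 2.0;

    // bytes -> bits, then divide by the window to get bits per second.
    let rx_bandwidth = ((received_bytes * 8) as f64 / interval_secs).round() as u64;
    assert_eq!(rx_bandwidth, 4_000_000); // 4 Mbps
}
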
#[cfg(test)]
mod tests {
use super::*;
use bytesize::ByteSize;
#[test]
fn test_byte_size_to_bits() {
let test_cases = vec![
(ByteSize::kb(1), 8_000u64),
(ByteSize::mb(1), 8_000_000u64),
(ByteSize::gb(1), 8_000_000_000u64),
(ByteSize::b(0), 0u64),
];
for (input, expected) in test_cases {
let result = Interface::byte_size_to_bits(input);
assert_eq!(result, expected);
}
}
#[test]
fn test_megabits_to_bits() {
let test_cases = vec![
(1u64, 1_000_000u64),
(1000u64, 1_000_000_000u64),
(0u64, 0u64),
];
for (input, expected) in test_cases {
let result = Interface::megabits_to_bits(input);
assert_eq!(result, expected);
}
}
#[test]
fn test_bytes_to_bits() {
let test_cases = vec![(1u64, 8u64), (1000u64, 8_000u64), (0u64, 0u64)];
for (input, expected) in test_cases {
let result = Interface::bytes_to_bits(input);
assert_eq!(result, expected);
}
}
}

View File

@ -16,35 +16,14 @@
use dragonfly_client_core::error::{ErrorType, OrErr};
use dragonfly_client_core::{Error as ClientError, Result as ClientResult};
use lazy_static::lazy_static;
use lru::LruCache;
use rcgen::{Certificate, CertificateParams, KeyPair};
use rustls_pki_types::{CertificateDer, PrivateKeyDer, ServerName, UnixTime};
use std::num::NonZeroUsize;
use std::path::PathBuf;
use std::sync::{Arc, Mutex};
use std::sync::Arc;
use std::vec::Vec;
use std::{fs, io};
use tracing::instrument;
/// DEFAULT_CERTS_CACHE_CAPACITY is the default capacity of the certificates cache.
const DEFAULT_CERTS_CACHE_CAPACITY: usize = 1000;
/// CertKeyPair is the type of the certificate and private key pair.
type CertKeyPair = (Vec<CertificateDer<'static>>, PrivateKeyDer<'static>);
lazy_static! {
/// SELF_SIGNED_CERTS is a map that stores the self-signed certificates to avoid
/// generating the same certificates multiple times.
static ref SELF_SIGNED_CERTS: Arc<Mutex<LruCache<String, CertKeyPair>>> =
Arc::new(Mutex::new(LruCache::new(NonZeroUsize::new(DEFAULT_CERTS_CACHE_CAPACITY).unwrap())));
/// SIMPLE_SELF_SIGNED_CERTS is a map that stores the simple self-signed certificates to avoid
/// generating the same certificates multiple times.
static ref SIMPLE_SELF_SIGNED_CERTS: Arc<Mutex<LruCache<String, CertKeyPair>>> =
Arc::new(Mutex::new(LruCache::new(NonZeroUsize::new(DEFAULT_CERTS_CACHE_CAPACITY).unwrap())));
}
/// NoVerifier is a verifier that does not verify the server certificate.
/// It is used for testing and should not be used in production.
#[derive(Debug)]
@ -145,15 +124,8 @@ pub fn generate_cert_from_pem(cert_path: &PathBuf) -> ClientResult<Vec<Certifica
#[instrument(skip_all)]
pub fn generate_self_signed_certs_by_ca_cert(
ca_cert: &Certificate,
host: &str,
subject_alt_names: Vec<String>,
) -> ClientResult<(Vec<CertificateDer<'static>>, PrivateKeyDer<'static>)> {
let mut cache = SELF_SIGNED_CERTS.lock().unwrap();
if let Some((certs, key)) = cache.get(host) {
return Ok((certs.clone(), key.clone_key()));
};
drop(cache);
// Sign certificate with CA certificate by given subject alternative names.
let params = CertificateParams::new(subject_alt_names);
let cert = Certificate::from_params(params).or_err(ErrorType::CertificateError)?;
@ -171,23 +143,14 @@ pub fn generate_self_signed_certs_by_ca_cert(
let key = rustls_pemfile::private_key(&mut key_pem_reader)?
.ok_or_else(|| ClientError::Unknown("failed to load private key".to_string()))?;
let mut cache = SELF_SIGNED_CERTS.lock().unwrap();
cache.push(host.to_string(), (certs.clone(), key.clone_key()));
Ok((certs, key))
}
/// generate_simple_self_signed_certs generates a simple self-signed certificates
#[instrument(skip_all)]
pub fn generate_simple_self_signed_certs(
host: &str,
subject_alt_names: impl Into<Vec<String>>,
) -> ClientResult<(Vec<CertificateDer<'static>>, PrivateKeyDer<'static>)> {
let mut cache = SIMPLE_SELF_SIGNED_CERTS.lock().unwrap();
if let Some((certs, key)) = cache.get(host) {
return Ok((certs.clone(), key.clone_key()));
};
drop(cache);
let cert = rcgen::generate_simple_self_signed(subject_alt_names)
.or_err(ErrorType::CertificateError)?;
let key = rustls_pki_types::PrivateKeyDer::Pkcs8(cert.serialize_private_key_der().into());
@ -196,8 +159,6 @@ pub fn generate_simple_self_signed_certs(
.or_err(ErrorType::CertificateError)?
.into()];
let mut cache = SIMPLE_SELF_SIGNED_CERTS.lock().unwrap();
cache.push(host.to_string(), (certs.clone(), key.clone_key()));
Ok((certs, key))
}
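
The lazy_static caches above implement a check-then-generate-then-insert memoization of generated certificates, keyed by host. A minimal sketch of that pattern (assuming the lru crate; a String stands in for the certificate/key pair):

use lru::LruCache;
use std::num::NonZeroUsize;
use std::sync::Mutex;

fn cached_cert(cache: &Mutex<LruCache<String, String>>, host: &str) -> String {
    // Return the cached entry if the host was seen before.
    if let Some(cert) = cache.lock().unwrap().get(host) {
        return cert.clone();
    }

    // Otherwise generate, insert, and return it (generation is a stand-in here).
    let cert = format!("cert-for-{}", host);
    cache.lock().unwrap().push(host.to_string(), cert.clone());
    cert
}

fn main() {
    let cache = Mutex::new(LruCache::new(NonZeroUsize::new(1000).unwrap()));
    assert_eq!(cached_cert(&cache, "example.com"), "cert-for-example.com");
    assert_eq!(cached_cert(&cache, "example.com"), "cert-for-example.com"); // served from cache
}
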
@ -370,10 +331,9 @@ Z+yQ5jhu/fmSBNhqO/8Lp+Y=
&ca_key_file.path().to_path_buf(),
)
.unwrap();
let host = "example.com";
let subject_alt_names = vec![host.to_string()];
let subject_alt_names = vec!["example.com".to_string()];
let result = generate_self_signed_certs_by_ca_cert(&ca_cert, host, subject_alt_names);
let result = generate_self_signed_certs_by_ca_cert(&ca_cert, subject_alt_names);
assert!(result.is_ok());
let (certs, key) = result.unwrap();
assert!(!certs.is_empty());

View File

@ -18,6 +18,10 @@ path = "src/bin/dfdaemon/main.rs"
name = "dfget"
path = "src/bin/dfget/main.rs"
[[bin]]
name = "dfstore"
path = "src/bin/dfstore/main.rs"
[[bin]]
name = "dfcache"
path = "src/bin/dfcache/main.rs"
@ -34,6 +38,8 @@ hyper.workspace = true
hyper-util.workspace = true
hyper-rustls.workspace = true
tracing.workspace = true
validator.workspace = true
humantime.workspace = true
serde.workspace = true
chrono.workspace = true
prost-wkt-types.workspace = true
@ -51,42 +57,39 @@ http.workspace = true
openssl.workspace = true
clap.workspace = true
anyhow.workspace = true
bytes.workspace = true
blake3.workspace = true
bytesize.workspace = true
humantime.workspace = true
uuid.workspace = true
percent-encoding.workspace = true
tokio-rustls.workspace = true
serde_json.workspace = true
fs2.workspace = true
lazy_static.workspace = true
futures.workspace = true
local-ip-address.workspace = true
sysinfo.workspace = true
tracing-appender = "0.2.3"
lazy_static = "1.5"
serde_json = "1.0"
tracing-log = "0.2"
tracing-subscriber = { version = "0.3", features = ["env-filter", "time", "chrono"] }
tracing-panic = "0.1.2"
tracing-opentelemetry = "0.30.0"
opentelemetry = { version = "0.29.1", default-features = false, features = ["trace"] }
opentelemetry-otlp = { version = "0.29.0", default-features = false, features = ["trace", "grpc-tonic", "http-proto", "reqwest-blocking-client"] }
opentelemetry_sdk = { version = "0.29.0", default-features = false, features = ["trace", "rt-tokio"] }
opentelemetry-semantic-conventions = { version = "0.30.0", features = ["semconv_experimental"] }
tracing-appender = "0.2.3"
rolling-file = "0.2.0"
pprof = { version = "0.15", features = ["flamegraph", "protobuf-codec"] }
tracing-opentelemetry = "0.18.0"
tracing-flame = "0.2.0"
opentelemetry = { version = "0.18.0", default-features = false, features = ["trace", "rt-tokio"] }
opentelemetry-jaeger = { version = "0.17.0", features = ["rt-tokio"] }
pprof = { version = "0.13", features = ["flamegraph", "protobuf-codec"] }
prometheus = { version = "0.13", features = ["process"] }
tonic-health = "0.12.3"
tower = { version = "0.4.13", features = ["limit", "load-shed", "buffer"] }
indicatif = "0.18.0"
hashring = "0.3.6"
leaky-bucket = "1.1.2"
http-body-util = "0.1.3"
termion = "4.0.5"
tabled = "0.20.0"
path-absolutize = "3.1.1"
bytes = "1.8"
sysinfo = "0.32.0"
tower = "0.4.13"
indicatif = "0.17.8"
dashmap = "6.1.0"
fastrand = "2.3.0"
glob = "0.3.3"
console-subscriber = "0.4.1"
fs2 = "0.4.3"
hashring = "0.3.6"
libc = "0.2"
fslock = "0.2.1"
leaky-bucket = "1.1.2"
http-body-util = "0.1.2"
futures-util = "0.3.31"
termion = "4.0.3"
tabled = "0.16.0"
path-absolutize = "3.1.1"
[dev-dependencies]
tempfile.workspace = true
@ -117,11 +120,21 @@ assets = [
"usr/bin/dfcache",
"755",
],
[
"../target/x86_64-unknown-linux-gnu/release/dfstore",
"usr/bin/dfstore",
"755",
],
[
"../ci/dfdaemon.service",
"lib/systemd/system/dfdaemon.service",
"644",
],
[
"../ci/dfdaemon.yaml",
"etc/dragonfly/dfdaemon.yaml",
"644",
],
[
"../CONTRIBUTING.md",
"usr/share/doc/client/CONTRIBUTING.md",
@ -159,11 +172,21 @@ assets = [
"usr/bin/dfcache",
"755",
],
[
"../target/x86_64-unknown-linux-musl/release/dfstore",
"usr/bin/dfstore",
"755",
],
[
"../ci/dfdaemon.service",
"lib/systemd/system/dfdaemon.service",
"644",
],
[
"../ci/dfdaemon.yaml",
"etc/dragonfly/dfdaemon.yaml",
"644",
],
[
"../CONTRIBUTING.md",
"usr/share/doc/client/CONTRIBUTING.md",
@ -201,11 +224,21 @@ assets = [
"usr/bin/dfcache",
"755",
],
[
"../target/aarch64-unknown-linux-gnu/release/dfstore",
"usr/bin/dfstore",
"755",
],
[
"../ci/dfdaemon.service",
"lib/systemd/system/dfdaemon.service",
"644",
],
[
"../ci/dfdaemon.yaml",
"etc/dragonfly/dfdaemon.yaml",
"644",
],
[
"../CONTRIBUTING.md",
"usr/share/doc/client/CONTRIBUTING.md",
@ -243,11 +276,21 @@ assets = [
"usr/bin/dfcache",
"755",
],
[
"../target/aarch64-unknown-linux-musl/release/dfstore",
"usr/bin/dfstore",
"755",
],
[
"../ci/dfdaemon.service",
"lib/systemd/system/dfdaemon.service",
"644",
],
[
"../ci/dfdaemon.yaml",
"etc/dragonfly/dfdaemon.yaml",
"644",
],
[
"../CONTRIBUTING.md",
"usr/share/doc/client/CONTRIBUTING.md",
@ -270,7 +313,9 @@ assets = [
{ source = "../target/x86_64-unknown-linux-gnu/release/dfget", dest = "/usr/bin/dfget", mode = "755" },
{ source = "../target/x86_64-unknown-linux-gnu/release/dfdaemon", dest = "/usr/bin/dfdaemon", mode = "755" },
{ source = "../target/x86_64-unknown-linux-gnu/release/dfcache", dest = "/usr/bin/dfcache", mode = "755" },
{ source = "../target/x86_64-unknown-linux-gnu/release/dfstore", dest = "/usr/bin/dfstore", mode = "755" },
{ source = "../ci/dfdaemon.service", dest = "/lib/systemd/system/dfdaemon.service", config = true, mode = "644" },
{ source = "../ci/dfdaemon.yaml", dest = "/etc/dragonfly/dfdaemon.yaml", mode = "644", config = true },
{ source = "../CONTRIBUTING.md", dest = "/usr/share/doc/client/CONTRIBUTING.md", mode = "644", doc = true },
{ source = "../LICENSE", dest = "/usr/share/doc/client/LICENSE.md", mode = "644", doc = true },
{ source = "../README.md", dest = "/usr/share/doc/client/README.md", mode = "644", doc = true },
@ -281,19 +326,22 @@ assets = [
{ source = "../target/x86_64-unknown-linux-musl/release/dfget", dest = "/usr/bin/dfget", mode = "755" },
{ source = "../target/x86_64-unknown-linux-musl/release/dfdaemon", dest = "/usr/bin/dfdaemon", mode = "755" },
{ source = "../target/x86_64-unknown-linux-musl/release/dfcache", dest = "/usr/bin/dfcache", mode = "755" },
{ source = "../target/x86_64-unknown-linux-musl/release/dfstore", dest = "/usr/bin/dfstore", mode = "755" },
{ source = "../ci/dfdaemon.service", dest = "/lib/systemd/system/dfdaemon.service", config = true, mode = "644" },
{ source = "../ci/dfdaemon.yaml", dest = "/etc/dragonfly/dfdaemon.yaml", mode = "644", config = true },
{ source = "../CONTRIBUTING.md", dest = "/usr/share/doc/client/CONTRIBUTING.md", mode = "644", doc = true },
{ source = "../LICENSE", dest = "/usr/share/doc/client/LICENSE.md", mode = "644", doc = true },
{ source = "../README.md", dest = "/usr/share/doc/client/README.md", mode = "644", doc = true },
]
auto-req = "no"
[package.metadata.generate-rpm.variants.aarch64-unknown-linux-gnu]
assets = [
{ source = "../target/aarch64-unknown-linux-gnu/release/dfget", dest = "/usr/bin/dfget", mode = "755" },
{ source = "../target/aarch64-unknown-linux-gnu/release/dfdaemon", dest = "/usr/bin/dfdaemon", mode = "755" },
{ source = "../target/aarch64-unknown-linux-gnu/release/dfcache", dest = "/usr/bin/dfcache", mode = "755" },
{ source = "../target/aarch64-unknown-linux-gnu/release/dfstore", dest = "/usr/bin/dfstore", mode = "755" },
{ source = "../ci/dfdaemon.service", dest = "/lib/systemd/system/dfdaemon.service", config = true, mode = "644" },
{ source = "../ci/dfdaemon.yaml", dest = "/etc/dragonfly/dfdaemon.yaml", mode = "644", config = true },
{ source = "../CONTRIBUTING.md", dest = "/usr/share/doc/client/CONTRIBUTING.md", mode = "644", doc = true },
{ source = "../LICENSE", dest = "/usr/share/doc/client/LICENSE.md", mode = "644", doc = true },
{ source = "../README.md", dest = "/usr/share/doc/client/README.md", mode = "644", doc = true },
@ -304,9 +352,10 @@ assets = [
{ source = "../target/aarch64-unknown-linux-musl/release/dfget", dest = "/usr/bin/dfget", mode = "755" },
{ source = "../target/aarch64-unknown-linux-musl/release/dfdaemon", dest = "/usr/bin/dfdaemon", mode = "755" },
{ source = "../target/aarch64-unknown-linux-musl/release/dfcache", dest = "/usr/bin/dfcache", mode = "755" },
{ source = "../target/aarch64-unknown-linux-musl/release/dfstore", dest = "/usr/bin/dfstore", mode = "755" },
{ source = "../ci/dfdaemon.service", dest = "/lib/systemd/system/dfdaemon.service", config = true, mode = "644" },
{ source = "../ci/dfdaemon.yaml", dest = "/etc/dragonfly/dfdaemon.yaml", mode = "644", config = true },
{ source = "../CONTRIBUTING.md", dest = "/usr/share/doc/client/CONTRIBUTING.md", mode = "644", doc = true },
{ source = "../LICENSE", dest = "/usr/share/doc/client/LICENSE.md", mode = "644", doc = true },
{ source = "../README.md", dest = "/usr/share/doc/client/README.md", mode = "644", doc = true },
]
auto-req = "no"

View File

@ -14,9 +14,10 @@
* limitations under the License.
*/
use crate::grpc::scheduler::SchedulerClient;
use crate::grpc::{manager::ManagerClient, scheduler::SchedulerClient};
use crate::shutdown;
use dragonfly_api::common::v2::{Build, Cpu, Disk, Host, Memory, Network};
use dragonfly_api::manager::v2::{DeleteSeedPeerRequest, SourceType, UpdateSeedPeerRequest};
use dragonfly_api::scheduler::v2::{AnnounceHostRequest, DeleteHostRequest};
use dragonfly_client_config::{
dfdaemon::{Config, HostType},
@ -24,13 +25,90 @@ use dragonfly_client_config::{
};
use dragonfly_client_core::error::{ErrorType, OrErr};
use dragonfly_client_core::Result;
use dragonfly_client_util::net::Interface;
use std::env;
use std::sync::Arc;
use std::time::Duration;
use sysinfo::System;
use tokio::sync::mpsc;
use tracing::{debug, error, info, instrument};
use tracing::{error, info, instrument};
/// ManagerAnnouncer is used to announce the dfdaemon information to the manager.
pub struct ManagerAnnouncer {
/// config is the configuration of the dfdaemon.
config: Arc<Config>,
/// manager_client is the grpc client of the manager.
manager_client: Arc<ManagerClient>,
/// shutdown is used to shutdown the announcer.
shutdown: shutdown::Shutdown,
/// _shutdown_complete is used to notify the announcer is shutdown.
_shutdown_complete: mpsc::UnboundedSender<()>,
}
/// ManagerAnnouncer implements the manager announcer of the dfdaemon.
impl ManagerAnnouncer {
/// new creates a new manager announcer.
#[instrument(skip_all)]
pub fn new(
config: Arc<Config>,
manager_client: Arc<ManagerClient>,
shutdown: shutdown::Shutdown,
shutdown_complete_tx: mpsc::UnboundedSender<()>,
) -> Self {
Self {
config,
manager_client,
shutdown,
_shutdown_complete: shutdown_complete_tx,
}
}
/// run announces the dfdaemon information to the manager.
#[instrument(skip_all)]
pub async fn run(&self) -> Result<()> {
// Clone the shutdown channel.
let mut shutdown = self.shutdown.clone();
// If the seed peer is enabled, we should announce the seed peer to the manager.
if self.config.seed_peer.enable {
// Register the seed peer to the manager.
self.manager_client
.update_seed_peer(UpdateSeedPeerRequest {
source_type: SourceType::SeedPeerSource.into(),
hostname: self.config.host.hostname.clone(),
r#type: self.config.seed_peer.kind.to_string(),
idc: self.config.host.idc.clone(),
location: self.config.host.location.clone(),
ip: self.config.host.ip.unwrap().to_string(),
port: self.config.upload.server.port as i32,
download_port: self.config.upload.server.port as i32,
seed_peer_cluster_id: self.config.seed_peer.cluster_id,
})
.await?;
// Wait for the shutdown signal before deregistering the seed peer from the manager.
shutdown.recv().await;
// Delete the seed peer from the manager.
self.manager_client
.delete_seed_peer(DeleteSeedPeerRequest {
source_type: SourceType::SeedPeerSource.into(),
hostname: self.config.host.hostname.clone(),
ip: self.config.host.ip.unwrap().to_string(),
seed_peer_cluster_id: self.config.seed_peer.cluster_id,
})
.await?;
info!("announce to manager shutting down");
} else {
shutdown.recv().await;
info!("announce to manager shutting down");
}
Ok(())
}
}
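
When the seed peer is enabled, the run method above boils down to a register / wait-for-shutdown / deregister sequence. A standalone sketch of that shape, using a tokio broadcast channel as a stand-in for this crate's shutdown type:

use tokio::sync::broadcast;

async fn announce(mut shutdown: broadcast::Receiver<()>) {
    // Register the seed peer with the manager here (stand-in for update_seed_peer).
    println!("seed peer registered");

    // Block until a shutdown signal arrives.
    let _ = shutdown.recv().await;

    // Deregister before exiting (stand-in for delete_seed_peer).
    println!("seed peer deleted, announcer shutting down");
}

#[tokio::main]
async fn main() {
    let (tx, rx) = broadcast::channel(1);
    let announcer = tokio::spawn(announce(rx));

    // Simulate a shutdown signal.
    tx.send(()).unwrap();
    announcer.await.unwrap();
}
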
/// SchedulerAnnouncer is used to announce the dfdaemon information to the scheduler.
pub struct SchedulerAnnouncer {
@ -43,9 +121,6 @@ pub struct SchedulerAnnouncer {
/// scheduler_client is the grpc client of the scheduler.
scheduler_client: Arc<SchedulerClient>,
/// interface is the network interface.
interface: Arc<Interface>,
/// shutdown is used to shutdown the announcer.
shutdown: shutdown::Shutdown,
@ -56,11 +131,11 @@ pub struct SchedulerAnnouncer {
/// SchedulerAnnouncer implements the scheduler announcer of the dfdaemon.
impl SchedulerAnnouncer {
/// new creates a new scheduler announcer.
#[instrument(skip_all)]
pub async fn new(
config: Arc<Config>,
host_id: String,
scheduler_client: Arc<SchedulerClient>,
interface: Arc<Interface>,
shutdown: shutdown::Shutdown,
shutdown_complete_tx: mpsc::UnboundedSender<()>,
) -> Result<Self> {
@ -68,7 +143,6 @@ impl SchedulerAnnouncer {
config,
host_id,
scheduler_client,
interface,
shutdown,
_shutdown_complete: shutdown_complete_tx,
};
@ -76,12 +150,13 @@ impl SchedulerAnnouncer {
// Initialize the scheduler announcer.
announcer
.scheduler_client
.init_announce_host(announcer.make_announce_host_request(Duration::ZERO).await?)
.init_announce_host(announcer.make_announce_host_request()?)
.await?;
Ok(announcer)
}
/// run announces the dfdaemon information to the scheduler.
#[instrument(skip_all)]
pub async fn run(&self) {
// Clone the shutdown channel.
let mut shutdown = self.shutdown.clone();
@ -91,7 +166,7 @@ impl SchedulerAnnouncer {
loop {
tokio::select! {
_ = interval.tick() => {
let request = match self.make_announce_host_request(interval.period()).await {
let request = match self.make_announce_host_request() {
Ok(request) => request,
Err(err) => {
error!("make announce host request failed: {}", err);
@ -120,7 +195,7 @@ impl SchedulerAnnouncer {
/// make_announce_host_request makes the announce host request.
#[instrument(skip_all)]
async fn make_announce_host_request(&self, interval: Duration) -> Result<AnnounceHostRequest> {
fn make_announce_host_request(&self) -> Result<AnnounceHostRequest> {
// If the seed peer is enabled, we should announce the seed peer to the scheduler.
let host_type = if self.config.seed_peer.enable {
self.config.seed_peer.kind
@ -128,7 +203,7 @@ impl SchedulerAnnouncer {
HostType::Normal
};
// Refresh the system information.
// Get the system information.
let mut sys = System::new_all();
sys.refresh_all();
@ -156,25 +231,25 @@ impl SchedulerAnnouncer {
free: sys.free_memory(),
};
// Wait for getting the network data.
let network_data = self.interface.get_network_data().await;
debug!(
"network data: rx bandwidth {}/{} bps, tx bandwidth {}/{} bps",
network_data.rx_bandwidth.unwrap_or(0),
network_data.max_rx_bandwidth,
network_data.tx_bandwidth.unwrap_or(0),
network_data.max_tx_bandwidth
);
// Get the network information.
let network = Network {
// TODO: Get the count of the tcp connection.
tcp_connection_count: 0,
// TODO: Get the count of the upload tcp connection.
upload_tcp_connection_count: 0,
idc: self.config.host.idc.clone(),
location: self.config.host.location.clone(),
max_rx_bandwidth: network_data.max_rx_bandwidth,
rx_bandwidth: network_data.rx_bandwidth,
max_tx_bandwidth: network_data.max_tx_bandwidth,
tx_bandwidth: network_data.tx_bandwidth,
..Default::default()
// TODO: Get the network download rate, refer to
// https://docs.rs/sysinfo/latest/sysinfo/struct.NetworkData.html#method.received.
download_rate: 0,
download_rate_limit: self.config.download.rate_limit.as_u64(),
// TODO: Get the network download rate, refer to
// https://docs.rs/sysinfo/latest/sysinfo/struct.NetworkData.html#method.transmitted
upload_rate: 0,
upload_rate_limit: self.config.upload.rate_limit.as_u64(),
};
// Get the disk information.
@ -184,21 +259,11 @@ impl SchedulerAnnouncer {
let used_space = total_space - available_space;
let used_percent = (used_space as f64 / (total_space) as f64) * 100.0;
let mut write_bandwidth = 0;
let mut read_bandwidth = 0;
if interval != Duration::ZERO {
let disk_usage = process.disk_usage();
write_bandwidth = disk_usage.written_bytes / interval.as_secs();
read_bandwidth = disk_usage.read_bytes / interval.as_secs();
};
let disk = Disk {
total: total_space,
free: available_space,
used: used_space,
used_percent,
write_bandwidth,
read_bandwidth,
// TODO: Get the disk inodes information.
inodes_total: 0,

View File

@ -23,17 +23,13 @@ use dragonfly_client_core::{
error::{ErrorType, OrErr},
Error, Result,
};
use dragonfly_client_util::fs::fallocate;
use indicatif::{ProgressBar, ProgressState, ProgressStyle};
use local_ip_address::local_ip;
use path_absolutize::*;
use std::path::{Path, PathBuf};
use std::time::Duration;
use std::{cmp::min, fmt::Write};
use termion::{color, style};
use tokio::fs::{self, OpenOptions};
use tokio::io::{AsyncSeekExt, AsyncWriteExt, SeekFrom};
use tracing::{debug, error, info};
use tracing::{error, info};
use super::*;
@ -43,20 +39,6 @@ pub struct ExportCommand {
#[arg(help = "Specify the persistent cache task ID to export")]
id: String,
#[arg(
long = "transfer-from-dfdaemon",
default_value_t = false,
help = "Specify whether to transfer the content of downloading file from dfdaemon's unix domain socket. If it is true, dfcache will call dfdaemon to download the file, and dfdaemon will return the content of downloading file to dfcache via unix domain socket, and dfcache will copy the content to the output path. If it is false, dfdaemon will download the file and hardlink or copy the file to the output path."
)]
transfer_from_dfdaemon: bool,
#[arg(
long = "force-hard-link",
default_value_t = false,
help = "Specify whether the download file must be hard linked to the output path. If hard link is failed, download will be failed. If it is false, dfdaemon will copy the file to the output path if hard link is failed."
)]
force_hard_link: bool,
#[arg(
long = "application",
default_value = "",
@ -85,79 +67,15 @@ pub struct ExportCommand {
help = "Specify the timeout for exporting a file"
)]
timeout: Duration,
#[arg(
long = "digest",
required = false,
help = "Verify the integrity of the downloaded file using the specified digest, support sha256, sha512, crc32. If the digest is not specified, the downloaded file will not be verified. Format: <algorithm>:<digest>, e.g. sha256:1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef, crc32:12345678"
)]
digest: Option<String>,
#[arg(
short = 'e',
long = "endpoint",
default_value_os_t = dfdaemon::default_download_unix_socket_path(),
help = "Endpoint of dfdaemon's GRPC server"
)]
endpoint: PathBuf,
#[arg(
short = 'l',
long,
default_value = "info",
help = "Specify the logging level [trace, debug, info, warn, error]"
)]
log_level: Level,
#[arg(
long,
default_value_os_t = dfcache::default_dfcache_log_dir(),
help = "Specify the log directory"
)]
log_dir: PathBuf,
#[arg(
long,
default_value_t = 6,
help = "Specify the max number of log files"
)]
log_max_files: usize,
#[arg(long, default_value_t = false, help = "Specify whether to print log")]
console: bool,
}
/// Implement the execute for ExportCommand.
impl ExportCommand {
/// Executes the export command with comprehensive validation and advanced error handling.
///
/// This function serves as the main entry point for the dfcache export command execution.
/// It handles the complete workflow including argument parsing, validation, logging setup,
/// dfdaemon client connection, and export operation execution. The function provides
/// sophisticated error reporting with colored terminal output, including specialized
/// handling for backend errors with HTTP status codes and headers.
pub async fn execute(&self) -> Result<()> {
// Parse command line arguments.
Args::parse();
// Initialize tracing.
let _guards = init_tracing(
dfcache::NAME,
self.log_dir.clone(),
self.log_level,
self.log_max_files,
None,
None,
None,
None,
None,
false,
self.console,
);
/// execute executes the export command.
pub async fn execute(&self, endpoint: &Path) -> Result<()> {
// Validate the command line arguments.
if let Err(err) = self.validate_args() {
println!(
eprintln!(
"{}{}{}Validating Failed!{}",
color::Fg(color::Red),
style::Italic,
@ -165,7 +83,7 @@ impl ExportCommand {
style::Reset
);
println!(
eprintln!(
"{}{}{}****************************************{}",
color::Fg(color::Black),
style::Italic,
@ -173,7 +91,7 @@ impl ExportCommand {
style::Reset
);
println!(
eprintln!(
"{}{}{}Message:{} {}",
color::Fg(color::Cyan),
style::Italic,
@ -182,7 +100,7 @@ impl ExportCommand {
err,
);
println!(
eprintln!(
"{}{}{}****************************************{}",
color::Fg(color::Black),
style::Italic,
@ -195,10 +113,10 @@ impl ExportCommand {
// Get dfdaemon download client.
let dfdaemon_download_client =
match get_dfdaemon_download_client(self.endpoint.to_path_buf()).await {
match get_dfdaemon_download_client(endpoint.to_path_buf()).await {
Ok(client) => client,
Err(err) => {
println!(
eprintln!(
"{}{}{}Connect Dfdaemon Failed!{}",
color::Fg(color::Red),
style::Italic,
@ -206,7 +124,7 @@ impl ExportCommand {
style::Reset
);
println!(
eprintln!(
"{}{}{}****************************************{}",
color::Fg(color::Black),
style::Italic,
@ -214,17 +132,17 @@ impl ExportCommand {
style::Reset
);
println!(
eprintln!(
"{}{}{}Message:{}, can not connect {}, please check the unix socket {}",
color::Fg(color::Cyan),
style::Italic,
style::Bold,
style::Reset,
err,
self.endpoint.to_string_lossy(),
endpoint.to_string_lossy(),
);
println!(
eprintln!(
"{}{}{}****************************************{}",
color::Fg(color::Black),
style::Italic,
@ -242,7 +160,7 @@ impl ExportCommand {
Error::TonicStatus(status) => {
let details = status.details();
if let Ok(backend_err) = serde_json::from_slice::<Backend>(details) {
println!(
eprintln!(
"{}{}{}Exporting Failed!{}",
color::Fg(color::Red),
style::Italic,
@ -250,7 +168,7 @@ impl ExportCommand {
style::Reset
);
println!(
eprintln!(
"{}{}{}****************************************{}",
color::Fg(color::Black),
style::Italic,
@ -259,7 +177,7 @@ impl ExportCommand {
);
if let Some(status_code) = backend_err.status_code {
println!(
eprintln!(
"{}{}{}Bad Status Code:{} {}",
color::Fg(color::Red),
style::Italic,
@ -269,7 +187,7 @@ impl ExportCommand {
);
}
println!(
eprintln!(
"{}{}{}Message:{} {}",
color::Fg(color::Cyan),
style::Italic,
@ -279,7 +197,7 @@ impl ExportCommand {
);
if !backend_err.header.is_empty() {
println!(
eprintln!(
"{}{}{}Header:{}",
color::Fg(color::Cyan),
style::Italic,
@ -287,11 +205,11 @@ impl ExportCommand {
style::Reset
);
for (key, value) in backend_err.header.iter() {
println!(" [{}]: {}", key.as_str(), value.as_str());
eprintln!(" [{}]: {}", key.as_str(), value.as_str());
}
}
println!(
eprintln!(
"{}{}{}****************************************{}",
color::Fg(color::Black),
style::Italic,
@ -299,7 +217,7 @@ impl ExportCommand {
style::Reset
);
} else {
println!(
eprintln!(
"{}{}{}Exporting Failed!{}",
color::Fg(color::Red),
style::Italic,
@ -307,7 +225,7 @@ impl ExportCommand {
style::Reset
);
println!(
eprintln!(
"{}{}{}*********************************{}",
color::Fg(color::Black),
style::Italic,
@ -315,7 +233,7 @@ impl ExportCommand {
style::Reset
);
println!(
eprintln!(
"{}{}{}Bad Code:{} {}",
color::Fg(color::Red),
style::Italic,
@ -324,7 +242,7 @@ impl ExportCommand {
status.code()
);
println!(
eprintln!(
"{}{}{}Message:{} {}",
color::Fg(color::Cyan),
style::Italic,
@ -334,7 +252,7 @@ impl ExportCommand {
);
if !status.details().is_empty() {
println!(
eprintln!(
"{}{}{}Details:{} {}",
color::Fg(color::Cyan),
style::Italic,
@ -344,7 +262,7 @@ impl ExportCommand {
);
}
println!(
eprintln!(
"{}{}{}*********************************{}",
color::Fg(color::Black),
style::Italic,
@ -354,7 +272,7 @@ impl ExportCommand {
}
}
Error::BackendError(err) => {
println!(
eprintln!(
"{}{}{}Exporting Failed!{}",
color::Fg(color::Red),
style::Italic,
@ -362,7 +280,7 @@ impl ExportCommand {
style::Reset
);
println!(
eprintln!(
"{}{}{}****************************************{}",
color::Fg(color::Black),
style::Italic,
@ -370,7 +288,7 @@ impl ExportCommand {
style::Reset
);
println!(
eprintln!(
"{}{}{}Message:{} {}",
color::Fg(color::Red),
style::Italic,
@ -380,7 +298,7 @@ impl ExportCommand {
);
if err.header.is_some() {
println!(
eprintln!(
"{}{}{}Header:{}",
color::Fg(color::Cyan),
style::Italic,
@ -388,11 +306,11 @@ impl ExportCommand {
style::Reset
);
for (key, value) in err.header.unwrap_or_default().iter() {
println!(" [{}]: {}", key.as_str(), value.to_str().unwrap());
eprintln!(" [{}]: {}", key.as_str(), value.to_str().unwrap());
}
}
println!(
eprintln!(
"{}{}{}****************************************{}",
color::Fg(color::Black),
style::Italic,
@ -401,7 +319,7 @@ impl ExportCommand {
);
}
err => {
println!(
eprintln!(
"{}{}{}Exporting Failed!{}",
color::Fg(color::Red),
style::Italic,
@ -409,7 +327,7 @@ impl ExportCommand {
style::Reset
);
println!(
eprintln!(
"{}{}{}****************************************{}",
color::Fg(color::Black),
style::Italic,
@ -417,7 +335,7 @@ impl ExportCommand {
style::Reset
);
println!(
eprintln!(
"{}{}{}Message:{} {}",
color::Fg(color::Red),
style::Italic,
@ -426,7 +344,7 @@ impl ExportCommand {
err
);
println!(
eprintln!(
"{}{}{}****************************************{}",
color::Fg(color::Black),
style::Italic,
@ -442,24 +360,11 @@ impl ExportCommand {
Ok(())
}
/// Executes the export operation to retrieve cached files from the persistent cache system.
///
/// This function handles the core export functionality by downloading a cached file from the
/// dfdaemon persistent cache system. It supports two transfer modes: direct file transfer
/// by dfdaemon (hardlink/copy) or streaming piece content through the client for manual
/// file assembly. The operation provides real-time progress feedback and handles file
/// creation, directory setup, and efficient piece-by-piece writing with sparse file allocation.
/// run runs the export command.
async fn run(&self, dfdaemon_download_client: DfdaemonDownloadClient) -> Result<()> {
// Dfcache needs to notify dfdaemon to transfer the piece content of the downloading file via unix domain socket
// when the `transfer_from_dfdaemon` is true. Otherwise, dfdaemon will download the file and hardlink or
// copy the file to the output path.
let (output_path, need_piece_content) = if self.transfer_from_dfdaemon {
(None, true)
} else {
// Get the absolute path of the output file.
let absolute_path = Path::new(&self.output).absolutize()?;
info!("export file to: {}", absolute_path.to_string_lossy());
(Some(absolute_path.to_string_lossy().to_string()), false)
};
info!("download file to: {}", absolute_path.to_string_lossy());
// Create dfdaemon client.
let response = dfdaemon_download_client
@ -470,50 +375,21 @@ impl ExportCommand {
persistent: false,
tag: Some(self.tag.clone()),
application: Some(self.application.clone()),
output_path,
output_path: absolute_path.to_string_lossy().to_string(),
timeout: Some(
prost_wkt_types::Duration::try_from(self.timeout)
.or_err(ErrorType::ParseError)?,
),
need_piece_content,
force_hard_link: self.force_hard_link,
digest: self.digest.clone(),
remote_ip: Some(local_ip().unwrap().to_string()),
})
.await
.inspect_err(|err| {
.map_err(|err| {
error!("download persistent cache task failed: {}", err);
err
})?;
// If transfer_from_dfdaemon is true, then dfcache needs to create the output file and write the
// piece content to the output file.
let mut f = if self.transfer_from_dfdaemon {
if let Some(parent) = self.output.parent() {
if !parent.exists() {
fs::create_dir_all(parent).await.inspect_err(|err| {
error!("failed to create directory {:?}: {}", parent, err);
})?;
}
}
let f = OpenOptions::new()
.create_new(true)
.write(true)
.mode(dfcache::DEFAULT_OUTPUT_FILE_MODE)
.open(&self.output)
.await
.inspect_err(|err| {
error!("open file {:?} failed: {}", self.output, err);
})?;
Some(f)
} else {
None
};
// Initialize progress bar.
let progress_bar = ProgressBar::new(0);
progress_bar.set_style(
let pb = ProgressBar::new(0);
pb.set_style(
ProgressStyle::with_template(
"[{elapsed_precise}] [{wide_bar}] {bytes}/{total_bytes} ({bytes_per_sec}, {eta})",
)
@ -527,62 +403,34 @@ impl ExportCommand {
// Download file.
let mut downloaded = 0;
let mut out_stream = response.into_inner();
while let Some(message) = out_stream.message().await.inspect_err(|err| {
while let Some(message) = out_stream.message().await.map_err(|err| {
error!("get message failed: {}", err);
err
})? {
match message.response {
Some(download_persistent_cache_task_response::Response::DownloadPersistentCacheTaskStartedResponse(
response,
)) => {
if let Some(f) = &f {
fallocate(f, response.content_length)
.await
.inspect_err(|err| {
error!("fallocate {:?} failed: {}", self.output, err);
})?;
}
progress_bar.set_length(response.content_length);
pb.set_length(response.content_length);
}
Some(download_persistent_cache_task_response::Response::DownloadPieceFinishedResponse(
response,
)) => {
let piece = response.piece.ok_or(Error::InvalidParameter)?;
// Dfcache needs to write the piece content to the output file.
if let Some(f) = &mut f {
f.seek(SeekFrom::Start(piece.offset))
.await
.inspect_err(|err| {
error!("seek {:?} failed: {}", self.output, err);
})?;
let content = piece.content.ok_or(Error::InvalidParameter)?;
f.write_all(&content).await.inspect_err(|err| {
error!("write {:?} failed: {}", self.output, err);
})?;
debug!("copy piece {} to {:?} success", piece.number, self.output);
};
downloaded += piece.length;
let position = min(downloaded + piece.length, progress_bar.length().unwrap_or(0));
progress_bar.set_position(position);
let position = min(downloaded + piece.length, pb.length().unwrap_or(0));
pb.set_position(position);
}
None => {}
}
}
progress_bar.finish_with_message("downloaded");
pb.finish_with_message("downloaded");
Ok(())
}
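
When --transfer-from-dfdaemon is set, each piece arrives with its offset and the client assembles the file itself, which is why the loop above seeks before every write. A reduced standalone sketch of that piece-writing pattern (the output path and piece data are hypothetical):

use tokio::fs::OpenOptions;
use tokio::io::{AsyncSeekExt, AsyncWriteExt, SeekFrom};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let mut output = OpenOptions::new()
        .create(true)
        .write(true)
        .open("/tmp/exported-file") // hypothetical output path
        .await?;

    // Each received piece carries (offset, content); write it at its own offset.
    let pieces: Vec<(u64, Vec<u8>)> = vec![(0, vec![b'a'; 4]), (4, vec![b'b'; 4])];
    for (offset, content) in pieces {
        output.seek(SeekFrom::Start(offset)).await?;
        output.write_all(&content).await?;
    }
    output.flush().await?;
    Ok(())
}
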
/// Validates command line arguments for the export operation to ensure safe file output.
///
/// This function performs essential validation of the output path to prevent file conflicts
/// and ensure the target location is suitable for export operations. It checks parent
/// directory existence, prevents accidental file overwrites, and validates path accessibility
/// before allowing the export operation to proceed.
/// validate_args validates the command line arguments.
fn validate_args(&self) -> Result<()> {
let absolute_path = Path::new(&self.output).absolutize()?;
match absolute_path.parent() {

View File

@ -14,22 +14,17 @@
* limitations under the License.
*/
use bytesize::ByteSize;
use clap::Parser;
use dragonfly_api::dfdaemon::v2::UploadPersistentCacheTaskRequest;
use dragonfly_client::resource::piece::MIN_PIECE_LENGTH;
use dragonfly_client_config::dfcache::default_dfcache_persistent_replica_count;
use dragonfly_client_core::{
error::{ErrorType, OrErr},
Error, Result,
};
use indicatif::{ProgressBar, ProgressStyle};
use local_ip_address::local_ip;
use path_absolutize::*;
use std::path::{Path, PathBuf};
use std::time::Duration;
use termion::{color, style};
use tracing::info;
use super::*;
@ -42,12 +37,6 @@ pub struct ImportCommand {
#[arg(help = "Specify the path of the file to import")]
path: PathBuf,
#[arg(
long = "content-for-calculating-task-id",
help = "Specify the content used to calculate the persistent cache task ID. If it is set, use its value to calculate the task ID, Otherwise, calculate the persistent cache task ID based on url, piece-length, tag, application, and filtered-query-params."
)]
content_for_calculating_task_id: Option<String>,
#[arg(
long = "persistent-replica-count",
default_value_t = default_dfcache_persistent_replica_count(),
@ -55,17 +44,10 @@ pub struct ImportCommand {
)]
persistent_replica_count: u64,
#[arg(
long = "piece-length",
required = false,
help = "Specify the piece length for downloading file. If the piece length is not specified, the piece length will be calculated according to the file size. Different piece lengths will be divided into different persistent cache tasks. The value needs to be set with human readable format and needs to be greater than or equal to 4mib, for example: 4mib, 1gib"
)]
piece_length: Option<ByteSize>,
#[arg(
long = "application",
required = false,
help = "Different applications for the same url will be divided into different persistent cache tasks"
help = "Caller application which is used for statistics and access control"
)]
application: Option<String>,
@ -91,72 +73,15 @@ pub struct ImportCommand {
help = "Specify the timeout for importing a file"
)]
timeout: Duration,
#[arg(
short = 'e',
long = "endpoint",
default_value_os_t = dfdaemon::default_download_unix_socket_path(),
help = "Endpoint of dfdaemon's GRPC server"
)]
endpoint: PathBuf,
#[arg(
short = 'l',
long,
default_value = "info",
help = "Specify the logging level [trace, debug, info, warn, error]"
)]
log_level: Level,
#[arg(
long,
default_value_os_t = dfcache::default_dfcache_log_dir(),
help = "Specify the log directory"
)]
log_dir: PathBuf,
#[arg(
long,
default_value_t = 6,
help = "Specify the max number of log files"
)]
log_max_files: usize,
#[arg(long, default_value_t = false, help = "Specify whether to print log")]
console: bool,
}
/// Implement the execute for ImportCommand.
impl ImportCommand {
/// Executes the import sub command with comprehensive validation and error handling.
///
/// This function serves as the main entry point for the dfcache import command execution.
/// It handles the complete workflow including argument parsing, validation, logging setup,
/// dfdaemon client connection, and import operation execution. The function provides
/// detailed error reporting with colored terminal output and follows a fail-fast approach
/// with immediate process termination on any critical failures.
pub async fn execute(&self) -> Result<()> {
// Parse command line arguments.
Args::parse();
// Initialize tracing.
let _guards = init_tracing(
dfcache::NAME,
self.log_dir.clone(),
self.log_level,
self.log_max_files,
None,
None,
None,
None,
None,
false,
self.console,
);
/// execute executes the import sub command.
pub async fn execute(&self, endpoint: &Path) -> Result<()> {
// Validate the command line arguments.
if let Err(err) = self.validate_args() {
println!(
eprintln!(
"{}{}{}Validating Failed!{}",
color::Fg(color::Red),
style::Italic,
@ -164,7 +89,7 @@ impl ImportCommand {
style::Reset
);
println!(
eprintln!(
"{}{}{}****************************************{}",
color::Fg(color::Black),
style::Italic,
@ -172,7 +97,7 @@ impl ImportCommand {
style::Reset
);
println!(
eprintln!(
"{}{}{}Message:{} {}",
color::Fg(color::Cyan),
style::Italic,
@ -181,7 +106,7 @@ impl ImportCommand {
err,
);
println!(
eprintln!(
"{}{}{}****************************************{}",
color::Fg(color::Black),
style::Italic,
@ -194,10 +119,10 @@ impl ImportCommand {
// Get dfdaemon download client.
let dfdaemon_download_client =
match get_dfdaemon_download_client(self.endpoint.to_path_buf()).await {
match get_dfdaemon_download_client(endpoint.to_path_buf()).await {
Ok(client) => client,
Err(err) => {
println!(
eprintln!(
"{}{}{}Connect Dfdaemon Failed!{}",
color::Fg(color::Red),
style::Italic,
@ -205,7 +130,7 @@ impl ImportCommand {
style::Reset
);
println!(
eprintln!(
"{}{}{}****************************************{}",
color::Fg(color::Black),
style::Italic,
@ -213,17 +138,17 @@ impl ImportCommand {
style::Reset
);
println!(
eprintln!(
"{}{}{}Message:{}, can not connect {}, please check the unix socket {}",
color::Fg(color::Cyan),
style::Italic,
style::Bold,
style::Reset,
err,
self.endpoint.to_string_lossy(),
endpoint.to_string_lossy(),
);
println!(
eprintln!(
"{}{}{}****************************************{}",
color::Fg(color::Black),
style::Italic,
@ -239,7 +164,7 @@ impl ImportCommand {
if let Err(err) = self.run(dfdaemon_download_client).await {
match err {
Error::TonicStatus(status) => {
println!(
eprintln!(
"{}{}{}Importing Failed!{}",
color::Fg(color::Red),
style::Italic,
@ -247,7 +172,7 @@ impl ImportCommand {
style::Reset,
);
println!(
eprintln!(
"{}{}{}*********************************{}",
color::Fg(color::Black),
style::Italic,
@ -255,7 +180,7 @@ impl ImportCommand {
style::Reset
);
println!(
eprintln!(
"{}{}{}Bad Code:{} {}",
color::Fg(color::Red),
style::Italic,
@ -264,7 +189,7 @@ impl ImportCommand {
status.code()
);
println!(
eprintln!(
"{}{}{}Message:{} {}",
color::Fg(color::Cyan),
style::Italic,
@ -273,7 +198,7 @@ impl ImportCommand {
status.message()
);
println!(
eprintln!(
"{}{}{}Details:{} {}",
color::Fg(color::Cyan),
style::Italic,
@ -282,7 +207,7 @@ impl ImportCommand {
std::str::from_utf8(status.details()).unwrap()
);
println!(
eprintln!(
"{}{}{}*********************************{}",
color::Fg(color::Black),
style::Italic,
@ -291,7 +216,7 @@ impl ImportCommand {
);
}
err => {
println!(
eprintln!(
"{}{}{}Importing Failed!{}",
color::Fg(color::Red),
style::Italic,
@ -299,7 +224,7 @@ impl ImportCommand {
style::Reset
);
println!(
eprintln!(
"{}{}{}****************************************{}",
color::Fg(color::Black),
style::Italic,
@ -307,7 +232,7 @@ impl ImportCommand {
style::Reset
);
println!(
eprintln!(
"{}{}{}Message:{} {}",
color::Fg(color::Red),
style::Italic,
@ -316,7 +241,7 @@ impl ImportCommand {
err
);
println!(
eprintln!(
"{}{}{}****************************************{}",
color::Fg(color::Black),
style::Italic,
@ -332,34 +257,23 @@ impl ImportCommand {
Ok(())
}
/// Executes the cache import operation by uploading a file to the persistent cache system.
///
/// This function handles the core import functionality by uploading a local file to the
/// dfdaemon persistent cache system. It provides visual feedback through a progress spinner,
/// converts the file path to absolute format, and configures the cache task with specified
/// parameters including TTL, replica count, and piece length. The operation is asynchronous
/// and provides completion feedback with the generated task ID.
/// run runs the import sub command.
async fn run(&self, dfdaemon_download_client: DfdaemonDownloadClient) -> Result<()> {
let absolute_path = Path::new(&self.path).absolutize()?;
info!("import file: {}", absolute_path.to_string_lossy());
let progress_bar = ProgressBar::new_spinner();
progress_bar.enable_steady_tick(DEFAULT_PROGRESS_BAR_STEADY_TICK_INTERVAL);
progress_bar.set_style(
let pb = ProgressBar::new_spinner();
pb.enable_steady_tick(DEFAULT_PROGRESS_BAR_STEADY_TICK_INTERVAL);
pb.set_style(
ProgressStyle::with_template("{spinner:.blue} {msg}")
.unwrap()
.tick_strings(&["", "", "", "", "", "", "", ""]),
);
progress_bar.set_message("Importing...");
pb.set_message("Importing...");
let persistent_cache_task = dfdaemon_download_client
dfdaemon_download_client
.upload_persistent_cache_task(UploadPersistentCacheTaskRequest {
content_for_calculating_task_id: self.content_for_calculating_task_id.clone(),
path: absolute_path.to_string_lossy().to_string(),
path: self.path.clone().into_os_string().into_string().unwrap(),
persistent_replica_count: self.persistent_replica_count,
tag: self.tag.clone(),
application: self.application.clone(),
piece_length: self.piece_length.map(|piece_length| piece_length.as_u64()),
ttl: Some(
prost_wkt_types::Duration::try_from(self.ttl).or_err(ErrorType::ParseError)?,
),
@ -367,30 +281,15 @@ impl ImportCommand {
prost_wkt_types::Duration::try_from(self.timeout)
.or_err(ErrorType::ParseError)?,
),
remote_ip: Some(local_ip().unwrap().to_string()),
})
.await?;
progress_bar.finish_with_message(format!("Done: {}", persistent_cache_task.id));
pb.finish_with_message("Done");
Ok(())
}
/// Validates command line arguments for the import operation to ensure safe and correct execution.
///
/// This function performs comprehensive validation of import-specific parameters to prevent
/// invalid operations and ensure the import request meets all system requirements. It validates
/// TTL boundaries, file existence and type, and piece length constraints before allowing the
/// import operation to proceed.
/// validate_args validates the command line arguments.
fn validate_args(&self) -> Result<()> {
if self.ttl < Duration::from_secs(5 * 60)
|| self.ttl > Duration::from_secs(7 * 24 * 60 * 60)
{
return Err(Error::ValidationError(format!(
"ttl must be between 5 minutes and 7 days, but got {}",
self.ttl.as_secs()
)));
}
if self.path.is_dir() {
return Err(Error::ValidationError(format!(
"path {} is a directory",
@ -405,16 +304,6 @@ impl ImportCommand {
)));
}
if let Some(piece_length) = self.piece_length {
if piece_length.as_u64() < MIN_PIECE_LENGTH {
return Err(Error::ValidationError(format!(
"piece length {} bytes is less than the minimum piece length {} bytes",
piece_length.as_u64(),
MIN_PIECE_LENGTH
)));
}
}
Ok(())
}
}

View File

@ -21,11 +21,12 @@ use dragonfly_client::tracing::init_tracing;
use dragonfly_client_config::VersionValueParser;
use dragonfly_client_config::{dfcache, dfdaemon};
use dragonfly_client_core::Result;
use std::path::PathBuf;
use std::path::{Path, PathBuf};
use tracing::Level;
pub mod export;
pub mod import;
pub mod remove;
pub mod stat;
#[derive(Debug, Parser)]
@ -39,6 +40,43 @@ pub mod stat;
disable_version_flag = true
)]
struct Args {
#[arg(
short = 'e',
long = "endpoint",
default_value_os_t = dfdaemon::default_download_unix_socket_path(),
help = "Endpoint of dfdaemon's GRPC server"
)]
endpoint: PathBuf,
#[arg(
short = 'l',
long,
default_value = "info",
help = "Specify the logging level [trace, debug, info, warn, error]"
)]
log_level: Level,
#[arg(
long,
default_value_os_t = dfcache::default_dfcache_log_dir(),
help = "Specify the log directory"
)]
log_dir: PathBuf,
#[arg(
long,
default_value_t = 6,
help = "Specify the max number of log files"
)]
log_max_files: usize,
#[arg(
long = "verbose",
default_value_t = false,
help = "Specify whether to print log"
)]
verbose: bool,
#[arg(
short = 'V',
long = "version",
@ -82,16 +120,26 @@ pub enum Command {
long_about = "Stat a file in Dragonfly P2P network by task ID. If stat successfully, it will return the file information."
)]
Stat(stat::StatCommand),
#[command(
name = "rm",
author,
version,
about = "Remove a file from Dragonfly P2P network",
long_about = "Remove the P2P cache in Dragonfly P2P network by task ID."
)]
Remove(remove::RemoveCommand),
}
/// Implement the execute for Command.
impl Command {
#[allow(unused)]
pub async fn execute(self) -> Result<()> {
pub async fn execute(self, endpoint: &Path) -> Result<()> {
match self {
Self::Import(cmd) => cmd.execute().await,
Self::Export(cmd) => cmd.execute().await,
Self::Stat(cmd) => cmd.execute().await,
Self::Import(cmd) => cmd.execute(endpoint).await,
Self::Export(cmd) => cmd.execute(endpoint).await,
Self::Stat(cmd) => cmd.execute(endpoint).await,
Self::Remove(cmd) => cmd.execute(endpoint).await,
}
}
}
@ -101,17 +149,24 @@ async fn main() -> anyhow::Result<()> {
// Parse command line arguments.
let args = Args::parse();
// Initialize tracing.
let _guards = init_tracing(
dfcache::NAME,
args.log_dir,
args.log_level,
args.log_max_files,
None,
false,
false,
args.verbose,
);
// Execute the command.
args.command.execute().await?;
args.command.execute(&args.endpoint).await?;
Ok(())
}
/// Creates and validates a dfdaemon download client with health checking.
///
/// This function establishes a connection to the dfdaemon service via Unix domain socket
/// and performs a health check to ensure the service is running and ready to handle
/// download requests. Only after successful health verification does it return the
/// download client for actual use.
/// get_and_check_dfdaemon_download_client gets a dfdaemon download client and checks its health.
pub async fn get_dfdaemon_download_client(endpoint: PathBuf) -> Result<DfdaemonDownloadClient> {
// Check dfdaemon's health.
let health_client = HealthClient::new_unix(endpoint.clone()).await?;
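For orientation, a minimal sketch of how the subcommands above consume this helper once the shared `--endpoint` flag is parsed (the call shape is taken from the import/stat/remove commands in this diff; the colored error reporting is omitted):

    // Sketch: each subcommand turns the shared endpoint into a healthy download client,
    // then issues its request (upload/stat/delete persistent cache task).
    let dfdaemon_download_client = get_dfdaemon_download_client(endpoint.to_path_buf()).await?;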

View File

@ -0,0 +1,201 @@
/*
* Copyright 2024 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use clap::Parser;
use dragonfly_api::dfdaemon::v2::DeletePersistentCacheTaskRequest;
use dragonfly_client_core::{Error, Result};
use indicatif::{ProgressBar, ProgressStyle};
use std::path::Path;
use std::time::Duration;
use termion::{color, style};
use super::*;
/// DEFAULT_PROGRESS_BAR_STEADY_TICK_INTERVAL is the default steady tick interval of progress bar.
const DEFAULT_PROGRESS_BAR_STEADY_TICK_INTERVAL: Duration = Duration::from_millis(80);
/// RemoveCommand is the subcommand of remove.
#[derive(Debug, Clone, Parser)]
pub struct RemoveCommand {
#[arg(help = "Specify the persistent cache task ID to remove")]
id: String,
}
/// Implement the execute for RemoveCommand.
impl RemoveCommand {
/// execute executes the delete command.
pub async fn execute(&self, endpoint: &Path) -> Result<()> {
// Get dfdaemon download client.
let dfdaemon_download_client =
match get_dfdaemon_download_client(endpoint.to_path_buf()).await {
Ok(client) => client,
Err(err) => {
eprintln!(
"{}{}{}Connect Dfdaemon Failed!{}",
color::Fg(color::Red),
style::Italic,
style::Bold,
style::Reset
);
eprintln!(
"{}{}{}****************************************{}",
color::Fg(color::Black),
style::Italic,
style::Bold,
style::Reset
);
eprintln!(
"{}{}{}Message:{}, can not connect {}, please check the unix socket {}",
color::Fg(color::Cyan),
style::Italic,
style::Bold,
style::Reset,
err,
endpoint.to_string_lossy(),
);
eprintln!(
"{}{}{}****************************************{}",
color::Fg(color::Black),
style::Italic,
style::Bold,
style::Reset
);
std::process::exit(1);
}
};
// Run delete sub command.
if let Err(err) = self.run(dfdaemon_download_client).await {
match err {
Error::TonicStatus(status) => {
eprintln!(
"{}{}{}Removing Failed!{}",
color::Fg(color::Red),
style::Italic,
style::Bold,
style::Reset,
);
eprintln!(
"{}{}{}*********************************{}",
color::Fg(color::Black),
style::Italic,
style::Bold,
style::Reset
);
eprintln!(
"{}{}{}Bad Code:{} {}",
color::Fg(color::Red),
style::Italic,
style::Bold,
style::Reset,
status.code()
);
eprintln!(
"{}{}{}Message:{} {}",
color::Fg(color::Cyan),
style::Italic,
style::Bold,
style::Reset,
status.message()
);
eprintln!(
"{}{}{}Details:{} {}",
color::Fg(color::Cyan),
style::Italic,
style::Bold,
style::Reset,
std::str::from_utf8(status.details()).unwrap()
);
eprintln!(
"{}{}{}*********************************{}",
color::Fg(color::Black),
style::Italic,
style::Bold,
style::Reset
);
}
err => {
eprintln!(
"{}{}{}Removing Failed!{}",
color::Fg(color::Red),
style::Italic,
style::Bold,
style::Reset
);
eprintln!(
"{}{}{}****************************************{}",
color::Fg(color::Black),
style::Italic,
style::Bold,
style::Reset
);
eprintln!(
"{}{}{}Message:{} {}",
color::Fg(color::Red),
style::Italic,
style::Bold,
style::Reset,
err
);
eprintln!(
"{}{}{}****************************************{}",
color::Fg(color::Black),
style::Italic,
style::Bold,
style::Reset
);
}
}
std::process::exit(1);
}
Ok(())
}
/// run runs the delete command.
async fn run(&self, dfdaemon_download_client: DfdaemonDownloadClient) -> Result<()> {
let pb = ProgressBar::new_spinner();
pb.enable_steady_tick(DEFAULT_PROGRESS_BAR_STEADY_TICK_INTERVAL);
pb.set_style(
ProgressStyle::with_template("{spinner:.blue} {msg}")
.unwrap()
.tick_strings(&["", "", "", "", "", "", "", ""]),
);
pb.set_message("Removing...");
dfdaemon_download_client
.delete_persistent_cache_task(DeletePersistentCacheTaskRequest {
task_id: self.id.clone(),
})
.await?;
pb.finish_with_message("Done");
Ok(())
}
}

View File

@ -22,7 +22,7 @@ use dragonfly_client_core::{
Error, Result,
};
use humantime::format_duration;
use local_ip_address::local_ip;
use std::path::Path;
use std::time::Duration;
use tabled::{
settings::{object::Rows, Alignment, Modify, Style},
@ -37,75 +37,18 @@ use super::*;
pub struct StatCommand {
#[arg(help = "Specify the persistent cache task ID to stat")]
id: String,
#[arg(
short = 'e',
long = "endpoint",
default_value_os_t = dfdaemon::default_download_unix_socket_path(),
help = "Endpoint of dfdaemon's GRPC server"
)]
endpoint: PathBuf,
#[arg(
short = 'l',
long,
default_value = "info",
help = "Specify the logging level [trace, debug, info, warn, error]"
)]
log_level: Level,
#[arg(
long,
default_value_os_t = dfcache::default_dfcache_log_dir(),
help = "Specify the log directory"
)]
log_dir: PathBuf,
#[arg(
long,
default_value_t = 6,
help = "Specify the max number of log files"
)]
log_max_files: usize,
#[arg(long, default_value_t = false, help = "Specify whether to print log")]
console: bool,
}
/// Implement the execute for StatCommand.
impl StatCommand {
/// Executes the stat command with comprehensive error handling and user feedback.
///
/// This function serves as the main entry point for the dfcache stat command execution.
/// It handles the complete lifecycle including argument parsing, logging initialization,
/// dfdaemon client setup, and command execution with detailed error reporting. The
/// function provides colored terminal output for better user experience and exits
/// with appropriate status codes on failure.
pub async fn execute(&self) -> Result<()> {
// Parse command line arguments.
Args::parse();
// Initialize tracing.
let _guards = init_tracing(
dfcache::NAME,
self.log_dir.clone(),
self.log_level,
self.log_max_files,
None,
None,
None,
None,
None,
false,
self.console,
);
/// execute executes the stat command.
pub async fn execute(&self, endpoint: &Path) -> Result<()> {
// Get dfdaemon download client.
let dfdaemon_download_client =
match get_dfdaemon_download_client(self.endpoint.to_path_buf()).await {
match get_dfdaemon_download_client(endpoint.to_path_buf()).await {
Ok(client) => client,
Err(err) => {
println!(
eprintln!(
"{}{}{}Connect Dfdaemon Failed!{}",
color::Fg(color::Red),
style::Italic,
@ -113,7 +56,7 @@ impl StatCommand {
style::Reset
);
println!(
eprintln!(
"{}{}{}****************************************{}",
color::Fg(color::Black),
style::Italic,
@ -121,17 +64,17 @@ impl StatCommand {
style::Reset
);
println!(
eprintln!(
"{}{}{}Message:{}, can not connect {}, please check the unix socket {}",
color::Fg(color::Cyan),
style::Italic,
style::Bold,
style::Reset,
err,
self.endpoint.to_string_lossy(),
endpoint.to_string_lossy(),
);
println!(
eprintln!(
"{}{}{}****************************************{}",
color::Fg(color::Black),
style::Italic,
@ -147,7 +90,7 @@ impl StatCommand {
if let Err(err) = self.run(dfdaemon_download_client).await {
match err {
Error::TonicStatus(status) => {
println!(
eprintln!(
"{}{}{}Stating Failed!{}",
color::Fg(color::Red),
style::Italic,
@ -155,7 +98,7 @@ impl StatCommand {
style::Reset,
);
println!(
eprintln!(
"{}{}{}*********************************{}",
color::Fg(color::Black),
style::Italic,
@ -163,7 +106,7 @@ impl StatCommand {
style::Reset
);
println!(
eprintln!(
"{}{}{}Bad Code:{} {}",
color::Fg(color::Red),
style::Italic,
@ -172,7 +115,7 @@ impl StatCommand {
status.code()
);
println!(
eprintln!(
"{}{}{}Message:{} {}",
color::Fg(color::Cyan),
style::Italic,
@ -181,7 +124,7 @@ impl StatCommand {
status.message()
);
println!(
eprintln!(
"{}{}{}Details:{} {}",
color::Fg(color::Cyan),
style::Italic,
@ -190,7 +133,7 @@ impl StatCommand {
std::str::from_utf8(status.details()).unwrap()
);
println!(
eprintln!(
"{}{}{}*********************************{}",
color::Fg(color::Black),
style::Italic,
@ -199,7 +142,7 @@ impl StatCommand {
);
}
err => {
println!(
eprintln!(
"{}{}{}Stating Failed!{}",
color::Fg(color::Red),
style::Italic,
@ -207,7 +150,7 @@ impl StatCommand {
style::Reset
);
println!(
eprintln!(
"{}{}{}****************************************{}",
color::Fg(color::Black),
style::Italic,
@ -215,7 +158,7 @@ impl StatCommand {
style::Reset
);
println!(
eprintln!(
"{}{}{}Message:{} {}",
color::Fg(color::Red),
style::Italic,
@ -224,7 +167,7 @@ impl StatCommand {
err
);
println!(
eprintln!(
"{}{}{}****************************************{}",
color::Fg(color::Black),
style::Italic,
@ -240,17 +183,11 @@ impl StatCommand {
Ok(())
}
/// Executes the stat command to retrieve and display persistent cache task information.
///
/// This function queries the dfdaemon service for detailed information about a specific
/// persistent cache task and presents it in a formatted table for user consumption.
/// It handles data conversion from raw protocol buffer values to human-readable formats
/// including byte sizes, durations, and timestamps with proper timezone conversion.
/// run runs the stat command.
async fn run(&self, dfdaemon_download_client: DfdaemonDownloadClient) -> Result<()> {
let task = dfdaemon_download_client
.stat_persistent_cache_task(StatPersistentCacheTaskRequest {
task_id: self.id.clone(),
remote_ip: Some(local_ip().unwrap().to_string()),
})
.await?;

View File

@ -15,7 +15,7 @@
*/
use clap::Parser;
use dragonfly_client::announcer::SchedulerAnnouncer;
use dragonfly_client::announcer::{ManagerAnnouncer, SchedulerAnnouncer};
use dragonfly_client::dynconfig::Dynconfig;
use dragonfly_client::gc::GC;
use dragonfly_client::grpc::{
@ -30,15 +30,14 @@ use dragonfly_client::shutdown;
use dragonfly_client::stats::Stats;
use dragonfly_client::tracing::init_tracing;
use dragonfly_client_backend::BackendFactory;
use dragonfly_client_config::{dfdaemon, VersionValueParser};
use dragonfly_client_config::dfdaemon;
use dragonfly_client_config::VersionValueParser;
use dragonfly_client_storage::Storage;
use dragonfly_client_util::{id_generator::IDGenerator, net::Interface};
use dragonfly_client_util::id_generator::IDGenerator;
use std::net::SocketAddr;
use std::path::PathBuf;
use std::sync::Arc;
use termion::{color, style};
use tokio::sync::mpsc;
use tokio::sync::Barrier;
use tracing::{error, info, Level};
#[cfg(not(target_env = "msvc"))]
@ -91,8 +90,12 @@ struct Args {
)]
log_max_files: usize,
#[arg(long, default_value_t = true, help = "Specify whether to print log")]
console: bool,
#[arg(
long = "verbose",
default_value_t = false,
help = "Specify whether to print log"
)]
verbose: bool,
#[arg(
short = 'V',
@ -111,32 +114,7 @@ async fn main() -> Result<(), anyhow::Error> {
let args = Args::parse();
// Load config.
let config = match dfdaemon::Config::load(&args.config).await {
Ok(config) => config,
Err(err) => {
println!(
"{}{}Load config {} error: {}{}\n",
color::Fg(color::Red),
style::Bold,
args.config.display(),
err,
style::Reset
);
println!(
"{}{}If the file does not exist, you need to new a default config file refer to: {}{}{}{}https://d7y.io/docs/next/reference/configuration/client/dfdaemon/{}",
color::Fg(color::Yellow),
style::Bold,
style::Reset,
color::Fg(color::Cyan),
style::Underline,
style::Italic,
style::Reset,
);
std::process::exit(1);
}
};
let config = dfdaemon::Config::load(&args.config).await?;
let config = Arc::new(config);
// Initialize tracing.
@ -145,20 +123,18 @@ async fn main() -> Result<(), anyhow::Error> {
args.log_dir.clone(),
args.log_level,
args.log_max_files,
config.tracing.protocol.clone(),
config.tracing.endpoint.clone(),
config.tracing.path.clone(),
Some(config.tracing.headers.clone()),
Some(config.host.clone()),
config.seed_peer.enable,
args.console,
config.tracing.addr.to_owned(),
config.tracing.flamegraph,
true,
args.verbose,
);
// Initialize storage.
let storage = Storage::new(config.clone(), config.storage.dir.as_path(), args.log_dir)
.await
.inspect_err(|err| {
.map_err(|err| {
error!("initialize storage failed: {}", err);
err
})?;
let storage = Arc::new(storage);
@ -173,8 +149,9 @@ async fn main() -> Result<(), anyhow::Error> {
// Initialize manager client.
let manager_client = ManagerClient::new(config.clone(), config.manager.addr.clone())
.await
.inspect_err(|err| {
.map_err(|err| {
error!("initialize manager client failed: {}", err);
err
})?;
let manager_client = Arc::new(manager_client);
@ -190,22 +167,25 @@ async fn main() -> Result<(), anyhow::Error> {
shutdown_complete_tx.clone(),
)
.await
.inspect_err(|err| {
.map_err(|err| {
error!("initialize dynconfig server failed: {}", err);
err
})?;
let dynconfig = Arc::new(dynconfig);
// Initialize scheduler client.
let scheduler_client = SchedulerClient::new(config.clone(), dynconfig.clone())
.await
.inspect_err(|err| {
.map_err(|err| {
error!("initialize scheduler client failed: {}", err);
err
})?;
let scheduler_client = Arc::new(scheduler_client);
let backend_factory = BackendFactory::new(Some(config.server.plugin_dir.as_path()))
.inspect_err(|err| {
let backend_factory =
BackendFactory::new(Some(config.server.plugin_dir.as_path())).map_err(|err| {
error!("initialize backend factory failed: {}", err);
err
})?;
let backend_factory = Arc::new(backend_factory);
@ -216,7 +196,7 @@ async fn main() -> Result<(), anyhow::Error> {
storage.clone(),
scheduler_client.clone(),
backend_factory.clone(),
)?;
);
let task = Arc::new(task);
// Initialize persistent cache task manager.
@ -226,12 +206,9 @@ async fn main() -> Result<(), anyhow::Error> {
storage.clone(),
scheduler_client.clone(),
backend_factory.clone(),
)?;
);
let persistent_cache_task = Arc::new(persistent_cache_task);
let interface = Interface::new(config.host.ip.unwrap(), config.upload.rate_limit);
let interface = Arc::new(interface);
// Initialize health server.
let health = Health::new(
SocketAddr::new(config.health.server.ip.unwrap(), config.health.server.port),
@ -261,18 +238,26 @@ async fn main() -> Result<(), anyhow::Error> {
shutdown_complete_tx.clone(),
);
// Initialize manager announcer.
let manager_announcer = ManagerAnnouncer::new(
config.clone(),
manager_client.clone(),
shutdown.clone(),
shutdown_complete_tx.clone(),
);
// Initialize scheduler announcer.
let scheduler_announcer = SchedulerAnnouncer::new(
config.clone(),
id_generator.host_id(),
scheduler_client.clone(),
interface.clone(),
shutdown.clone(),
shutdown_complete_tx.clone(),
)
.await
.inspect_err(|err| {
.map_err(|err| {
error!("initialize scheduler announcer failed: {}", err);
err
})?;
// Initialize upload grpc server.
@ -281,14 +266,12 @@ async fn main() -> Result<(), anyhow::Error> {
SocketAddr::new(config.upload.server.ip.unwrap(), config.upload.server.port),
task.clone(),
persistent_cache_task.clone(),
interface.clone(),
shutdown.clone(),
shutdown_complete_tx.clone(),
);
// Initialize download grpc server.
let mut dfdaemon_download_grpc = DfdaemonDownloadServer::new(
config.clone(),
config.download.server.socket_path.clone(),
task.clone(),
persistent_cache_task.clone(),
@ -309,9 +292,6 @@ async fn main() -> Result<(), anyhow::Error> {
// Log dfdaemon started pid.
info!("dfdaemon started at pid {}", std::process::id());
// grpc server started barrier.
let grpc_server_started_barrier = Arc::new(Barrier::new(3));
// Wait for servers to exit or shutdown signal.
tokio::select! {
_ = tokio::spawn(async move { dynconfig.run().await }) => {
@ -330,41 +310,30 @@ async fn main() -> Result<(), anyhow::Error> {
info!("stats server exited");
},
_ = tokio::spawn(async move { manager_announcer.run().await.unwrap_or_else(|err| error!("announcer manager failed: {}", err))} ) => {
info!("announcer manager exited");
},
_ = tokio::spawn(async move { scheduler_announcer.run().await }) => {
info!("announcer scheduler exited");
},
_ = tokio::spawn(async move { gc.run().await }) => {
info!("garbage collector exited");
},
_ = {
let barrier = grpc_server_started_barrier.clone();
tokio::spawn(async move {
dfdaemon_upload_grpc.run(barrier).await.unwrap_or_else(|err| error!("dfdaemon upload grpc server failed: {}", err));
})
} => {
_ = tokio::spawn(async move { dfdaemon_upload_grpc.run().await.unwrap_or_else(|err| error!("dfdaemon upload grpc server failed: {}", err)) }) => {
info!("dfdaemon upload grpc server exited");
},
_ = {
let barrier = grpc_server_started_barrier.clone();
tokio::spawn(async move {
dfdaemon_download_grpc.run(barrier).await.unwrap_or_else(|err| error!("dfdaemon download grpc server failed: {}", err));
})
} => {
_ = tokio::spawn(async move { dfdaemon_download_grpc.run().await.unwrap_or_else(|err| error!("dfdaemon download grpc server failed: {}", err)) }) => {
info!("dfdaemon download grpc unix server exited");
},
_ = {
let barrier = grpc_server_started_barrier.clone();
tokio::spawn(async move {
proxy.run(barrier).await.unwrap_or_else(|err| error!("proxy server failed: {}", err));
})
} => {
_ = tokio::spawn(async move { proxy.run().await.unwrap_or_else(|err| error!("proxy server failed: {}", err)) }) => {
info!("proxy server exited");
},
_ = tokio::spawn(async move { gc.run().await }) => {
info!("garbage collector exited");
},
_ = shutdown::shutdown_signal() => {},
}
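The `grpc_server_started_barrier` above is sized to three because the upload, download, and proxy servers each receive a clone of it. A rough sketch of the rendezvous, assuming each server calls `wait()` once its listener is bound (the exact placement inside each `run()` is not shown in this hunk):

    // Hypothetical shape of a server's run() on the barrier-based side of this diff.
    async fn run(&mut self, grpc_server_started_barrier: Arc<Barrier>) -> Result<()> {
        // ... bind the listener and spawn the serve future ...
        grpc_server_started_barrier.wait().await; // completes once all three servers reach this point
        // ... block until a shutdown signal arrives ...
        Ok(())
    }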

File diff suppressed because it is too large

View File

@ -0,0 +1,133 @@
/*
* Copyright 2023 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use clap::{Parser, Subcommand};
use dragonfly_client::tracing::init_tracing;
use dragonfly_client_config::VersionValueParser;
use dragonfly_client_config::{dfdaemon, dfstore};
use std::path::PathBuf;
use tracing::Level;
#[derive(Debug, Parser)]
#[command(
name = dfstore::NAME,
author,
version,
about = "dfstore is a storage command line based on P2P technology in Dragonfly.",
long_about = "A storage command line based on P2P technology in Dragonfly that can rely on different types of object storage, \
such as S3 or OSS, to provide stable object storage capabilities. It uses the entire P2P network as a cache when storing objects. \
Rely on S3 or OSS as the backend to ensure storage reliability. In the process of object storage, \
P2P cache is effectively used for fast read and write storage.",
disable_version_flag = true
)]
struct Args {
#[arg(
short = 'e',
long = "endpoint",
default_value_os_t = dfdaemon::default_download_unix_socket_path(),
help = "Endpoint of dfdaemon's GRPC server"
)]
endpoint: PathBuf,
#[arg(
short = 'l',
long,
default_value = "info",
help = "Specify the logging level [trace, debug, info, warn, error]"
)]
log_level: Level,
#[arg(
long,
default_value_os_t = dfstore::default_dfstore_log_dir(),
help = "Specify the log directory"
)]
log_dir: PathBuf,
#[arg(
long,
default_value_t = 6,
help = "Specify the max number of log files"
)]
log_max_files: usize,
#[arg(
long = "verbose",
default_value_t = false,
help = "Specify whether to print log"
)]
verbose: bool,
#[arg(
short = 'V',
long = "version",
help = "Print version information",
default_value_t = false,
action = clap::ArgAction::SetTrue,
value_parser = VersionValueParser
)]
version: bool,
#[command(subcommand)]
command: Command,
}
#[derive(Debug, Clone, Subcommand)]
#[command()]
pub enum Command {
#[command(
name = "cp",
author,
version,
about = "Download or upload files using object storage in Dragonfly",
long_about = "Download a file from object storage in Dragonfly or upload a local file to object storage in Dragonfly"
)]
Copy(CopyCommand),
#[command(
name = "rm",
author,
version,
about = "Remove a file from Dragonfly object storage",
long_about = "Remove the P2P cache in Dragonfly and remove the file stored in the object storage."
)]
Remove(RemoveCommand),
}
/// Download or upload files using object storage in Dragonfly.
#[derive(Debug, Clone, Parser)]
pub struct CopyCommand {}
/// Remove a file from Dragonfly object storage.
#[derive(Debug, Clone, Parser)]
pub struct RemoveCommand {}
fn main() {
// Parse command line arguments.
let args = Args::parse();
// Initialize tracing.
let _guards = init_tracing(
dfstore::NAME,
args.log_dir,
args.log_level,
args.log_max_files,
None,
false,
false,
args.verbose,
);
}

View File

@ -25,7 +25,7 @@ use dragonfly_client_core::{Error, Result};
use std::sync::Arc;
use tokio::sync::{mpsc, Mutex, RwLock};
use tonic_health::pb::health_check_response::ServingStatus;
use tracing::{debug, error, info, instrument};
use tracing::{error, info, instrument};
use url::Url;
/// Data is the dynamic configuration of the dfdaemon.
@ -65,6 +65,7 @@ pub struct Dynconfig {
/// Dynconfig is the implementation of Dynconfig.
impl Dynconfig {
/// new creates a new Dynconfig.
#[instrument(skip_all)]
pub async fn new(
config: Arc<Config>,
manager_client: Arc<ManagerClient>,
@ -87,6 +88,7 @@ impl Dynconfig {
}
/// run starts the dynconfig server.
#[instrument(skip_all)]
pub async fn run(&self) {
// Clone the shutdown channel.
let mut shutdown = self.shutdown.clone();
@ -96,10 +98,9 @@ impl Dynconfig {
loop {
tokio::select! {
_ = interval.tick() => {
match self.refresh().await {
Err(err) => error!("refresh dynconfig failed: {}", err),
Ok(_) => debug!("refresh dynconfig success"),
}
if let Err(err) = self.refresh().await {
error!("refresh dynconfig failed: {}", err);
};
}
_ = shutdown.recv() => {
// Dynconfig server shutting down with signals.
@ -162,7 +163,6 @@ impl Dynconfig {
location: self.config.host.location.clone(),
version: CARGO_PKG_VERSION.to_string(),
commit: GIT_COMMIT_SHORT_HASH.to_string(),
scheduler_cluster_id: self.config.host.scheduler_cluster_id.unwrap_or(0),
})
.await
}

View File

@ -17,10 +17,13 @@
use crate::grpc::scheduler::SchedulerClient;
use crate::shutdown;
use chrono::Utc;
use dragonfly_api::scheduler::v2::DeleteTaskRequest;
use dragonfly_api::scheduler::v2::{DeletePersistentCacheTaskRequest, DeleteTaskRequest};
use dragonfly_client_config::dfdaemon::Config;
use dragonfly_client_core::Result;
use dragonfly_client_storage::{metadata, Storage};
use dragonfly_client_storage::{
content::{DEFAULT_CONTENT_DIR, DEFAULT_PERSISTENT_CACHE_TASK_DIR, DEFAULT_TASK_DIR},
metadata, Storage,
};
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::mpsc;
@ -53,6 +56,7 @@ pub struct GC {
impl GC {
/// new creates a new GC.
#[instrument(skip_all)]
pub fn new(
config: Arc<Config>,
host_id: String,
@ -72,6 +76,7 @@ impl GC {
}
/// run runs the garbage collector.
#[instrument(skip_all)]
pub async fn run(&self) {
// Clone the shutdown channel.
let mut shutdown = self.shutdown.clone();
@ -125,15 +130,22 @@ impl GC {
}
}
info!("evict by task ttl done");
Ok(())
}
/// evict_task_by_disk_usage evicts the task by disk usage.
#[instrument(skip_all)]
async fn evict_task_by_disk_usage(&self) -> Result<()> {
let available_space = self.storage.available_space()?;
let total_space = self.storage.total_space()?;
let stats = fs2::statvfs(
self.config
.storage
.dir
.join(DEFAULT_CONTENT_DIR)
.join(DEFAULT_TASK_DIR)
.as_path(),
)?;
let available_space = stats.available_space();
let total_space = stats.total_space();
// Calculate the usage percent.
let usage_percent = (100 - available_space * 100 / total_space) as u8;
@ -152,8 +164,6 @@ impl GC {
if let Err(err) = self.evict_task_space(need_evict_space as u64).await {
info!("failed to evict task by disk usage: {}", err);
}
info!("evict task by disk usage done");
}
Ok(())
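As a quick worked example of the percentage math above (numbers are illustrative; the high and low thresholds come from the GC policy in the dfdaemon config and are not shown in this hunk):

    // available_space = 20 GiB, total_space = 100 GiB
    // usage_percent   = 100 - 20 * 100 / 100 = 80
    // If 80 is above the configured high threshold, evict_task_space() is asked to free
    // enough bytes to bring usage back under the low threshold.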
@ -172,22 +182,13 @@ impl GC {
break;
}
// If the task has finished downloading, it has a content length and the evicted space is
// the content length. If the task started but did not download any data, the content
// length is 0 and the evicted space is 0.
// If the task has no content length, skip it.
let task_space = match task.content_length() {
Some(content_length) => content_length,
None => {
// If the task has no content length, skip it.
if !task.is_failed() {
error!("task {} has no content length", task.id);
continue;
}
// If the task has started and did not download the data, and content length is 0.
info!("task {} is failed, has no content length", task.id);
0
}
};
// If the task is started and not finished, and the task download is not timeout,
@ -233,24 +234,35 @@ impl GC {
/// evict_persistent_cache_task_by_ttl evicts the persistent cache task by ttl.
#[instrument(skip_all)]
async fn evict_persistent_cache_task_by_ttl(&self) -> Result<()> {
info!("start to evict by persistent cache task ttl");
info!("start to evict by persistent cache task ttl * 2");
for task in self.storage.get_persistent_cache_tasks()? {
// If the persistent cache task is expired and not uploading, evict the persistent cache task.
if task.is_expired() {
self.storage.delete_persistent_cache_task(&task.id).await;
info!("evict persistent cache task {}", task.id);
self.delete_persistent_cache_task_from_scheduler(task.clone())
.await;
info!("delete persistent cache task {} from scheduler", task.id);
}
}
info!("evict by persistent cache task ttl done");
Ok(())
}
/// evict_persistent_cache_task_by_disk_usage evicts the persistent cache task by disk usage.
#[instrument(skip_all)]
async fn evict_persistent_cache_task_by_disk_usage(&self) -> Result<()> {
let available_space = self.storage.available_space()?;
let total_space = self.storage.total_space()?;
let stats = fs2::statvfs(
self.config
.storage
.dir
.join(DEFAULT_CONTENT_DIR)
.join(DEFAULT_PERSISTENT_CACHE_TASK_DIR)
.as_path(),
)?;
let available_space = stats.available_space();
let total_space = stats.total_space();
// Calculate the usage percent.
let usage_percent = (100 - available_space * 100 / total_space) as u8;
@ -272,8 +284,6 @@ impl GC {
{
info!("failed to evict task by disk usage: {}", err);
}
info!("evict persistent cache task by disk usage done");
}
Ok(())
@ -312,7 +322,7 @@ impl GC {
}
// Evict the task.
self.storage.delete_persistent_cache_task(&task.id).await;
self.storage.delete_task(&task.id).await;
// Update the evicted space.
let task_space = task.content_length();
@ -321,9 +331,33 @@ impl GC {
"evict persistent cache task {} size {}",
task.id, task_space
);
self.delete_persistent_cache_task_from_scheduler(task.clone())
.await;
info!("delete persistent cache task {} from scheduler", task.id);
}
info!("evict total size {}", evicted_space);
Ok(())
}
/// delete_persistent_cache_task_from_scheduler deletes the persistent cache task from the scheduler.
#[instrument(skip_all)]
async fn delete_persistent_cache_task_from_scheduler(
&self,
task: metadata::PersistentCacheTask,
) {
self.scheduler_client
.delete_persistent_cache_task(DeletePersistentCacheTaskRequest {
host_id: self.host_id.clone(),
task_id: task.id.clone(),
})
.await
.unwrap_or_else(|err| {
error!(
"failed to delete persistent cache peer {}: {}",
task.id, err
);
});
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -21,7 +21,6 @@ use dragonfly_client_core::{
use hyper_util::rt::TokioIo;
use std::path::PathBuf;
use tokio::net::UnixStream;
use tonic::service::interceptor::InterceptedService;
use tonic::transport::ClientTlsConfig;
use tonic::transport::{Channel, Endpoint, Uri};
use tonic_health::pb::{
@ -30,18 +29,17 @@ use tonic_health::pb::{
use tower::service_fn;
use tracing::{error, instrument};
use super::interceptor::InjectTracingInterceptor;
/// HealthClient is a wrapper of HealthGRPCClient.
#[derive(Clone)]
pub struct HealthClient {
/// client is the grpc client of the certificate.
client: HealthGRPCClient<InterceptedService<Channel, InjectTracingInterceptor>>,
client: HealthGRPCClient<Channel>,
}
/// HealthClient implements the grpc client of the health.
impl HealthClient {
/// new creates a new HealthClient.
#[instrument(skip_all)]
pub async fn new(addr: &str, client_tls_config: Option<ClientTlsConfig>) -> Result<Self> {
let channel = match client_tls_config {
Some(client_tls_config) => Channel::from_shared(addr.to_string())
@ -54,8 +52,9 @@ impl HealthClient {
.keep_alive_timeout(super::HTTP2_KEEP_ALIVE_TIMEOUT)
.connect()
.await
.inspect_err(|err| {
.map_err(|err| {
error!("connect to {} failed: {}", addr, err);
err
})
.or_err(ErrorType::ConnectError)?,
None => Channel::from_shared(addr.to_string())
@ -67,19 +66,21 @@ impl HealthClient {
.keep_alive_timeout(super::HTTP2_KEEP_ALIVE_TIMEOUT)
.connect()
.await
.inspect_err(|err| {
.map_err(|err| {
error!("connect to {} failed: {}", addr, err);
err
})
.or_err(ErrorType::ConnectError)?,
};
let client = HealthGRPCClient::with_interceptor(channel, InjectTracingInterceptor)
let client = HealthGRPCClient::new(channel)
.max_decoding_message_size(usize::MAX)
.max_encoding_message_size(usize::MAX);
Ok(Self { client })
}
/// new_unix creates a new HealthClient with unix domain socket.
#[instrument(skip_all)]
pub async fn new_unix(socket_path: PathBuf) -> Result<Self> {
// Ignore the uri because it is not used.
let channel = Endpoint::try_from("http://[::]:50051")
@ -93,12 +94,12 @@ impl HealthClient {
}
}))
.await
.inspect_err(|err| {
.map_err(|err| {
error!("connect failed: {}", err);
err
})
.or_err(ErrorType::ConnectError)?;
let client = HealthGRPCClient::with_interceptor(channel, InjectTracingInterceptor)
let client = HealthGRPCClient::new(channel)
.max_decoding_message_size(usize::MAX)
.max_encoding_message_size(usize::MAX);
Ok(Self { client })
@ -137,6 +138,7 @@ impl HealthClient {
}
/// make_request creates a new request with timeout.
#[instrument(skip_all)]
fn make_request<T>(request: T) -> tonic::Request<T> {
let mut request = tonic::Request::new(request);
request.set_timeout(super::REQUEST_TIMEOUT);

View File

@ -1,86 +0,0 @@
/*
* Copyright 2024 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use tonic::{metadata, service::Interceptor, Request, Status};
use tracing_opentelemetry::OpenTelemetrySpanExt;
/// MetadataMap is a tracing metadata map container for span context.
struct MetadataMap<'a>(&'a mut metadata::MetadataMap);
/// MetadataMap implements the otel tracing Extractor.
impl opentelemetry::propagation::Extractor for MetadataMap<'_> {
/// Get a value for a key from the `MetadataMap`. If the value can't be converted to &str, returns None
fn get(&self, key: &str) -> Option<&str> {
self.0.get(key).and_then(|metadata| metadata.to_str().ok())
}
/// Collect all the keys from the `MetadataMap`.
fn keys(&self) -> Vec<&str> {
self.0
.keys()
.map(|key| match key {
tonic::metadata::KeyRef::Ascii(v) => v.as_str(),
tonic::metadata::KeyRef::Binary(v) => v.as_str(),
})
.collect::<Vec<_>>()
}
}
/// MetadataMap implements the otel tracing Injector.
impl opentelemetry::propagation::Injector for MetadataMap<'_> {
/// set a key-value pair to the injector.
fn set(&mut self, key: &str, value: String) {
if let Ok(key) = metadata::MetadataKey::from_bytes(key.as_bytes()) {
if let Ok(val) = metadata::MetadataValue::try_from(&value) {
self.0.insert(key, val);
}
}
}
}
/// InjectTracingInterceptor is an auto-inject tracing gRPC interceptor.
#[derive(Clone)]
pub struct InjectTracingInterceptor;
/// InjectTracingInterceptor implements the tonic Interceptor interface.
impl Interceptor for InjectTracingInterceptor {
/// call and inject the tracing context into the global propagator.
fn call(&mut self, mut request: Request<()>) -> std::result::Result<Request<()>, Status> {
let context = tracing::Span::current().context();
opentelemetry::global::get_text_map_propagator(|prop| {
prop.inject_context(&context, &mut MetadataMap(request.metadata_mut()));
});
Ok(request)
}
}
/// ExtractTracingInterceptor is an auto-extract tracing gRPC interceptor.
#[derive(Clone)]
pub struct ExtractTracingInterceptor;
/// ExtractTracingInterceptor implements the tonic Interceptor interface.
impl Interceptor for ExtractTracingInterceptor {
/// call and extract the tracing context from the global propagator.
fn call(&mut self, mut request: Request<()>) -> std::result::Result<Request<()>, Status> {
let parent_cx = opentelemetry::global::get_text_map_propagator(|prop| {
prop.extract(&MetadataMap(request.metadata_mut()))
});
request.extensions_mut().insert(parent_cx);
Ok(request)
}
}
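For reference, the injector half of this pair is attached to the generated clients with the same `with_interceptor` call that the health, manager, and scheduler clients drop elsewhere in this compare:

    // Wrap an outbound gRPC client so every request carries the current span context.
    let client = HealthGRPCClient::with_interceptor(channel, InjectTracingInterceptor)
        .max_decoding_message_size(usize::MAX)
        .max_encoding_message_size(usize::MAX);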

View File

@ -25,23 +25,22 @@ use dragonfly_client_core::{
Error, Result,
};
use std::sync::Arc;
use tonic::{service::interceptor::InterceptedService, transport::Channel};
use tonic::transport::Channel;
use tonic_health::pb::health_check_response::ServingStatus;
use tracing::{error, instrument};
use tracing::{error, instrument, warn};
use url::Url;
use super::interceptor::InjectTracingInterceptor;
/// ManagerClient is a wrapper of ManagerGRPCClient.
#[derive(Clone)]
pub struct ManagerClient {
/// client is the grpc client of the manager.
pub client: ManagerGRPCClient<InterceptedService<Channel, InjectTracingInterceptor>>,
pub client: ManagerGRPCClient<Channel>,
}
/// ManagerClient implements the grpc client of the manager.
impl ManagerClient {
/// new creates a new ManagerClient.
#[instrument(skip_all)]
pub async fn new(config: Arc<Config>, addr: String) -> Result<Self> {
let domain_name = Url::parse(addr.as_str())?
.host_str()
@ -78,8 +77,9 @@ impl ManagerClient {
.keep_alive_timeout(super::HTTP2_KEEP_ALIVE_TIMEOUT)
.connect()
.await
.inspect_err(|err| {
.map_err(|err| {
error!("connect to {} failed: {}", addr.to_string(), err);
err
})
.or_err(ErrorType::ConnectError)?,
None => Channel::from_shared(addr.clone())
@ -92,13 +92,14 @@ impl ManagerClient {
.keep_alive_timeout(super::HTTP2_KEEP_ALIVE_TIMEOUT)
.connect()
.await
.inspect_err(|err| {
.map_err(|err| {
error!("connect to {} failed: {}", addr.to_string(), err);
err
})
.or_err(ErrorType::ConnectError)?,
};
let client = ManagerGRPCClient::with_interceptor(channel, InjectTracingInterceptor)
let client = ManagerGRPCClient::new(channel)
.max_decoding_message_size(usize::MAX)
.max_encoding_message_size(usize::MAX);
Ok(Self { client })
@ -132,6 +133,7 @@ impl ManagerClient {
}
/// make_request creates a new request with timeout.
#[instrument(skip_all)]
fn make_request<T>(request: T) -> tonic::Request<T> {
let mut request = tonic::Request::new(request);
request.set_timeout(super::REQUEST_TIMEOUT);

View File

@ -27,19 +27,14 @@ use tracing::{error, info, instrument, Instrument};
pub mod dfdaemon_download;
pub mod dfdaemon_upload;
pub mod health;
pub mod interceptor;
pub mod manager;
pub mod scheduler;
/// CONNECT_TIMEOUT is the timeout for GRPC connection.
pub const CONNECT_TIMEOUT: Duration = Duration::from_secs(2);
/// REQUEST_TIMEOUT is the timeout for GRPC requests, default is 10 second.
/// Note: This timeout is used for the whole request, including wait for scheduler
/// scheduling, refer to https://d7y.io/docs/next/reference/configuration/scheduler/.
/// Scheduler'configure `scheduler.retryInterval`, `scheduler.retryBackToSourceLimit` and `scheduler.retryLimit`
/// is used for the scheduler to schedule the task.
pub const REQUEST_TIMEOUT: Duration = Duration::from_secs(15);
/// REQUEST_TIMEOUT is the timeout for GRPC requests.
pub const REQUEST_TIMEOUT: Duration = Duration::from_secs(10);
/// TCP_KEEPALIVE is the keepalive duration for TCP connection.
pub const TCP_KEEPALIVE: Duration = Duration::from_secs(3600);
@ -50,11 +45,11 @@ pub const HTTP2_KEEP_ALIVE_INTERVAL: Duration = Duration::from_secs(300);
/// HTTP2_KEEP_ALIVE_TIMEOUT is the timeout for HTTP2 keep alive.
pub const HTTP2_KEEP_ALIVE_TIMEOUT: Duration = Duration::from_secs(20);
/// MAX_FRAME_SIZE is the max frame size for GRPC, default is 4MB.
pub const MAX_FRAME_SIZE: u32 = 4 * 1024 * 1024;
/// MAX_FRAME_SIZE is the max frame size for GRPC, default is 12MB.
pub const MAX_FRAME_SIZE: u32 = 12 * 1024 * 1024;
/// INITIAL_WINDOW_SIZE is the initial window size for GRPC, default is 512KB.
pub const INITIAL_WINDOW_SIZE: u32 = 512 * 1024;
/// INITIAL_WINDOW_SIZE is the initial window size for GRPC, default is 12MB.
pub const INITIAL_WINDOW_SIZE: u32 = 12 * 1024 * 1024;
/// BUFFER_SIZE is the buffer size for GRPC, default is 64KB.
pub const BUFFER_SIZE: usize = 64 * 1024;
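A condensed sketch of where several of these constants land when a channel is built (the builder chain mirrors the connect calls in the health, manager, and scheduler clients below; each client enables a slightly different subset of options):

    // Sketch: typical tonic Endpoint configuration using the constants above.
    let channel = Channel::from_shared(addr.clone())? // addr: String
        .buffer_size(BUFFER_SIZE)
        .connect_timeout(CONNECT_TIMEOUT)
        .tcp_keepalive(Some(TCP_KEEPALIVE))
        .http2_keep_alive_interval(HTTP2_KEEP_ALIVE_INTERVAL)
        .keep_alive_timeout(HTTP2_KEEP_ALIVE_TIMEOUT)
        .connect()
        .await?;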
@ -81,9 +76,6 @@ pub async fn prefetch_task(
// Remove the prefetch flag to prevent an infinite loop.
download.prefetch = false;
// Mark the is_prefetch flag as true to indicate that this is a prefetch request.
download.is_prefetch = true;
// Remove the range header for download full task.
download
.request_header
@ -99,8 +91,9 @@ pub async fn prefetch_task(
let response = dfdaemon_download_client
.download_task(request)
.await
.inspect_err(|err| {
.map_err(|err| {
error!("prefetch task failed: {}", err);
err
})?;
// Collect the prefetch task started metrics.

View File

@ -14,6 +14,7 @@
* limitations under the License.
*/
// use crate::dynconfig::Dynconfig;
use crate::dynconfig::Dynconfig;
use dragonfly_api::common::v2::{Peer, PersistentCachePeer, PersistentCacheTask, Task};
use dragonfly_api::manager::v2::Scheduler;
@ -35,13 +36,10 @@ use std::str::FromStr;
use std::sync::Arc;
use tokio::sync::RwLock;
use tokio::task::JoinSet;
use tonic::service::interceptor::InterceptedService;
use tonic::transport::Channel;
use tracing::{error, info, instrument, Instrument};
use url::Url;
use super::interceptor::InjectTracingInterceptor;
/// VNode is the virtual node of the hashring.
#[derive(Debug, Copy, Clone, Hash, PartialEq)]
struct VNode {
@ -79,6 +77,7 @@ pub struct SchedulerClient {
/// SchedulerClient implements the grpc client of the scheduler.
impl SchedulerClient {
/// new creates a new SchedulerClient.
#[instrument(skip_all)]
pub async fn new(config: Arc<Config>, dynconfig: Arc<Dynconfig>) -> Result<Self> {
let client = Self {
config,
@ -186,13 +185,13 @@ impl SchedulerClient {
.timeout(super::REQUEST_TIMEOUT)
.connect()
.await
.inspect_err(|err| {
.map_err(|err| {
error!("connect to {} failed: {}", addr.to_string(), err);
err
})
.or_err(ErrorType::ConnectError)?;
let mut client =
SchedulerGRPCClient::with_interceptor(channel, InjectTracingInterceptor)
let mut client = SchedulerGRPCClient::new(channel)
.max_decoding_message_size(usize::MAX)
.max_encoding_message_size(usize::MAX);
client.announce_host(request).await?;
@ -240,13 +239,13 @@ impl SchedulerClient {
.timeout(super::REQUEST_TIMEOUT)
.connect()
.await
.inspect_err(|err| {
.map_err(|err| {
error!("connect to {} failed: {}", addr.to_string(), err);
err
})
.or_err(ErrorType::ConnectError)?;
let mut client =
SchedulerGRPCClient::with_interceptor(channel, InjectTracingInterceptor)
let mut client = SchedulerGRPCClient::new(channel)
.max_decoding_message_size(usize::MAX)
.max_encoding_message_size(usize::MAX);
client.announce_host(request).await?;
@ -299,13 +298,13 @@ impl SchedulerClient {
.timeout(super::REQUEST_TIMEOUT)
.connect()
.await
.inspect_err(|err| {
.map_err(|err| {
error!("connect to {} failed: {}", addr.to_string(), err);
err
})
.or_err(ErrorType::ConnectError)?;
let mut client =
SchedulerGRPCClient::with_interceptor(channel, InjectTracingInterceptor)
let mut client = SchedulerGRPCClient::new(channel)
.max_decoding_message_size(usize::MAX)
.max_encoding_message_size(usize::MAX);
client.delete_host(request).await?;
@ -459,7 +458,7 @@ impl SchedulerClient {
&self,
task_id: &str,
peer_id: Option<&str>,
) -> Result<SchedulerGRPCClient<InterceptedService<Channel, InjectTracingInterceptor>>> {
) -> Result<SchedulerGRPCClient<Channel>> {
// Update scheduler addresses of the client.
self.update_available_scheduler_addrs().await?;
@ -497,8 +496,9 @@ impl SchedulerClient {
.keep_alive_timeout(super::HTTP2_KEEP_ALIVE_TIMEOUT)
.connect()
.await
.inspect_err(|err| {
.map_err(|err| {
error!("connect to {} failed: {}", addr.to_string(), err);
err
})
.or_err(ErrorType::ConnectError)?,
None => Channel::from_shared(addr.clone())
@ -511,27 +511,25 @@ impl SchedulerClient {
.keep_alive_timeout(super::HTTP2_KEEP_ALIVE_TIMEOUT)
.connect()
.await
.inspect_err(|err| {
.map_err(|err| {
error!("connect to {} failed: {}", addr.to_string(), err);
err
})
.or_err(ErrorType::ConnectError)?,
};
Ok(
SchedulerGRPCClient::with_interceptor(channel, InjectTracingInterceptor)
Ok(SchedulerGRPCClient::new(channel)
.max_decoding_message_size(usize::MAX)
.max_encoding_message_size(usize::MAX),
)
.max_encoding_message_size(usize::MAX))
}
/// update_available_scheduler_addrs updates the addresses of available schedulers.
#[instrument(skip(self))]
async fn update_available_scheduler_addrs(&self) -> Result<()> {
// Get the endpoints of available schedulers.
let data_available_schedulers_clone = {
let data = self.dynconfig.data.read().await;
data.available_schedulers.clone()
};
let data_available_schedulers_clone = data.available_schedulers.clone();
drop(data);
// Check if the available schedulers is empty.
if data_available_schedulers_clone.is_empty() {
@ -539,10 +537,9 @@ impl SchedulerClient {
}
// Get the available schedulers.
let available_schedulers_clone = {
let available_schedulers = self.available_schedulers.read().await;
available_schedulers.clone()
};
let available_schedulers_clone = available_schedulers.clone();
drop(available_schedulers);
// Check if the available schedulers is not changed.
if data_available_schedulers_clone.len() == available_schedulers_clone.len()
@ -577,11 +574,13 @@ impl SchedulerClient {
new_available_schedulers.push(available_scheduler.clone());
// Add the scheduler address to the addresses of available schedulers.
let socket_addr = SocketAddr::new(ip, available_scheduler.port as u16);
new_available_scheduler_addrs.push(socket_addr);
new_available_scheduler_addrs
.push(SocketAddr::new(ip, available_scheduler.port as u16));
// Add the scheduler to the hashring.
new_hashring.add(VNode { addr: socket_addr });
new_hashring.add(VNode {
addr: SocketAddr::new(ip, available_scheduler.port as u16),
});
}
// Update the available schedulers.
@ -621,6 +620,7 @@ impl SchedulerClient {
}
/// make_request creates a new request with timeout.
#[instrument(skip_all)]
fn make_request<T>(request: T) -> tonic::Request<T> {
let mut request = tonic::Request::new(request);
request.set_timeout(super::REQUEST_TIMEOUT);

View File

@ -36,6 +36,7 @@ pub struct Health {
/// Health implements the health server.
impl Health {
/// new creates a new Health.
#[instrument(skip_all)]
pub fn new(
addr: SocketAddr,
shutdown: shutdown::Shutdown,
@ -49,6 +50,7 @@ impl Health {
}
/// run starts the health server.
#[instrument(skip_all)]
pub async fn run(&self) {
// Clone the shutdown channel.
let mut shutdown = self.shutdown.clone();
@ -69,6 +71,7 @@ impl Health {
_ = shutdown.recv() => {
// Health server shutting down with signals.
info!("health server shutting down");
return
}
}
}

View File

@ -177,27 +177,6 @@ lazy_static! {
&[]
).expect("metric can be created");
/// PROXY_REQUEST_VIA_DFDAEMON_COUNT is used to count the number of proxy requests via dfdaemon.
pub static ref PROXY_REQUEST_VIA_DFDAEMON_COUNT: IntCounterVec =
IntCounterVec::new(
Opts::new("proxy_request_via_dfdaemon_total", "Counter of the number of the proxy request via dfdaemon.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
&[]
).expect("metric can be created");
/// UPDATE_TASK_COUNT is used to count the number of update tasks.
pub static ref UPDATE_TASK_COUNT: IntCounterVec =
IntCounterVec::new(
Opts::new("update_task_total", "Counter of the number of the update task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
&["type"]
).expect("metric can be created");
/// UPDATE_TASK_FAILURE_COUNT is used to count the failed number of update tasks.
pub static ref UPDATE_TASK_FAILURE_COUNT: IntCounterVec =
IntCounterVec::new(
Opts::new("update_task_failure_total", "Counter of the number of failed of the update task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
&["type"]
).expect("metric can be created");
/// STAT_TASK_COUNT is used to count the number of stat tasks.
pub static ref STAT_TASK_COUNT: IntCounterVec =
IntCounterVec::new(
@ -212,20 +191,6 @@ lazy_static! {
&["type"]
).expect("metric can be created");
/// LIST_TASK_ENTRIES_COUNT is used to count the number of list task entries.
pub static ref LIST_TASK_ENTRIES_COUNT: IntCounterVec =
IntCounterVec::new(
Opts::new("list_task_entries_total", "Counter of the number of the list task entries.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
&["type"]
).expect("metric can be created");
/// LIST_TASK_ENTRIES_FAILURE_COUNT is used to count the failed number of list task entries.
pub static ref LIST_TASK_ENTRIES_FAILURE_COUNT: IntCounterVec =
IntCounterVec::new(
Opts::new("list_task_entries_failure_total", "Counter of the number of failed of the list task entries.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
&["type"]
).expect("metric can be created");
/// DELETE_TASK_COUNT is used to count the number of delete tasks.
pub static ref DELETE_TASK_COUNT: IntCounterVec =
IntCounterVec::new(
@ -269,153 +234,6 @@ lazy_static! {
).expect("metric can be created");
}
/// register_custom_metrics registers all custom metrics.
fn register_custom_metrics() {
REGISTRY
.register(Box::new(VERSION_GAUGE.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(DOWNLOAD_TASK_COUNT.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(DOWNLOAD_TASK_FAILURE_COUNT.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(PREFETCH_TASK_COUNT.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(PREFETCH_TASK_FAILURE_COUNT.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(CONCURRENT_DOWNLOAD_TASK_GAUGE.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(CONCURRENT_UPLOAD_PIECE_GAUGE.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(DOWNLOAD_TRAFFIC.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(UPLOAD_TRAFFIC.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(DOWNLOAD_TASK_DURATION.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(BACKEND_REQUEST_COUNT.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(BACKEND_REQUEST_FAILURE_COUNT.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(BACKEND_REQUEST_DURATION.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(PROXY_REQUEST_COUNT.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(PROXY_REQUEST_FAILURE_COUNT.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(PROXY_REQUEST_VIA_DFDAEMON_COUNT.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(UPDATE_TASK_COUNT.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(UPDATE_TASK_FAILURE_COUNT.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(STAT_TASK_COUNT.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(STAT_TASK_FAILURE_COUNT.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(LIST_TASK_ENTRIES_COUNT.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(LIST_TASK_ENTRIES_FAILURE_COUNT.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(DELETE_TASK_COUNT.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(DELETE_TASK_FAILURE_COUNT.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(DELETE_HOST_COUNT.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(DELETE_HOST_FAILURE_COUNT.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(DISK_SPACE.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(DISK_USAGE_SPACE.clone()))
.expect("metric can be registered");
}
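// Editor's illustrative sketch (not part of this diff): the define -> register ->
// observe -> encode cycle the metrics above follow, shown against a standalone
// prometheus Registry. The metric and label names here are made up for the example.
fn metrics_cycle_sketch() {
    use prometheus::{Encoder, IntCounterVec, Opts, Registry, TextEncoder};

    let registry = Registry::new();
    let example_task_count = IntCounterVec::new(
        Opts::new("example_task_total", "Counter of the number of example tasks."),
        &["type"],
    )
    .expect("metric can be created");

    // Register once, then increment with label values at the call sites.
    registry
        .register(Box::new(example_task_count.clone()))
        .expect("metric can be registered");
    example_task_count.with_label_values(&["standard"]).inc();

    // Encode to the Prometheus text exposition format, as the handler below does.
    let mut buffer = Vec::new();
    TextEncoder::new()
        .encode(&registry.gather(), &mut buffer)
        .expect("metrics can be encoded");
}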
/// reset_custom_metrics resets all custom metrics.
fn reset_custom_metrics() {
VERSION_GAUGE.reset();
DOWNLOAD_TASK_COUNT.reset();
DOWNLOAD_TASK_FAILURE_COUNT.reset();
PREFETCH_TASK_COUNT.reset();
PREFETCH_TASK_FAILURE_COUNT.reset();
CONCURRENT_DOWNLOAD_TASK_GAUGE.reset();
CONCURRENT_UPLOAD_PIECE_GAUGE.reset();
DOWNLOAD_TRAFFIC.reset();
UPLOAD_TRAFFIC.reset();
DOWNLOAD_TASK_DURATION.reset();
BACKEND_REQUEST_COUNT.reset();
BACKEND_REQUEST_FAILURE_COUNT.reset();
BACKEND_REQUEST_DURATION.reset();
PROXY_REQUEST_COUNT.reset();
PROXY_REQUEST_FAILURE_COUNT.reset();
PROXY_REQUEST_VIA_DFDAEMON_COUNT.reset();
UPDATE_TASK_COUNT.reset();
UPDATE_TASK_FAILURE_COUNT.reset();
STAT_TASK_COUNT.reset();
STAT_TASK_FAILURE_COUNT.reset();
LIST_TASK_ENTRIES_COUNT.reset();
LIST_TASK_ENTRIES_FAILURE_COUNT.reset();
DELETE_TASK_COUNT.reset();
DELETE_TASK_FAILURE_COUNT.reset();
DELETE_HOST_COUNT.reset();
DELETE_HOST_FAILURE_COUNT.reset();
DISK_SPACE.reset();
DISK_USAGE_SPACE.reset();
}
/// TaskSize represents the size of the task.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TaskSize {
@ -545,12 +363,12 @@ impl TaskSize {
/// collect_upload_task_started_metrics collects the upload task started metrics.
pub fn collect_upload_task_started_metrics(typ: i32, tag: &str, app: &str) {
let typ = typ.to_string();
UPLOAD_TASK_COUNT.with_label_values(&[&typ, tag, app]).inc();
UPLOAD_TASK_COUNT
.with_label_values(&[typ.to_string().as_str(), tag, app])
.inc();
CONCURRENT_UPLOAD_TASK_GAUGE
.with_label_values(&[&typ, tag, app])
.with_label_values(&[typ.to_string().as_str(), tag, app])
.inc();
}
@ -573,41 +391,34 @@ pub fn collect_upload_task_finished_metrics(
);
}
let typ = typ.to_string();
let task_size = task_size.to_string();
UPLOAD_TASK_DURATION
.with_label_values(&[&typ, &task_size])
.with_label_values(&[typ.to_string().as_str(), task_size.to_string().as_str()])
.observe(cost.as_millis() as f64);
CONCURRENT_UPLOAD_TASK_GAUGE
.with_label_values(&[&typ, tag, app])
.with_label_values(&[typ.to_string().as_str(), tag, app])
.dec();
}
/// collect_upload_task_failure_metrics collects the upload task failure metrics.
pub fn collect_upload_task_failure_metrics(typ: i32, tag: &str, app: &str) {
let typ = typ.to_string();
UPLOAD_TASK_FAILURE_COUNT
.with_label_values(&[&typ, tag, app])
.with_label_values(&[typ.to_string().as_str(), tag, app])
.inc();
CONCURRENT_UPLOAD_TASK_GAUGE
.with_label_values(&[&typ, tag, app])
.with_label_values(&[typ.to_string().as_str(), tag, app])
.dec();
}
/// collect_download_task_started_metrics collects the download task started metrics.
pub fn collect_download_task_started_metrics(typ: i32, tag: &str, app: &str, priority: &str) {
let typ = typ.to_string();
DOWNLOAD_TASK_COUNT
.with_label_values(&[&typ, tag, app, priority])
.with_label_values(&[typ.to_string().as_str(), tag, app, priority])
.inc();
CONCURRENT_DOWNLOAD_TASK_GAUGE
.with_label_values(&[&typ, tag, app, priority])
.with_label_values(&[typ.to_string().as_str(), tag, app, priority])
.inc();
}
@ -638,28 +449,23 @@ pub fn collect_download_task_finished_metrics(
);
}
let typ = typ.to_string();
let task_size = task_size.to_string();
DOWNLOAD_TASK_DURATION
.with_label_values(&[&typ, &task_size])
.with_label_values(&[typ.to_string().as_str(), task_size.to_string().as_str()])
.observe(cost.as_millis() as f64);
CONCURRENT_DOWNLOAD_TASK_GAUGE
.with_label_values(&[&typ, tag, app, priority])
.with_label_values(&[typ.to_string().as_str(), tag, app, priority])
.dec();
}
/// collect_download_task_failure_metrics collects the download task failure metrics.
pub fn collect_download_task_failure_metrics(typ: i32, tag: &str, app: &str, priority: &str) {
let typ = typ.to_string();
DOWNLOAD_TASK_FAILURE_COUNT
.with_label_values(&[&typ, tag, app, priority])
.with_label_values(&[typ.to_string().as_str(), tag, app, priority])
.inc();
CONCURRENT_DOWNLOAD_TASK_GAUGE
.with_label_values(&[&typ, tag, app, priority])
.with_label_values(&[typ.to_string().as_str(), tag, app, priority])
.dec();
}
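// Editor's illustrative sketch (not part of this diff): a typical call pattern around a
// download attempt so the concurrent gauge stays balanced. The tag, application, and
// priority values are made-up examples; `TaskType` comes from dragonfly_api.
fn download_metrics_usage_sketch(succeeded: bool) {
    use dragonfly_api::common::v2::TaskType;

    let typ = TaskType::Standard as i32;
    collect_download_task_started_metrics(typ, "example-tag", "example-app", "6");
    if !succeeded {
        // On failure, bump the failure counter and decrement the gauge.
        collect_download_task_failure_metrics(typ, "example-tag", "example-app", "6");
    }
    // On success, collect_download_task_finished_metrics records the duration and
    // decrements the gauge instead (arguments elided here).
}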
@ -737,27 +543,6 @@ pub fn collect_proxy_request_failure_metrics() {
PROXY_REQUEST_FAILURE_COUNT.with_label_values(&[]).inc();
}
/// collect_proxy_request_via_dfdaemon_metrics collects the proxy request via dfdaemon metrics.
pub fn collect_proxy_request_via_dfdaemon_metrics() {
PROXY_REQUEST_VIA_DFDAEMON_COUNT
.with_label_values(&[])
.inc();
}
/// collect_update_task_started_metrics collects the update task started metrics.
pub fn collect_update_task_started_metrics(typ: i32) {
UPDATE_TASK_COUNT
.with_label_values(&[typ.to_string().as_str()])
.inc();
}
/// collect_update_task_failure_metrics collects the update task failure metrics.
pub fn collect_update_task_failure_metrics(typ: i32) {
UPDATE_TASK_FAILURE_COUNT
.with_label_values(&[typ.to_string().as_str()])
.inc();
}
/// collect_stat_task_started_metrics collects the stat task started metrics.
pub fn collect_stat_task_started_metrics(typ: i32) {
STAT_TASK_COUNT
@ -772,20 +557,6 @@ pub fn collect_stat_task_failure_metrics(typ: i32) {
.inc();
}
/// collect_list_task_entries_started_metrics collects the list task entries started metrics.
pub fn collect_list_task_entries_started_metrics(typ: i32) {
LIST_TASK_ENTRIES_COUNT
.with_label_values(&[typ.to_string().as_str()])
.inc();
}
/// collect_list_task_entries_failure_metrics collects the list task entries failure metrics.
pub fn collect_list_task_entries_failure_metrics(typ: i32) {
LIST_TASK_ENTRIES_FAILURE_COUNT
.with_label_values(&[typ.to_string().as_str()])
.inc();
}
/// collect_delete_task_started_metrics collects the delete task started metrics.
pub fn collect_delete_task_started_metrics(typ: i32) {
DELETE_TASK_COUNT
@ -810,9 +581,8 @@ pub fn collect_delete_host_failure_metrics() {
DELETE_HOST_FAILURE_COUNT.with_label_values(&[]).inc();
}
/// collect_disk_metrics collects the disk metrics.
pub fn collect_disk_metrics(path: &Path) {
// Collect disk space metrics.
/// collect_disk_space_metrics collects the disk space metrics.
pub fn collect_disk_space_metrics(path: &Path) {
let stats = match fs2::statvfs(path) {
Ok(stats) => stats,
Err(err) => {
@ -846,6 +616,7 @@ pub struct Metrics {
/// Metrics implements the metrics server.
impl Metrics {
/// new creates a new Metrics.
#[instrument(skip_all)]
pub fn new(
config: Arc<Config>,
shutdown: shutdown::Shutdown,
@ -859,12 +630,13 @@ impl Metrics {
}
/// run starts the metrics server.
#[instrument(skip_all)]
pub async fn run(&self) {
// Clone the shutdown channel.
let mut shutdown = self.shutdown.clone();
// Register custom metrics.
register_custom_metrics();
self.register_custom_metrics();
// VERSION_GAUGE sets the version info of the service.
VERSION_GAUGE
@ -886,38 +658,128 @@ impl Metrics {
self.config.metrics.server.port,
);
// Get the metrics route.
let get_metrics_route = warp::path!("metrics")
// Create the metrics route.
let metrics_route = warp::path!("metrics")
.and(warp::get())
.and(warp::path::end())
.and_then(move || Self::get_metrics_handler(config.clone()));
// Delete the metrics route.
let delete_metrics_route = warp::path!("metrics")
.and(warp::delete())
.and(warp::path::end())
.and_then(Self::delete_metrics_handler);
let metrics_routes = get_metrics_route.or(delete_metrics_route);
.and_then(move || Self::metrics_handler(config.clone()));
// Start the metrics server and wait for it to finish.
info!("metrics server listening on {}", addr);
tokio::select! {
_ = warp::serve(metrics_routes).run(addr) => {
_ = warp::serve(metrics_route).run(addr) => {
// Metrics server ended.
info!("metrics server ended");
}
_ = shutdown.recv() => {
// Metrics server shutting down with signals.
info!("metrics server shutting down");
return
}
}
}
/// get_metrics_handler handles the metrics request of getting.
/// register_custom_metrics registers all custom metrics.
#[instrument(skip_all)]
async fn get_metrics_handler(config: Arc<Config>) -> Result<impl Reply, Rejection> {
fn register_custom_metrics(&self) {
REGISTRY
.register(Box::new(VERSION_GAUGE.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(DOWNLOAD_TASK_COUNT.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(DOWNLOAD_TASK_FAILURE_COUNT.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(PREFETCH_TASK_COUNT.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(PREFETCH_TASK_FAILURE_COUNT.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(CONCURRENT_DOWNLOAD_TASK_GAUGE.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(CONCURRENT_UPLOAD_PIECE_GAUGE.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(DOWNLOAD_TRAFFIC.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(UPLOAD_TRAFFIC.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(DOWNLOAD_TASK_DURATION.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(BACKEND_REQUEST_COUNT.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(BACKEND_REQUEST_FAILURE_COUNT.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(BACKEND_REQUEST_DURATION.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(PROXY_REQUEST_COUNT.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(PROXY_REQUEST_FAILURE_COUNT.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(STAT_TASK_COUNT.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(STAT_TASK_FAILURE_COUNT.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(DELETE_TASK_COUNT.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(DELETE_TASK_FAILURE_COUNT.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(DELETE_HOST_COUNT.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(DELETE_HOST_FAILURE_COUNT.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(DISK_SPACE.clone()))
.expect("metric can be registered");
REGISTRY
.register(Box::new(DISK_USAGE_SPACE.clone()))
.expect("metric can be registered");
}
/// metrics_handler handles the metrics request.
#[instrument(skip_all)]
async fn metrics_handler(config: Arc<Config>) -> Result<impl Reply, Rejection> {
// Collect the disk space metrics.
collect_disk_metrics(config.storage.dir.as_path());
collect_disk_space_metrics(config.storage.dir.as_path());
// Encode custom metrics.
let encoder = TextEncoder::new();
@ -953,11 +815,4 @@ impl Metrics {
res.push_str(&res_custom);
Ok(res)
}
/// delete_metrics_handler handles the metrics request of deleting.
#[instrument(skip_all)]
async fn delete_metrics_handler() -> Result<impl Reply, Rejection> {
reset_custom_metrics();
Ok(Vec::new())
}
}
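// Editor's usage sketch (not part of this diff): scraping the endpoint served above.
// The address and port are assumptions for the example, not values defined in this diff.
async fn scrape_metrics_sketch() -> Result<String, reqwest::Error> {
    // A GET returns the text exposition format produced by the TextEncoder; the
    // DELETE /metrics route added above resets the custom metrics.
    reqwest::get("http://127.0.0.1:4002/metrics").await?.text().await
}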

View File

@ -14,10 +14,9 @@
* limitations under the License.
*/
use bytesize::ByteSize;
use dragonfly_api::common::v2::Priority;
use reqwest::header::HeaderMap;
use tracing::error;
use tracing::{error, instrument};
/// DRAGONFLY_TAG_HEADER is the header key of tag in http request.
pub const DRAGONFLY_TAG_HEADER: &str = "X-Dragonfly-Tag";
@ -52,59 +51,38 @@ pub const DRAGONFLY_USE_P2P_HEADER: &str = "X-Dragonfly-Use-P2P";
/// If the value is "false", the range request will fetch the range content.
pub const DRAGONFLY_PREFETCH_HEADER: &str = "X-Dragonfly-Prefetch";
/// DRAGONFLY_OUTPUT_PATH_HEADER is the header key of absolute output path in http request.
///
/// If `X-Dragonfly-Output-Path` is set, the downloaded file will be saved to the specified path.
/// Dfdaemon will try to create a hard link to the output path before starting the download. If hard link creation fails,
/// it will copy the file to the output path after the download is completed.
/// For more details refer to https://github.com/dragonflyoss/design/blob/main/systems-analysis/file-download-workflow-with-hard-link/README.md.
pub const DRAGONFLY_OUTPUT_PATH_HEADER: &str = "X-Dragonfly-Output-Path";
/// DRAGONFLY_FORCE_HARD_LINK_HEADER is the header key of force hard link in http request.
///
/// `X-Dragonfly-Force-Hard-Link` is the flag to indicate whether the downloaded file must be hard linked to the output path.
/// For more details refer to https://github.com/dragonflyoss/design/blob/main/systems-analysis/file-download-workflow-with-hard-link/README.md.
pub const DRAGONFLY_FORCE_HARD_LINK_HEADER: &str = "X-Dragonfly-Force-Hard-Link";
/// DRAGONFLY_PIECE_LENGTH_HEADER is the header key of piece length in http request.
/// If the value is set, the piece length will be used to download the file.
/// Different piece lengths generate different task IDs. The value needs to
/// be set in a human-readable format and must be greater than or equal
/// to 4mib, for example: 4mib, 1gib.
pub const DRAGONFLY_PIECE_LENGTH_HEADER: &str = "X-Dragonfly-Piece-Length";
/// DRAGONFLY_CONTENT_FOR_CALCULATING_TASK_ID_HEADER is the header key of content for calculating task id.
/// If DRAGONFLY_CONTENT_FOR_CALCULATING_TASK_ID_HEADER is set, use its value to calculate the task ID.
/// Otherwise, calculate the task ID based on `url`, `piece_length`, `tag`, `application`, and `filtered_query_params`.
pub const DRAGONFLY_CONTENT_FOR_CALCULATING_TASK_ID_HEADER: &str =
"X-Dragonfly-Content-For-Calculating-Task-ID";
/// DRAGONFLY_TASK_DOWNLOAD_FINISHED_HEADER is the response header key to indicate whether the task download finished.
/// When the task download is finished, the response will include this header with the value `"true"`,
/// indicating that the download hit the local cache.
pub const DRAGONFLY_TASK_DOWNLOAD_FINISHED_HEADER: &str = "X-Dragonfly-Task-Download-Finished";
/// DRAGONFLY_TASK_ID_HEADER is the response header key of task id. Client will calculate the task ID
/// based on `url`, `piece_length`, `tag`, `application`, and `filtered_query_params`.
pub const DRAGONFLY_TASK_ID_HEADER: &str = "X-Dragonfly-Task-ID";
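// Editor's illustrative sketch (not part of this diff): composing a request header map
// that the helpers below will parse. All header values are made-up examples.
fn build_example_headers() -> HeaderMap {
    use reqwest::header::HeaderValue;

    let mut headers = HeaderMap::new();
    headers.insert(DRAGONFLY_TAG_HEADER, HeaderValue::from_static("example-tag"));
    headers.insert(DRAGONFLY_APPLICATION_HEADER, HeaderValue::from_static("example-app"));
    headers.insert(DRAGONFLY_PIECE_LENGTH_HEADER, HeaderValue::from_static("4mib"));
    headers.insert(DRAGONFLY_OUTPUT_PATH_HEADER, HeaderValue::from_static("/tmp/example-output"));
    headers.insert(DRAGONFLY_FORCE_HARD_LINK_HEADER, HeaderValue::from_static("true"));
    headers
}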
/// get_tag gets the tag from http header.
#[instrument(skip_all)]
pub fn get_tag(header: &HeaderMap) -> Option<String> {
header
.get(DRAGONFLY_TAG_HEADER)
.and_then(|tag| tag.to_str().ok())
.map(|tag| tag.to_string())
match header.get(DRAGONFLY_TAG_HEADER) {
Some(tag) => match tag.to_str() {
Ok(tag) => Some(tag.to_string()),
Err(err) => {
error!("get tag from header failed: {}", err);
None
}
},
None => None,
}
}
/// get_application gets the application from http header.
#[instrument(skip_all)]
pub fn get_application(header: &HeaderMap) -> Option<String> {
header
.get(DRAGONFLY_APPLICATION_HEADER)
.and_then(|application| application.to_str().ok())
.map(|application| application.to_string())
match header.get(DRAGONFLY_APPLICATION_HEADER) {
Some(application) => match application.to_str() {
Ok(application) => Some(application.to_string()),
Err(err) => {
error!("get application from header failed: {}", err);
None
}
},
None => None,
}
}
/// get_priority gets the priority from http header.
#[instrument(skip_all)]
pub fn get_priority(header: &HeaderMap) -> i32 {
let default_priority = Priority::Level6 as i32;
match header.get(DRAGONFLY_PRIORITY_HEADER) {
@ -126,21 +104,29 @@ pub fn get_priority(header: &HeaderMap) -> i32 {
}
/// get_registry gets the custom address of container registry from http header.
#[instrument(skip_all)]
pub fn get_registry(header: &HeaderMap) -> Option<String> {
header
.get(DRAGONFLY_REGISTRY_HEADER)
.and_then(|registry| registry.to_str().ok())
.map(|registry| registry.to_string())
match header.get(DRAGONFLY_REGISTRY_HEADER) {
Some(registry) => match registry.to_str() {
Ok(registry) => Some(registry.to_string()),
Err(err) => {
error!("get registry from header failed: {}", err);
None
}
},
None => None,
}
}
/// get_filters gets the filters from http header.
#[instrument(skip_all)]
pub fn get_filtered_query_params(
header: &HeaderMap,
default_filtered_query_params: Vec<String>,
) -> Vec<String> {
match header.get(DRAGONFLY_FILTERED_QUERY_PARAMS_HEADER) {
Some(filters) => match filters.to_str() {
Ok(filters) => filters.split(',').map(|s| s.trim().to_string()).collect(),
Ok(filters) => filters.split(',').map(|s| s.to_string()).collect(),
Err(err) => {
error!("get filters from header failed: {}", err);
default_filtered_query_params
@ -151,6 +137,7 @@ pub fn get_filtered_query_params(
}
/// get_use_p2p gets the use p2p from http header.
#[instrument(skip_all)]
pub fn get_use_p2p(header: &HeaderMap) -> bool {
match header.get(DRAGONFLY_USE_P2P_HEADER) {
Some(value) => match value.to_str() {
@ -165,6 +152,7 @@ pub fn get_use_p2p(header: &HeaderMap) -> bool {
}
/// get_prefetch gets the prefetch from http header.
#[instrument(skip_all)]
pub fn get_prefetch(header: &HeaderMap) -> Option<bool> {
match header.get(DRAGONFLY_PREFETCH_HEADER) {
Some(value) => match value.to_str() {
@ -177,229 +165,3 @@ pub fn get_prefetch(header: &HeaderMap) -> Option<bool> {
None => None,
}
}
/// get_output_path gets the output path from http header.
pub fn get_output_path(header: &HeaderMap) -> Option<String> {
header
.get(DRAGONFLY_OUTPUT_PATH_HEADER)
.and_then(|output_path| output_path.to_str().ok())
.map(|output_path| output_path.to_string())
}
/// get_force_hard_link gets the force hard link from http header.
pub fn get_force_hard_link(header: &HeaderMap) -> bool {
match header.get(DRAGONFLY_FORCE_HARD_LINK_HEADER) {
Some(value) => match value.to_str() {
Ok(value) => value.eq_ignore_ascii_case("true"),
Err(err) => {
error!("get force hard link from header failed: {}", err);
false
}
},
None => false,
}
}
/// get_piece_length gets the piece length from http header.
pub fn get_piece_length(header: &HeaderMap) -> Option<ByteSize> {
match header.get(DRAGONFLY_PIECE_LENGTH_HEADER) {
Some(piece_length) => match piece_length.to_str() {
Ok(piece_length) => match piece_length.parse::<ByteSize>() {
Ok(piece_length) => Some(piece_length),
Err(err) => {
error!("parse piece length from header failed: {}", err);
None
}
},
Err(err) => {
error!("get piece length from header failed: {}", err);
None
}
},
None => None,
}
}
/// get_content_for_calculating_task_id gets the content for calculating task id from http header.
pub fn get_content_for_calculating_task_id(header: &HeaderMap) -> Option<String> {
header
.get(DRAGONFLY_CONTENT_FOR_CALCULATING_TASK_ID_HEADER)
.and_then(|content| content.to_str().ok())
.map(|content| content.to_string())
}
#[cfg(test)]
mod tests {
use super::*;
use reqwest::header::{HeaderMap, HeaderValue};
#[test]
fn test_get_tag() {
let mut headers = HeaderMap::new();
headers.insert(DRAGONFLY_TAG_HEADER, HeaderValue::from_static("test-tag"));
assert_eq!(get_tag(&headers), Some("test-tag".to_string()));
let empty_headers = HeaderMap::new();
assert_eq!(get_tag(&empty_headers), None);
}
#[test]
fn test_get_application() {
let mut headers = HeaderMap::new();
headers.insert(
DRAGONFLY_APPLICATION_HEADER,
HeaderValue::from_static("test-app"),
);
assert_eq!(get_application(&headers), Some("test-app".to_string()));
let empty_headers = HeaderMap::new();
assert_eq!(get_application(&empty_headers), None);
}
#[test]
fn test_get_priority() {
let mut headers = HeaderMap::new();
headers.insert(DRAGONFLY_PRIORITY_HEADER, HeaderValue::from_static("5"));
assert_eq!(get_priority(&headers), 5);
let empty_headers = HeaderMap::new();
assert_eq!(get_priority(&empty_headers), Priority::Level6 as i32);
headers.insert(
DRAGONFLY_PRIORITY_HEADER,
HeaderValue::from_static("invalid"),
);
assert_eq!(get_priority(&headers), Priority::Level6 as i32);
}
#[test]
fn test_get_registry() {
let mut headers = HeaderMap::new();
headers.insert(
DRAGONFLY_REGISTRY_HEADER,
HeaderValue::from_static("test-registry"),
);
assert_eq!(get_registry(&headers), Some("test-registry".to_string()));
let empty_headers = HeaderMap::new();
assert_eq!(get_registry(&empty_headers), None);
}
#[test]
fn test_get_filtered_query_params() {
let mut headers = HeaderMap::new();
headers.insert(
DRAGONFLY_FILTERED_QUERY_PARAMS_HEADER,
HeaderValue::from_static("param1,param2"),
);
assert_eq!(
get_filtered_query_params(&headers, vec!["default".to_string()]),
vec!["param1".to_string(), "param2".to_string()]
);
let empty_headers = HeaderMap::new();
assert_eq!(
get_filtered_query_params(&empty_headers, vec!["default".to_string()]),
vec!["default".to_string()]
);
}
#[test]
fn test_get_use_p2p() {
let mut headers = HeaderMap::new();
headers.insert(DRAGONFLY_USE_P2P_HEADER, HeaderValue::from_static("true"));
assert!(get_use_p2p(&headers));
headers.insert(DRAGONFLY_USE_P2P_HEADER, HeaderValue::from_static("false"));
assert!(!get_use_p2p(&headers));
let empty_headers = HeaderMap::new();
assert!(!get_use_p2p(&empty_headers));
}
#[test]
fn test_get_prefetch() {
let mut headers = HeaderMap::new();
headers.insert(DRAGONFLY_PREFETCH_HEADER, HeaderValue::from_static("true"));
assert_eq!(get_prefetch(&headers), Some(true));
headers.insert(DRAGONFLY_PREFETCH_HEADER, HeaderValue::from_static("false"));
assert_eq!(get_prefetch(&headers), Some(false));
let empty_headers = HeaderMap::new();
assert_eq!(get_prefetch(&empty_headers), None);
}
#[test]
fn test_get_output_path() {
let mut headers = HeaderMap::new();
headers.insert(
DRAGONFLY_OUTPUT_PATH_HEADER,
HeaderValue::from_static("/path/to/output"),
);
assert_eq!(
get_output_path(&headers),
Some("/path/to/output".to_string())
);
let empty_headers = HeaderMap::new();
assert_eq!(get_output_path(&empty_headers), None);
}
#[test]
fn test_get_force_hard_link() {
let mut headers = HeaderMap::new();
headers.insert(
DRAGONFLY_FORCE_HARD_LINK_HEADER,
HeaderValue::from_static("true"),
);
assert!(get_force_hard_link(&headers));
headers.insert(
DRAGONFLY_FORCE_HARD_LINK_HEADER,
HeaderValue::from_static("false"),
);
assert!(!get_force_hard_link(&headers));
let empty_headers = HeaderMap::new();
assert!(!get_force_hard_link(&empty_headers));
}
#[test]
fn test_get_piece_length() {
let mut headers = HeaderMap::new();
headers.insert(
DRAGONFLY_PIECE_LENGTH_HEADER,
HeaderValue::from_static("4mib"),
);
assert_eq!(get_piece_length(&headers), Some(ByteSize::mib(4)));
let empty_headers = HeaderMap::new();
assert_eq!(get_piece_length(&empty_headers), None);
headers.insert(
DRAGONFLY_PIECE_LENGTH_HEADER,
HeaderValue::from_static("invalid"),
);
assert_eq!(get_piece_length(&headers), None);
headers.insert(DRAGONFLY_PIECE_LENGTH_HEADER, HeaderValue::from_static("0"));
assert_eq!(get_piece_length(&headers), Some(ByteSize::b(0)));
}
#[test]
fn test_get_content_for_calculating_task_id() {
let mut headers = HeaderMap::new();
headers.insert(
DRAGONFLY_CONTENT_FOR_CALCULATING_TASK_ID_HEADER,
HeaderValue::from_static("test-content"),
);
assert_eq!(
get_content_for_calculating_task_id(&headers),
Some("test-content".to_string())
);
let empty_headers = HeaderMap::new();
assert_eq!(get_registry(&empty_headers), None);
}
}

View File

@ -14,12 +14,11 @@
* limitations under the License.
*/
use crate::grpc::{dfdaemon_download::DfdaemonDownloadClient, REQUEST_TIMEOUT};
use crate::grpc::dfdaemon_download::DfdaemonDownloadClient;
use crate::metrics::{
collect_proxy_request_failure_metrics, collect_proxy_request_started_metrics,
collect_proxy_request_via_dfdaemon_metrics,
};
use crate::resource::{piece::MIN_PIECE_LENGTH, task::Task};
use crate::resource::task::Task;
use crate::shutdown;
use bytes::Bytes;
use dragonfly_api::common::v2::{Download, TaskType};
@ -31,10 +30,13 @@ use dragonfly_client_config::dfdaemon::{Config, Rule};
use dragonfly_client_core::error::{ErrorType, OrErr};
use dragonfly_client_core::{Error as ClientError, Result as ClientResult};
use dragonfly_client_util::{
http::{hashmap_to_headermap, headermap_to_hashmap},
http::{
hashmap_to_hyper_header_map, hyper_headermap_to_reqwest_headermap,
reqwest_headermap_to_hashmap,
},
tls::{generate_self_signed_certs_by_ca_cert, generate_simple_self_signed_certs, NoVerifier},
};
use futures::TryStreamExt;
use futures_util::TryStreamExt;
use http_body_util::{combinators::BoxBody, BodyExt, Empty, StreamBody};
use hyper::body::Frame;
use hyper::client::conn::http1::Builder as ClientBuilder;
@ -46,28 +48,24 @@ use hyper_util::{
client::legacy::Client,
rt::{tokio::TokioIo, TokioExecutor},
};
use lazy_static::lazy_static;
use rcgen::Certificate;
use rustls::{RootCertStore, ServerConfig};
use rustls_pki_types::CertificateDer;
use std::collections::HashMap;
use std::net::SocketAddr;
use std::sync::Arc;
use tokio::io::{AsyncWriteExt, BufReader, BufWriter};
use std::time::Duration;
use tokio::io::{AsyncWriteExt, BufReader};
use tokio::net::TcpListener;
use tokio::net::TcpStream;
use tokio::sync::{mpsc, Barrier};
use tokio::sync::mpsc;
use tokio::time::sleep;
use tokio_rustls::TlsAcceptor;
use tokio_util::io::ReaderStream;
use tracing::{debug, error, info, instrument, Instrument, Span};
use tracing::{error, info, instrument, Span};
pub mod header;
lazy_static! {
/// SUPPORTED_HTTP_PROTOCOLS is the supported HTTP protocols, including http/1.1 and http/1.0.
static ref SUPPORTED_HTTP_PROTOCOLS: Vec<Vec<u8>> = vec![b"http/1.1".to_vec(), b"http/1.0".to_vec()];
}
/// Response is the response of the proxy server.
pub type Response = hyper::Response<BoxBody<Bytes, ClientError>>;
@ -99,6 +97,7 @@ pub struct Proxy {
/// Proxy implements the proxy server.
impl Proxy {
/// new creates a new Proxy.
#[instrument(skip_all)]
pub fn new(
config: Arc<Config>,
task: Arc<Task>,
@ -143,49 +142,15 @@ impl Proxy {
}
/// run starts the proxy server.
pub async fn run(&self, grpc_server_started_barrier: Arc<Barrier>) -> ClientResult<()> {
let mut shutdown = self.shutdown.clone();
let read_buffer_size = self.config.proxy.read_buffer_size;
// When the grpc server is started, notify the barrier. If the shutdown signal is received
// before barrier is waited successfully, the server will shutdown immediately.
tokio::select! {
// Wait for starting the proxy server
_ = grpc_server_started_barrier.wait() => {
info!("proxy server is ready to start");
}
_ = shutdown.recv() => {
// Proxy server shutting down with signals.
info!("proxy server shutting down");
return Ok(());
}
}
let dfdaemon_download_client =
DfdaemonDownloadClient::new_unix(self.config.download.server.socket_path.clone())
.await?;
#[derive(Clone)]
struct Context {
config: Arc<Config>,
task: Arc<Task>,
dfdaemon_download_client: DfdaemonDownloadClient,
registry_cert: Arc<Option<Vec<CertificateDer<'static>>>>,
server_ca_cert: Arc<Option<Certificate>>,
}
let context = Context {
config: self.config.clone(),
task: self.task.clone(),
dfdaemon_download_client,
registry_cert: self.registry_cert.clone(),
server_ca_cert: self.server_ca_cert.clone(),
};
#[instrument(skip_all)]
pub async fn run(&self) -> ClientResult<()> {
let listener = TcpListener::bind(self.addr).await?;
info!("proxy server listening on {}", self.addr);
loop {
// Clone the shutdown channel.
let mut shutdown = self.shutdown.clone();
// Wait for a client connection.
tokio::select! {
tcp_accepted = listener.accept() => {
@ -194,23 +159,24 @@ impl Proxy {
// Spawn a task to handle the connection.
let io = TokioIo::new(tcp);
debug!("accepted connection from {}", remote_address);
info!("accepted connection from {}", remote_address);
let context = context.clone();
let config = self.config.clone();
let task = self.task.clone();
let dfdaemon_download_client = DfdaemonDownloadClient::new_unix(
config.download.server.socket_path.clone(),
).await?;
let registry_cert = self.registry_cert.clone();
let server_ca_cert = self.server_ca_cert.clone();
tokio::task::spawn(async move {
if let Err(err) = ServerBuilder::new()
.keep_alive(true)
.max_buf_size(read_buffer_size)
.preserve_header_case(true)
.title_case_headers(true)
.serve_connection(
io,
service_fn(move |request|{
let context = context.clone();
async move {
handler(context.config, context.task, request, context.dfdaemon_download_client, context.registry_cert, context.server_ca_cert, remote_address.ip()).await
}
} ),
service_fn(move |request| handler(config.clone(), task.clone(), request, dfdaemon_download_client.clone(), registry_cert.clone(), server_ca_cert.clone())),
)
.with_upgrades()
.await
@ -231,7 +197,7 @@ impl Proxy {
}
/// handler handles the request from the client.
#[instrument(skip_all, fields(url, method, remote_ip))]
#[instrument(skip_all, fields(uri, method))]
pub async fn handler(
config: Arc<Config>,
task: Arc<Task>,
@ -239,13 +205,7 @@ pub async fn handler(
dfdaemon_download_client: DfdaemonDownloadClient,
registry_cert: Arc<Option<Vec<CertificateDer<'static>>>>,
server_ca_cert: Arc<Option<Certificate>>,
remote_ip: std::net::IpAddr,
) -> ClientResult<Response> {
// Record the url, method, and remote_ip in the span.
Span::current().record("url", request.uri().to_string().as_str());
Span::current().record("method", request.method().as_str());
Span::current().record("remote_ip", remote_ip.to_string().as_str());
// Record the proxy request started metrics. The metrics will be recorded
// when the request is kept alive.
collect_proxy_request_started_metrics();
@ -258,7 +218,6 @@ pub async fn handler(
config,
task,
request,
remote_ip,
dfdaemon_download_client,
registry_cert,
server_ca_cert,
@ -270,20 +229,22 @@ pub async fn handler(
config,
task,
request,
remote_ip,
dfdaemon_download_client,
registry_cert,
)
.await;
}
// Record the uri and method in the span.
Span::current().record("uri", request.uri().to_string().as_str());
Span::current().record("method", request.method().as_str());
// Handle CONNECT request.
if Method::CONNECT == request.method() {
return https_handler(
config,
task,
request,
remote_ip,
dfdaemon_download_client,
registry_cert,
server_ca_cert,
@ -295,7 +256,6 @@ pub async fn handler(
config,
task,
request,
remote_ip,
dfdaemon_download_client,
registry_cert,
)
@ -308,7 +268,6 @@ pub async fn registry_mirror_http_handler(
config: Arc<Config>,
task: Arc<Task>,
request: Request<hyper::body::Incoming>,
remote_ip: std::net::IpAddr,
dfdaemon_download_client: DfdaemonDownloadClient,
registry_cert: Arc<Option<Vec<CertificateDer<'static>>>>,
) -> ClientResult<Response> {
@ -317,7 +276,6 @@ pub async fn registry_mirror_http_handler(
config,
task,
request,
remote_ip,
dfdaemon_download_client,
registry_cert,
)
@ -330,7 +288,6 @@ pub async fn registry_mirror_https_handler(
config: Arc<Config>,
task: Arc<Task>,
request: Request<hyper::body::Incoming>,
remote_ip: std::net::IpAddr,
dfdaemon_download_client: DfdaemonDownloadClient,
registry_cert: Arc<Option<Vec<CertificateDer<'static>>>>,
server_ca_cert: Arc<Option<Certificate>>,
@ -340,7 +297,6 @@ pub async fn registry_mirror_https_handler(
config,
task,
request,
remote_ip,
dfdaemon_download_client,
registry_cert,
server_ca_cert,
@ -354,7 +310,6 @@ pub async fn http_handler(
config: Arc<Config>,
task: Arc<Task>,
request: Request<hyper::body::Incoming>,
remote_ip: std::net::IpAddr,
dfdaemon_download_client: DfdaemonDownloadClient,
registry_cert: Arc<Option<Vec<CertificateDer<'static>>>>,
) -> ClientResult<Response> {
@ -377,21 +332,19 @@ pub async fn http_handler(
// If a matching rule is found, proxy the request via the dfdaemon.
let request_uri = request.uri();
if let Some(rule) = find_matching_rule(
config.proxy.rules.as_deref(),
request_uri.to_string().as_str(),
) {
if let Some(rule) =
find_matching_rule(config.proxy.rules.clone(), request_uri.to_string().as_str())
{
info!(
"proxy HTTP request via dfdaemon by rule config for method: {}, uri: {}",
request.method(),
request_uri
);
return proxy_via_dfdaemon(
return proxy_by_dfdaemon(
config,
task,
&rule,
rule.clone(),
request,
remote_ip,
dfdaemon_download_client,
)
.await;
@ -405,12 +358,11 @@ pub async fn http_handler(
request.method(),
request_uri
);
return proxy_via_dfdaemon(
return proxy_by_dfdaemon(
config,
task,
&Rule::default(),
Rule::default(),
request,
remote_ip,
dfdaemon_download_client,
)
.await;
@ -422,7 +374,7 @@ pub async fn http_handler(
request.method(),
request.uri()
);
return proxy_via_https(request, registry_cert).await;
return proxy_https(request, registry_cert).await;
}
info!(
@ -430,7 +382,7 @@ pub async fn http_handler(
request.method(),
request.uri()
);
return proxy_via_http(request).await;
return proxy_http(request).await;
}
/// https_handler handles the https request by client.
@ -439,7 +391,6 @@ pub async fn https_handler(
config: Arc<Config>,
task: Arc<Task>,
request: Request<hyper::body::Incoming>,
remote_ip: std::net::IpAddr,
dfdaemon_download_client: DfdaemonDownloadClient,
registry_cert: Arc<Option<Vec<CertificateDer<'static>>>>,
server_ca_cert: Arc<Option<Certificate>>,
@ -449,7 +400,6 @@ pub async fn https_handler(
// Proxy the request directly to the remote server.
if let Some(host) = request.uri().host() {
let host = host.to_string();
let port = request.uri().port_u16().unwrap_or(443);
tokio::task::spawn(async move {
match hyper::upgrade::on(request).await {
Ok(upgraded) => {
@ -458,8 +408,6 @@ pub async fn https_handler(
task,
upgraded,
host,
port,
remote_ip,
dfdaemon_download_client,
registry_cert,
server_ca_cert,
@ -482,15 +430,12 @@ pub async fn https_handler(
/// upgraded_tunnel handles the upgraded connection. If the ca_cert is not set, use the
/// self-signed certificate. Otherwise, use the CA certificate to sign the
/// self-signed certificate.
#[allow(clippy::too_many_arguments)]
#[instrument(skip_all)]
async fn upgraded_tunnel(
config: Arc<Config>,
task: Arc<Task>,
upgraded: Upgraded,
host: String,
port: u16,
remote_ip: std::net::IpAddr,
dfdaemon_download_client: DfdaemonDownloadClient,
registry_cert: Arc<Option<Vec<CertificateDer<'static>>>>,
server_ca_cert: Arc<Option<Certificate>>,
@ -502,11 +447,11 @@ async fn upgraded_tunnel(
let (server_certs, server_key) = match server_ca_cert.as_ref() {
Some(server_ca_cert) => {
info!("generate self-signed certificate by CA certificate");
generate_self_signed_certs_by_ca_cert(server_ca_cert, host.as_ref(), subject_alt_names)?
generate_self_signed_certs_by_ca_cert(server_ca_cert, subject_alt_names)?
}
None => {
info!("generate simple self-signed certificate");
generate_simple_self_signed_certs(host.as_ref(), subject_alt_names)?
generate_simple_self_signed_certs(subject_alt_names)?
}
};
@ -515,7 +460,7 @@ async fn upgraded_tunnel(
.with_no_client_auth()
.with_single_cert(server_certs, server_key)
.or_err(ErrorType::TLSConfigError)?;
server_config.alpn_protocols = SUPPORTED_HTTP_PROTOCOLS.clone();
server_config.alpn_protocols = vec![b"http/1.1".to_vec(), b"http/1.0".to_vec()];
let tls_acceptor = TlsAcceptor::from(Arc::new(server_config));
let tls_stream = tls_acceptor.accept(TokioIo::new(upgraded)).await?;
@ -537,9 +482,7 @@ async fn upgraded_tunnel(
config.clone(),
task.clone(),
host.clone(),
port,
request,
remote_ip,
dfdaemon_download_client.clone(),
registry_cert.clone(),
)
@ -555,20 +498,17 @@ async fn upgraded_tunnel(
}
/// upgraded_handler handles the upgraded https request from the client.
#[allow(clippy::too_many_arguments)]
#[instrument(skip_all, fields(url, method))]
#[instrument(skip_all, fields(uri, method))]
pub async fn upgraded_handler(
config: Arc<Config>,
task: Arc<Task>,
host: String,
port: u16,
mut request: Request<hyper::body::Incoming>,
remote_ip: std::net::IpAddr,
dfdaemon_download_client: DfdaemonDownloadClient,
registry_cert: Arc<Option<Vec<CertificateDer<'static>>>>,
) -> ClientResult<Response> {
// Record the url and method in the span.
Span::current().record("url", request.uri().to_string().as_str());
// Record the uri and method in the span.
Span::current().record("uri", request.uri().to_string().as_str());
Span::current().record("method", request.method().as_str());
// Authenticate the request with the basic auth.
@ -587,38 +527,26 @@ pub async fn upgraded_handler(
// If the scheme is not set, set the scheme to https.
if request.uri().scheme().is_none() {
let builder = http::uri::Builder::new();
*request.uri_mut() = builder
.scheme("https")
.authority(format!("{}:{}", host, port))
.path_and_query(
request
.uri()
.path_and_query()
.map(|v| v.as_str())
.unwrap_or("/"),
)
.build()
*request.uri_mut() = format!("https://{}{}", host, request.uri())
.parse()
.or_err(ErrorType::ParseError)?;
}
// If a matching rule is found, proxy the request via the dfdaemon.
let request_uri = request.uri();
if let Some(rule) = find_matching_rule(
config.proxy.rules.as_deref(),
request_uri.to_string().as_str(),
) {
if let Some(rule) =
find_matching_rule(config.proxy.rules.clone(), request_uri.to_string().as_str())
{
info!(
"proxy HTTPS request via dfdaemon by rule config for method: {}, uri: {}",
request.method(),
request_uri
);
return proxy_via_dfdaemon(
return proxy_by_dfdaemon(
config,
task,
&rule,
rule.clone(),
request,
remote_ip,
dfdaemon_download_client,
)
.await;
@ -632,12 +560,11 @@ pub async fn upgraded_handler(
request.method(),
request_uri
);
return proxy_via_dfdaemon(
return proxy_by_dfdaemon(
config,
task,
&Rule::default(),
Rule::default(),
request,
remote_ip,
dfdaemon_download_client,
)
.await;
@ -649,7 +576,7 @@ pub async fn upgraded_handler(
request.method(),
request.uri()
);
return proxy_via_https(request, registry_cert).await;
return proxy_https(request, registry_cert).await;
}
info!(
@ -657,25 +584,20 @@ pub async fn upgraded_handler(
request.method(),
request.uri()
);
return proxy_via_http(request).await;
return proxy_http(request).await;
}
/// proxy_via_dfdaemon proxies the request via the dfdaemon.
#[instrument(skip_all, fields(host_id, task_id, peer_id))]
async fn proxy_via_dfdaemon(
/// proxy_by_dfdaemon proxies the request via the dfdaemon.
#[instrument(skip_all)]
async fn proxy_by_dfdaemon(
config: Arc<Config>,
task: Arc<Task>,
rule: &Rule,
rule: Rule,
request: Request<hyper::body::Incoming>,
remote_ip: std::net::IpAddr,
dfdaemon_download_client: DfdaemonDownloadClient,
) -> ClientResult<Response> {
// Collect the metrics for the proxy request via dfdaemon.
collect_proxy_request_via_dfdaemon_metrics();
// Make the download task request.
let download_task_request =
match make_download_task_request(config.clone(), rule, request, remote_ip) {
let download_task_request = match make_download_task_request(config.clone(), rule, request) {
Ok(download_task_request) => download_task_request,
Err(err) => {
error!("make download task request failed: {}", err);
@ -702,7 +624,7 @@ async fn proxy_via_dfdaemon(
backend.status_code.unwrap_or_default() as u16
)
.unwrap_or(http::StatusCode::INTERNAL_SERVER_ERROR),
Some(hashmap_to_headermap(&backend.header)?),
Some(hashmap_to_hyper_header_map(&backend.header)?),
));
}
Err(_) => {
@ -734,11 +656,6 @@ async fn proxy_via_dfdaemon(
));
};
// Record the host_id, task_id, and peer_id in the span.
Span::current().record("host_id", message.host_id.as_str());
Span::current().record("task_id", message.task_id.as_str());
Span::current().record("peer_id", message.peer_id.as_str());
// Handle the download task started response.
let Some(download_task_response::Response::DownloadTaskStartedResponse(
download_task_started_response,
@ -751,36 +668,31 @@ async fn proxy_via_dfdaemon(
));
};
// Write the status code to the writer.
let (sender, mut receiver) = mpsc::channel(10 * 1024);
// Get the read buffer size from the config.
let read_buffer_size = config.proxy.read_buffer_size;
// Write the task data to the reader.
let (reader, writer) = tokio::io::duplex(read_buffer_size);
let mut writer = BufWriter::with_capacity(read_buffer_size, writer);
let reader_stream = ReaderStream::with_capacity(reader, read_buffer_size);
let (reader, mut writer) = tokio::io::duplex(4096);
// Write the status code to the writer.
let (sender, mut receiver) = mpsc::channel(1024 * 10);
// Construct the response body.
let reader_stream = ReaderStream::new(reader);
let stream_body = StreamBody::new(reader_stream.map_ok(Frame::data).map_err(ClientError::from));
let boxed_body = stream_body.boxed();
// Construct the response.
let mut response = Response::new(boxed_body);
*response.headers_mut() = make_response_headers(
message.task_id.as_str(),
download_task_started_response.clone(),
)?;
*response.headers_mut() = make_response_headers(download_task_started_response.clone())?;
*response.status_mut() = http::StatusCode::OK;
// Get the read buffer size from the config.
let read_buffer_size = config.proxy.read_buffer_size;
// Return the response if the client return the first piece.
let mut initialized = false;
// Write task data to the pipe. If gRPC receives an error message,
// shut down the writer.
tokio::spawn(
async move {
tokio::spawn(async move {
// Initialize the hashmap of the finished piece readers and pieces.
let mut finished_piece_readers = HashMap::new();
@ -801,19 +713,18 @@ async fn proxy_via_dfdaemon(
loop {
match out_stream.message().await {
Ok(Some(message)) => {
if let Some(
download_task_response::Response::DownloadPieceFinishedResponse(
if let Some(download_task_response::Response::DownloadPieceFinishedResponse(
download_task_response,
),
) = message.response
)) = message.response
{
// Sleep for a while to avoid the out stream being aborted. If the task is small, the proxy may read the piece
// before the task download is finished, which causes a `user body write aborted` error.
sleep(Duration::from_millis(1)).await;
// Send the none response to the client, if the first piece is received.
if !initialized {
debug!("first piece received, send response");
sender
.send_timeout(None, REQUEST_TIMEOUT)
.await
.unwrap_or_default();
info!("first piece received, send response");
sender.send(None).await.unwrap_or_default();
initialized = true;
}
@ -826,48 +737,42 @@ async fn proxy_via_dfdaemon(
return;
};
let piece_range_reader = match task
let piece_reader = match task
.piece
.download_from_local_into_async_read(
task.piece
.id(message.task_id.as_str(), piece.number)
.as_str(),
.download_from_local_peer_into_async_read(
message.task_id.as_str(),
piece.number,
piece.length,
download_task_started_response.range,
true,
false,
)
.await
{
Ok(piece_range_reader) => piece_range_reader,
Ok(piece_reader) => piece_reader,
Err(err) => {
error!("download piece reader error: {}", err);
if let Err(err) = writer.shutdown().await {
writer.shutdown().await.unwrap_or_else(|err| {
error!("writer shutdown error: {}", err);
}
});
return;
}
};
// Use a buffer to read the piece.
let piece_range_reader =
BufReader::with_capacity(read_buffer_size, piece_range_reader);
let piece_reader = BufReader::with_capacity(read_buffer_size, piece_reader);
// Write the piece data to the pipe in order.
finished_piece_readers.insert(piece.number, piece_range_reader);
while let Some(mut piece_range_reader) =
finished_piece_readers.remove(&need_piece_number)
{
debug!("copy piece {} to stream", need_piece_number);
if let Err(err) =
tokio::io::copy(&mut piece_range_reader, &mut writer).await
finished_piece_readers.insert(piece.number, piece_reader);
while let Some(piece_reader) =
finished_piece_readers.get_mut(&need_piece_number)
{
info!("copy piece {} to stream", need_piece_number);
if let Err(err) = tokio::io::copy(piece_reader, &mut writer).await {
error!("download piece reader error: {}", err);
if let Err(err) = writer.shutdown().await {
writer.shutdown().await.unwrap_or_else(|err| {
error!("writer shutdown error: {}", err);
}
});
return;
}
@ -905,32 +810,26 @@ async fn proxy_via_dfdaemon(
Ok(backend) => {
error!("download task failed: {:?}", backend);
sender
.send_timeout(
Some(make_error_response(
.send(Some(make_error_response(
http::StatusCode::from_u16(
backend.status_code.unwrap_or_default() as u16,
)
.unwrap_or(http::StatusCode::INTERNAL_SERVER_ERROR),
Some(
hashmap_to_headermap(&backend.header)
hashmap_to_hyper_header_map(&backend.header)
.unwrap_or_default(),
),
)),
REQUEST_TIMEOUT,
)
)))
.await
.unwrap_or_default();
}
Err(_) => {
error!("download task failed: {}", err);
sender
.send_timeout(
Some(make_error_response(
.send(Some(make_error_response(
http::StatusCode::INTERNAL_SERVER_ERROR,
None,
)),
REQUEST_TIMEOUT,
)
)))
.await
.unwrap_or_default();
}
@ -940,13 +839,11 @@ async fn proxy_via_dfdaemon(
}
};
}
}
.in_current_span(),
);
});
match receiver.recv().await {
Some(Some(response)) => Ok(response),
Some(None) => return Ok(response),
Some(Some(response)) => return Ok(response),
Some(None) => Ok(response),
None => Ok(make_error_response(
http::StatusCode::INTERNAL_SERVER_ERROR,
None,
@ -954,9 +851,9 @@ async fn proxy_via_dfdaemon(
}
}
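// Editor's illustrative sketch (not part of this diff): the piping pattern used above,
// reduced to simplified types. One half of a tokio duplex is exposed as a streaming
// hyper body while a background task writes piece data into the other half in order.
async fn streaming_body_sketch() -> http_body_util::combinators::BoxBody<bytes::Bytes, std::io::Error> {
    use futures::TryStreamExt;
    use http_body_util::{BodyExt, StreamBody};
    use hyper::body::Frame;
    use tokio::io::AsyncWriteExt;
    use tokio_util::io::ReaderStream;

    let (reader, mut writer) = tokio::io::duplex(4096);
    tokio::spawn(async move {
        // In proxy_via_dfdaemon, finished piece readers are copied here in piece order.
        writer.write_all(b"piece data").await.unwrap_or_default();
        writer.shutdown().await.unwrap_or_default();
    });

    // The read half becomes a byte stream, each chunk is wrapped into a body frame,
    // and the whole thing is boxed into the response body type.
    StreamBody::new(ReaderStream::new(reader).map_ok(Frame::data)).boxed()
}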
/// proxy_via_http proxies the HTTP request directly to the remote server.
/// proxy_http proxies the HTTP request directly to the remote server.
#[instrument(skip_all)]
async fn proxy_via_http(request: Request<hyper::body::Incoming>) -> ClientResult<Response> {
async fn proxy_http(request: Request<hyper::body::Incoming>) -> ClientResult<Response> {
let Some(host) = request.uri().host() else {
error!("CONNECT host is not socket addr: {:?}", request.uri());
return Ok(make_error_response(http::StatusCode::BAD_REQUEST, None));
@ -981,9 +878,9 @@ async fn proxy_via_http(request: Request<hyper::body::Incoming>) -> ClientResult
Ok(response.map(|b| b.map_err(ClientError::from).boxed()))
}
/// proxy_via_https proxies the HTTPS request directly to the remote server.
/// proxy_https proxies the HTTPS request directly to the remote server.
#[instrument(skip_all)]
async fn proxy_via_https(
async fn proxy_https(
request: Request<hyper::body::Incoming>,
registry_cert: Arc<Option<Vec<CertificateDer<'static>>>>,
) -> ClientResult<Response> {
@ -1011,20 +908,23 @@ async fn proxy_via_https(
.build();
let client = Client::builder(TokioExecutor::new()).build(https);
let response = client.request(request).await.inspect_err(|err| {
let response = client.request(request).await.map_err(|err| {
error!("request failed: {:?}", err);
err
})?;
Ok(response.map(|b| b.map_err(ClientError::from).boxed()))
}
/// make_registry_mirror_request makes a registry mirror request by the request.
#[instrument(skip_all)]
fn make_registry_mirror_request(
config: Arc<Config>,
mut request: Request<hyper::body::Incoming>,
) -> ClientResult<Request<hyper::body::Incoming>> {
let header = request.headers().clone();
let registry_mirror_uri = match header::get_registry(&header) {
// Convert the Reqwest header to the Hyper header.
let reqwest_request_header = hyper_headermap_to_reqwest_headermap(request.headers());
let registry_mirror_uri = match header::get_registry(&reqwest_request_header) {
Some(registry) => format!("{}{}", registry, request.uri().path())
.parse::<http::Uri>()
.or_err(ErrorType::ParseError)?,
@ -1036,7 +936,8 @@ fn make_registry_mirror_request(
.parse::<http::Uri>()
.or_err(ErrorType::ParseError)?,
};
header::get_registry(&header);
header::get_registry(&reqwest_request_header);
*request.uri_mut() = registry_mirror_uri.clone();
request.headers_mut().insert(
@ -1052,28 +953,17 @@ fn make_registry_mirror_request(
}
/// make_download_task_request makes a download task request by the request.
#[instrument(skip_all)]
fn make_download_task_request(
config: Arc<Config>,
rule: &Rule,
rule: Rule,
request: Request<hyper::body::Incoming>,
remote_ip: std::net::IpAddr,
) -> ClientResult<DownloadTaskRequest> {
// Convert the Reqwest header to the Hyper header.
let mut header = request.headers().clone();
let mut reqwest_request_header = hyper_headermap_to_reqwest_headermap(request.headers());
// Registry will return the 403 status code if the Host header is set.
header.remove(reqwest::header::HOST);
// Validate the request arguments.
let piece_length = header::get_piece_length(&header).map(|piece_length| piece_length.as_u64());
if let Some(piece_length) = piece_length {
if piece_length < MIN_PIECE_LENGTH {
return Err(ClientError::ValidationError(format!(
"piece length {} is less than the minimum piece length {}",
piece_length, MIN_PIECE_LENGTH
)));
}
}
reqwest_request_header.remove(reqwest::header::HOST);
Ok(DownloadTaskRequest {
download: Some(Download {
@ -1082,35 +972,29 @@ fn make_download_task_request(
// The download range uses the range header in the HTTP protocol.
range: None,
r#type: TaskType::Standard as i32,
tag: header::get_tag(&header),
application: header::get_application(&header),
priority: header::get_priority(&header),
tag: header::get_tag(&reqwest_request_header),
application: header::get_application(&reqwest_request_header),
priority: header::get_priority(&reqwest_request_header),
filtered_query_params: header::get_filtered_query_params(
&header,
&reqwest_request_header,
rule.filtered_query_params.clone(),
),
request_header: headermap_to_hashmap(&header),
piece_length,
// Need the absolute path.
output_path: header::get_output_path(&header),
request_header: reqwest_headermap_to_hashmap(&reqwest_request_header),
piece_length: None,
output_path: None,
timeout: None,
need_back_to_source: false,
disable_back_to_source: config.proxy.disable_back_to_source,
certificate_chain: Vec::new(),
prefetch: need_prefetch(config.clone(), &header),
prefetch: need_prefetch(config.clone(), &reqwest_request_header),
object_storage: None,
hdfs: None,
is_prefetch: false,
need_piece_content: false,
force_hard_link: header::get_force_hard_link(&header),
content_for_calculating_task_id: header::get_content_for_calculating_task_id(&header),
remote_ip: Some(remote_ip.to_string()),
}),
})
}
/// need_prefetch returns whether the prefetch is needed by the configuration and the request
/// header.
#[instrument(skip_all)]
fn need_prefetch(config: Arc<Config>, header: &http::HeaderMap) -> bool {
// If the header does not contain the range header, the request does not need prefetch.
if !header.contains_key(reqwest::header::RANGE) {
@ -1124,16 +1008,17 @@ fn need_prefetch(config: Arc<Config>, header: &http::HeaderMap) -> bool {
}
// Return the prefetch value from the configuration.
config.proxy.prefetch
return config.proxy.prefetch;
}
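// Editor's simplified sketch (not part of this diff): the decision above with Config
// replaced by a plain bool. Prefetch only applies to ranged requests; the
// X-Dragonfly-Prefetch header wins when present, otherwise the proxy.prefetch
// configuration value is used.
fn need_prefetch_sketch(prefetch_enabled_in_config: bool, header: &http::HeaderMap) -> bool {
    if !header.contains_key(http::header::RANGE) {
        return false;
    }

    match header
        .get(header::DRAGONFLY_PREFETCH_HEADER)
        .and_then(|value| value.to_str().ok())
    {
        Some(value) => value.eq_ignore_ascii_case("true"),
        None => prefetch_enabled_in_config,
    }
}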
/// make_download_url makes a download url by the given uri.
#[instrument(skip_all)]
fn make_download_url(
uri: &hyper::Uri,
use_tls: bool,
redirect: Option<String>,
) -> ClientResult<String> {
let mut parts = http::uri::Parts::from(uri.clone());
let mut parts = uri.clone().into_parts();
// Set the scheme to https if the rule uses tls.
if use_tls {
@ -1142,8 +1027,9 @@ fn make_download_url(
// Set the authority to the redirect address.
if let Some(redirect) = redirect {
parts.authority =
Some(http::uri::Authority::try_from(redirect).or_err(ErrorType::ParseError)?);
parts.authority = Some(http::uri::Authority::from_static(Box::leak(
redirect.into_boxed_str(),
)));
}
Ok(http::Uri::from_parts(parts)
@ -1152,8 +1038,8 @@ fn make_download_url(
}
/// make_response_headers makes the response headers.
#[instrument(skip_all)]
fn make_response_headers(
task_id: &str,
mut download_task_started_response: DownloadTaskStartedResponse,
) -> ClientResult<hyper::header::HeaderMap> {
// Insert the content range header to the response header.
@ -1174,28 +1060,18 @@ fn make_response_headers(
);
};
if download_task_started_response.is_finished {
download_task_started_response.response_header.insert(
header::DRAGONFLY_TASK_DOWNLOAD_FINISHED_HEADER.to_string(),
"true".to_string(),
);
}
download_task_started_response.response_header.insert(
header::DRAGONFLY_TASK_ID_HEADER.to_string(),
task_id.to_string(),
);
hashmap_to_headermap(&download_task_started_response.response_header)
hashmap_to_hyper_header_map(&download_task_started_response.response_header)
}
/// find_matching_rule returns whether the dfdaemon should be used to download the task.
/// If the dfdaemon should be used, return the matched rule.
fn find_matching_rule(rules: Option<&[Rule]>, url: &str) -> Option<Rule> {
#[instrument(skip_all)]
fn find_matching_rule(rules: Option<Vec<Rule>>, url: &str) -> Option<Rule> {
rules?.iter().find(|rule| rule.regex.is_match(url)).cloned()
}
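// Editor's illustrative sketch (not part of this diff): the matching itself is a
// first-regex-wins scan, shown here with the plain `regex` crate instead of the real
// `Rule` type from dragonfly_client_config.
fn first_matching_pattern(patterns: &[regex::Regex], url: &str) -> Option<usize> {
    // e.g. a pattern like r"^https://registry\.example\.com/.*" matches registry URLs.
    patterns.iter().position(|pattern| pattern.is_match(url))
}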
/// make_error_response makes an error response with the given status and message.
#[instrument(skip_all)]
fn make_error_response(status: http::StatusCode, header: Option<http::HeaderMap>) -> Response {
let mut response = Response::new(empty());
*response.status_mut() = status;
@ -1209,6 +1085,7 @@ fn make_error_response(status: http::StatusCode, header: Option<http::HeaderMap>
}
/// empty returns an empty body.
#[instrument(skip_all)]
fn empty() -> BoxBody<Bytes, ClientError> {
Empty::<Bytes>::new()
.map_err(|never| match never {})

View File

@ -17,5 +17,4 @@
pub mod persistent_cache_task;
pub mod piece;
pub mod piece_collector;
pub mod piece_downloader;
pub mod task;

File diff suppressed because it is too large

View File

@ -14,14 +14,15 @@
* limitations under the License.
*/
use super::*;
use crate::grpc::dfdaemon_upload::DfdaemonUploadClient;
use crate::metrics::{
collect_backend_request_failure_metrics, collect_backend_request_finished_metrics,
collect_backend_request_started_metrics, collect_download_piece_traffic_metrics,
collect_upload_piece_traffic_metrics,
};
use chrono::Utc;
use dragonfly_api::common::v2::{Hdfs, ObjectStorage, Range, TrafficType};
use dragonfly_api::common::v2::{ObjectStorage, Range, TrafficType};
use dragonfly_api::dfdaemon::v2::DownloadPieceRequest;
use dragonfly_client_backend::{BackendFactory, GetRequest};
use dragonfly_client_config::dfdaemon::Config;
use dragonfly_client_core::{error::BackendError, Error, Result};
@ -30,31 +31,29 @@ use dragonfly_client_util::id_generator::IDGenerator;
use leaky_bucket::RateLimiter;
use reqwest::header::{self, HeaderMap};
use std::collections::HashMap;
use std::io::Cursor;
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::io::{AsyncRead, AsyncReadExt};
use tracing::{error, info, instrument, Span};
use super::*;
/// MAX_PIECE_COUNT is the maximum piece count. If the piece count is greater
/// than MAX_PIECE_COUNT, the piece length will be optimized by the file length.
/// When the piece length reaches MAX_PIECE_LENGTH, the piece count
/// will probably be greater than MAX_PIECE_COUNT.
pub const MAX_PIECE_COUNT: u64 = 500;
const MAX_PIECE_COUNT: u64 = 500;
/// MIN_PIECE_LENGTH is the minimum piece length.
pub const MIN_PIECE_LENGTH: u64 = 4 * 1024 * 1024;
const MIN_PIECE_LENGTH: u64 = 4 * 1024 * 1024;
/// MAX_PIECE_LENGTH is the maximum piece length.
pub const MAX_PIECE_LENGTH: u64 = 64 * 1024 * 1024;
const MAX_PIECE_LENGTH: u64 = 16 * 1024 * 1024;
/// PieceLengthStrategy sets the optimization strategy of piece length.
pub enum PieceLengthStrategy {
/// OptimizeByFileLength optimizes the piece length by the file length.
OptimizeByFileLength(u64),
/// FixedPieceLength sets the fixed piece length.
FixedPieceLength(u64),
OptimizeByFileLength,
}
/// Piece represents a piece manager.
@ -68,9 +67,6 @@ pub struct Piece {
/// storage is the local storage.
storage: Arc<Storage>,
/// downloader is the piece downloader.
downloader: Arc<dyn piece_downloader::Downloader>,
/// backend_factory is the backend factory.
backend_factory: Arc<BackendFactory>,
@ -79,35 +75,27 @@ pub struct Piece {
/// upload_rate_limiter is the rate limiter of the upload speed in bps(bytes per second).
upload_rate_limiter: Arc<RateLimiter>,
/// prefetch_rate_limiter is the rate limiter of the prefetch speed in bps(bytes per second).
prefetch_rate_limiter: Arc<RateLimiter>,
}
/// Piece implements the piece manager.
impl Piece {
/// new returns a new Piece.
#[instrument(skip_all)]
pub fn new(
config: Arc<Config>,
id_generator: Arc<IDGenerator>,
storage: Arc<Storage>,
backend_factory: Arc<BackendFactory>,
) -> Result<Self> {
Ok(Self {
) -> Self {
Self {
config: config.clone(),
id_generator,
storage,
downloader: piece_downloader::DownloaderFactory::new(
config.storage.server.protocol.as_str(),
config.clone(),
)?
.build(),
backend_factory,
download_rate_limiter: Arc::new(
RateLimiter::builder()
.initial(config.download.rate_limit.as_u64() as usize)
.refill(config.download.rate_limit.as_u64() as usize)
.max(config.download.rate_limit.as_u64() as usize)
.interval(Duration::from_secs(1))
.fair(false)
.build(),
@ -116,37 +104,16 @@ impl Piece {
RateLimiter::builder()
.initial(config.upload.rate_limit.as_u64() as usize)
.refill(config.upload.rate_limit.as_u64() as usize)
.max(config.upload.rate_limit.as_u64() as usize)
.interval(Duration::from_secs(1))
.fair(false)
.build(),
),
prefetch_rate_limiter: Arc::new(
RateLimiter::builder()
.initial(config.proxy.prefetch_rate_limit.as_u64() as usize)
.refill(config.proxy.prefetch_rate_limit.as_u64() as usize)
.max(config.proxy.prefetch_rate_limit.as_u64() as usize)
.interval(Duration::from_secs(1))
.fair(false)
.build(),
),
})
}
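The constructor above wires three leaky_bucket rate limiters (download, upload, prefetch) with the same builder shape. Below is a brief, self-contained sketch of that pattern, using the builder calls shown in this diff; the 8 MiB/s limit and the piece length are placeholder values (in dfdaemon they come from config.download.rate_limit and the piece metadata).
use leaky_bucket::RateLimiter;
use std::time::Duration;
#[tokio::main]
async fn main() {
    // Placeholder limit; in dfdaemon this comes from config.download.rate_limit.
    let rate_limit: usize = 8 * 1024 * 1024;
    // Same builder shape as above: the bucket starts full and is refilled once
    // per second, so sustained throughput is capped at rate_limit bytes/s.
    let limiter = RateLimiter::builder()
        .initial(rate_limit)
        .refill(rate_limit)
        .max(rate_limit)
        .interval(Duration::from_secs(1))
        .fair(false)
        .build();
    // Each transfer acquires as many tokens as the piece is long before it
    // touches the network, mirroring download_rate_limiter.acquire(length).
    let piece_length: usize = 4 * 1024 * 1024;
    limiter.acquire(piece_length).await;
    // ... read or write the piece here ...
}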
/// id generates a new piece id.
#[inline]
pub fn id(&self, task_id: &str, number: u32) -> String {
self.storage.piece_id(task_id, number)
}
/// get gets a piece from the local storage.
pub fn get(&self, piece_id: &str) -> Result<Option<metadata::Piece>> {
self.storage.get_piece(piece_id)
}
/// get_all gets all pieces of a task from the local storage.
pub fn get_all(&self, task_id: &str) -> Result<Vec<metadata::Piece>> {
self.storage.get_pieces(task_id)
#[instrument(skip_all)]
pub fn get(&self, task_id: &str, number: u32) -> Result<Option<metadata::Piece>> {
self.storage.get_piece(task_id, number)
}
/// calculate_interested calculates the interested pieces by content_length and range.
@ -304,9 +271,13 @@ impl Piece {
}
/// calculate_piece_size calculates the piece size by content_length.
pub fn calculate_piece_length(&self, strategy: PieceLengthStrategy) -> u64 {
pub fn calculate_piece_length(
&self,
strategy: PieceLengthStrategy,
content_length: u64,
) -> u64 {
match strategy {
PieceLengthStrategy::OptimizeByFileLength(content_length) => {
PieceLengthStrategy::OptimizeByFileLength => {
let piece_length = (content_length as f64 / MAX_PIECE_COUNT as f64) as u64;
let actual_piece_length = piece_length.next_power_of_two();
@ -319,7 +290,6 @@ impl Piece {
(false, _) => MIN_PIECE_LENGTH,
}
}
PieceLengthStrategy::FixedPieceLength(piece_length) => piece_length,
}
}
@ -328,19 +298,18 @@ impl Piece {
(content_length as f64 / piece_length as f64).ceil() as u32
}
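To make the piece-length optimization above concrete, here is a minimal sketch using the constants from the main side of this diff (64 MiB maximum). The clamping to the min/max bounds is an assumption, since the remaining match arms are only partially visible in this hunk; the piece count uses the same ceiling division as calculate_piece_count.
const MAX_PIECE_COUNT: u64 = 500;
const MIN_PIECE_LENGTH: u64 = 4 * 1024 * 1024;
const MAX_PIECE_LENGTH: u64 = 64 * 1024 * 1024;
/// Optimize the piece length so a file is split into at most ~MAX_PIECE_COUNT
/// pieces, rounded up to a power of two and kept inside the allowed bounds.
fn optimized_piece_length(content_length: u64) -> u64 {
    let target = (content_length as f64 / MAX_PIECE_COUNT as f64) as u64;
    target.next_power_of_two().clamp(MIN_PIECE_LENGTH, MAX_PIECE_LENGTH)
}
/// Same ceiling division as calculate_piece_count above.
fn piece_count(content_length: u64, piece_length: u64) -> u32 {
    (content_length as f64 / piece_length as f64).ceil() as u32
}
fn main() {
    let content_length: u64 = 10 * 1024 * 1024 * 1024; // 10 GiB
    let piece_length = optimized_piece_length(content_length);
    assert_eq!(piece_length, 32 * 1024 * 1024); // 10 GiB / 500 ≈ 20.5 MiB -> 32 MiB
    assert_eq!(piece_count(content_length, piece_length), 320);
}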
/// upload_from_local_into_async_read uploads a single piece from local cache.
/// upload_from_local_peer_into_async_read uploads a single piece from a local peer.
#[instrument(skip_all, fields(piece_id))]
pub async fn upload_from_local_into_async_read(
pub async fn upload_from_local_peer_into_async_read(
&self,
piece_id: &str,
task_id: &str,
number: u32,
length: u64,
range: Option<Range>,
disable_rate_limit: bool,
) -> Result<impl AsyncRead> {
// Span record the piece_id.
Span::current().record("piece_id", piece_id);
Span::current().record("piece_length", length);
Span::current().record("piece_id", self.storage.piece_id(task_id, number));
// Acquire the upload rate limiter.
if !disable_rate_limit {
@ -349,50 +318,43 @@ impl Piece {
// Upload the piece content.
self.storage
.upload_piece(piece_id, task_id, range)
.upload_piece(task_id, number, range)
.await
.inspect(|_| {
.map(|reader| {
collect_upload_piece_traffic_metrics(
self.id_generator.task_type(task_id) as i32,
length,
);
reader
})
}
/// download_from_local_into_async_read downloads a single piece from local cache.
/// download_from_local_peer_into_async_read downloads a single piece from a local peer.
#[instrument(skip_all, fields(piece_id))]
pub async fn download_from_local_into_async_read(
pub async fn download_from_local_peer_into_async_read(
&self,
piece_id: &str,
task_id: &str,
number: u32,
length: u64,
range: Option<Range>,
disable_rate_limit: bool,
is_prefetch: bool,
) -> Result<impl AsyncRead> {
// Span record the piece_id.
Span::current().record("piece_id", piece_id);
Span::current().record("piece_length", length);
Span::current().record("piece_id", self.storage.piece_id(task_id, number));
// Acquire the download rate limiter.
if !disable_rate_limit {
if is_prefetch {
// Acquire the prefetch rate limiter.
self.prefetch_rate_limiter.acquire(length as usize).await;
} else {
// Acquire the download rate limiter.
self.download_rate_limiter.acquire(length as usize).await;
}
}
// Read the piece content from the local storage.
self.storage.upload_piece(piece_id, task_id, range).await
self.storage.upload_piece(task_id, number, range).await
}
/// download_from_local downloads a single piece from local cache. Fake the download piece
/// from the local cache, just collect the metrics.
/// download_from_local_peer downloads a single piece from a local peer. Fake the download piece
/// from the local peer, just collect the metrics.
#[instrument(skip_all)]
pub fn download_from_local(&self, task_id: &str, length: u64) {
pub fn download_from_local_peer(&self, task_id: &str, length: u64) {
collect_download_piece_traffic_metrics(
&TrafficType::LocalPeer,
self.id_generator.task_type(task_id) as i32,
@ -400,104 +362,132 @@ impl Piece {
);
}
/// download_from_parent downloads a single piece from a parent.
#[allow(clippy::too_many_arguments)]
/// download_from_remote_peer downloads a single piece from a remote peer.
#[instrument(skip_all, fields(piece_id))]
pub async fn download_from_parent(
pub async fn download_from_remote_peer(
&self,
piece_id: &str,
host_id: &str,
task_id: &str,
number: u32,
length: u64,
parent: piece_collector::CollectedParent,
is_prefetch: bool,
) -> Result<metadata::Piece> {
// Span record the piece_id.
Span::current().record("piece_id", piece_id);
Span::current().record("piece_length", length);
Span::current().record("piece_id", self.storage.piece_id(task_id, number));
// Acquire the download rate limiter.
self.download_rate_limiter.acquire(length as usize).await;
// Record the start of downloading piece.
let piece = self
.storage
.download_piece_started(piece_id, number)
.await?;
let piece = self.storage.download_piece_started(task_id, number).await?;
// If the piece has already been downloaded by another thread,
// return it directly.
if piece.is_finished() {
info!("finished piece {} from local", piece_id);
return Ok(piece);
}
if is_prefetch {
// Acquire the prefetch rate limiter.
self.prefetch_rate_limiter.acquire(length as usize).await;
} else {
// Acquire the download rate limiter.
self.download_rate_limiter.acquire(length as usize).await;
}
// Create a dfdaemon client.
let host = parent.host.clone().ok_or_else(|| {
error!("peer host is empty");
if let Some(err) = self.storage.download_piece_failed(piece_id).err() {
if let Some(err) = self.storage.download_piece_failed(task_id, number).err() {
error!("set piece metadata failed: {}", err)
};
Error::InvalidPeer(parent.id.clone())
})?;
let (content, offset, digest) = self
.downloader
.download_piece(
format!("{}:{}", host.ip, host.port).as_str(),
number,
host_id,
task_id,
let dfdaemon_upload_client = DfdaemonUploadClient::new(
self.config.clone(),
format!("http://{}:{}", host.ip, host.port),
)
.await
.inspect_err(|err| {
error!("download piece failed: {}", err);
if let Some(err) = self.storage.download_piece_failed(piece_id).err() {
.map_err(|err| {
error!(
"create dfdaemon upload client from {}:{} failed: {}",
host.ip, host.port, err
);
if let Some(err) = self.storage.download_piece_failed(task_id, number).err() {
error!("set piece metadata failed: {}", err)
};
})?;
let mut reader = Cursor::new(content);
// Record the finish of downloading piece.
match self
.storage
.download_piece_from_parent_finished(
piece_id,
task_id,
offset,
length,
digest.as_str(),
parent.id.as_str(),
&mut reader,
self.config.storage.write_piece_timeout,
err
})?;
// Send the download piece request.
let response = dfdaemon_upload_client
.download_piece(
DownloadPieceRequest {
host_id: host_id.to_string(),
task_id: task_id.to_string(),
piece_number: number,
},
self.config.download.piece_timeout,
)
.await
{
Ok(piece) => {
.map_err(|err| {
error!("download piece failed: {}", err);
if let Some(err) = self.storage.download_piece_failed(task_id, number).err() {
error!("set piece metadata failed: {}", err)
};
err
})?;
let piece = response.piece.ok_or_else(|| {
error!("piece is empty");
if let Some(err) = self.storage.download_piece_failed(task_id, number).err() {
error!("set piece metadata failed: {}", err)
};
Error::InvalidParameter
})?;
// Get the piece content.
let content = piece.content.ok_or_else(|| {
error!("piece content is empty");
if let Some(err) = self.storage.download_piece_failed(task_id, number).err() {
error!("set piece metadata failed: {}", err)
};
Error::InvalidParameter
})?;
// Record the finish of downloading piece.
self.storage
.download_piece_from_remote_peer_finished(
task_id,
number,
piece.offset,
piece.digest.as_str(),
parent.id.as_str(),
&mut content.as_slice(),
)
.await
.map_err(|err| {
// Record the failure of downloading the piece
// if the storage fails to record it.
error!("download piece finished: {}", err);
if let Some(err) = self.storage.download_piece_failed(task_id, number).err() {
error!("set piece metadata failed: {}", err)
};
err
})?;
self.storage
.get_piece(task_id, number)?
.ok_or_else(|| {
error!("piece not found");
Error::PieceNotFound(number.to_string())
})
.map(|piece| {
collect_download_piece_traffic_metrics(
&TrafficType::RemotePeer,
self.id_generator.task_type(task_id) as i32,
length,
);
Ok(piece)
}
Err(err) => {
error!("download piece finished: {}", err);
if let Some(err) = self.storage.download_piece_failed(piece_id).err() {
error!("set piece metadata failed: {}", err)
};
Err(err)
}
}
piece
})
}
/// download_from_source downloads a single piece from the source.
@ -505,42 +495,29 @@ impl Piece {
#[instrument(skip_all, fields(piece_id))]
pub async fn download_from_source(
&self,
piece_id: &str,
task_id: &str,
number: u32,
url: &str,
offset: u64,
length: u64,
request_header: HeaderMap,
is_prefetch: bool,
object_storage: Option<ObjectStorage>,
hdfs: Option<Hdfs>,
) -> Result<metadata::Piece> {
// Span record the piece_id.
Span::current().record("piece_id", piece_id);
Span::current().record("piece_length", length);
Span::current().record("piece_id", self.storage.piece_id(task_id, number));
// Acquire the download rate limiter.
self.download_rate_limiter.acquire(length as usize).await;
// Record the start of downloading piece.
let piece = self
.storage
.download_piece_started(piece_id, number)
.await?;
let piece = self.storage.download_piece_started(task_id, number).await?;
// If the piece has already been downloaded by another thread,
// return it directly.
if piece.is_finished() {
info!("finished piece {} from local", piece_id);
return Ok(piece);
}
if is_prefetch {
// Acquire the prefetch rate limiter.
self.prefetch_rate_limiter.acquire(length as usize).await;
} else {
// Acquire the download rate limiter.
self.download_rate_limiter.acquire(length as usize).await;
}
// Add range header to the request by offset and length.
let mut request_header = request_header.clone();
request_header.insert(
@ -551,11 +528,13 @@ impl Piece {
);
// Download the piece from the source.
let backend = self.backend_factory.build(url).inspect_err(|err| {
let backend = self.backend_factory.build(url).map_err(|err| {
error!("build backend failed: {}", err);
if let Some(err) = self.storage.download_piece_failed(piece_id).err() {
if let Some(err) = self.storage.download_piece_failed(task_id, number).err() {
error!("set piece metadata failed: {}", err)
};
err
})?;
// Record the start time.
@ -569,7 +548,7 @@ impl Piece {
let mut response = backend
.get(GetRequest {
task_id: task_id.to_string(),
piece_id: piece_id.to_string(),
piece_id: self.storage.piece_id(task_id, number),
url: url.to_string(),
range: Some(Range {
start: offset,
@ -579,10 +558,9 @@ impl Piece {
timeout: self.config.download.piece_timeout,
client_cert: None,
object_storage,
hdfs,
})
.await
.inspect_err(|err| {
.map_err(|err| {
// Collect the backend request failure metrics.
collect_backend_request_failure_metrics(
backend.scheme().as_str(),
@ -591,9 +569,11 @@ impl Piece {
// if the request failed.
error!("backend get failed: {}", err);
if let Some(err) = self.storage.download_piece_failed(piece_id).err() {
if let Some(err) = self.storage.download_piece_failed(task_id, number).err() {
error!("set piece metadata failed: {}", err)
};
err
})?;
if !response.success {
@ -614,12 +594,12 @@ impl Piece {
let error_message = response.error_message.unwrap_or_default();
error!("backend get failed: {} {}", error_message, buffer.as_str());
self.storage.download_piece_failed(piece_id)?;
return Err(Error::BackendError(Box::new(BackendError {
self.storage.download_piece_failed(task_id, number)?;
return Err(Error::BackendError(BackendError {
message: error_message,
status_code: Some(response.http_status_code.unwrap_or_default()),
header: Some(response.http_header.unwrap_or_default()),
})));
}));
}
// Collect the backend request finished metrics.
@ -630,247 +610,41 @@ impl Piece {
);
// Record the finish of downloading piece.
match self
.storage
self.storage
.download_piece_from_source_finished(
piece_id,
task_id,
number,
offset,
length,
&mut response.reader,
self.config.storage.write_piece_timeout,
)
.await
{
Ok(piece) => {
.map_err(|err| {
// Record the failure of downloading the piece
// if the storage fails to record it.
error!("download piece finished: {}", err);
if let Some(err) = self.storage.download_piece_failed(task_id, number).err() {
error!("set piece metadata failed: {}", err)
};
err
})?;
self.storage
.get_piece(task_id, number)?
.ok_or_else(|| {
error!("piece not found");
Error::PieceNotFound(number.to_string())
})
.map(|piece| {
collect_download_piece_traffic_metrics(
&TrafficType::BackToSource,
self.id_generator.task_type(task_id) as i32,
length,
);
Ok(piece)
}
Err(err) => {
error!("download piece finished: {}", err);
if let Some(err) = self.storage.download_piece_failed(piece_id).err() {
error!("set piece metadata failed: {}", err)
};
Err(err)
}
}
}
/// persistent_cache_id generates a new persistent cache piece id.
#[inline]
pub fn persistent_cache_id(&self, task_id: &str, number: u32) -> String {
self.storage.persistent_cache_piece_id(task_id, number)
}
/// get_persistent_cache gets a persistent cache piece from the local storage.
#[instrument(skip_all)]
pub fn get_persistent_cache(&self, piece_id: &str) -> Result<Option<metadata::Piece>> {
self.storage.get_persistent_cache_piece(piece_id)
}
/// create_persistent_cache creates a new persistent cache piece.
#[instrument(skip_all)]
pub async fn create_persistent_cache<R: AsyncRead + Unpin + ?Sized>(
&self,
piece_id: &str,
task_id: &str,
number: u32,
offset: u64,
length: u64,
reader: &mut R,
) -> Result<metadata::Piece> {
self.storage
.create_persistent_cache_piece(piece_id, task_id, number, offset, length, reader)
.await
}
/// upload_persistent_cache_from_local_into_async_read uploads a persistent cache piece from local cache.
#[instrument(skip_all, fields(piece_id))]
pub async fn upload_persistent_cache_from_local_into_async_read(
&self,
piece_id: &str,
task_id: &str,
length: u64,
range: Option<Range>,
) -> Result<impl AsyncRead> {
// Span record the piece_id.
Span::current().record("piece_id", piece_id);
Span::current().record("piece_length", length);
// Acquire the upload rate limiter.
self.upload_rate_limiter.acquire(length as usize).await;
// Upload the persistent cache piece content.
self.storage
.upload_persistent_cache_piece(piece_id, task_id, range)
.await
.inspect(|_| {
collect_upload_piece_traffic_metrics(
self.id_generator.task_type(task_id) as i32,
length,
);
piece
})
}
/// download_persistent_cache_from_local_into_async_read downloads a persistent cache piece from local cache.
#[instrument(skip_all, fields(piece_id))]
pub async fn download_persistent_cache_from_local_into_async_read(
&self,
piece_id: &str,
task_id: &str,
length: u64,
range: Option<Range>,
disable_rate_limit: bool,
is_prefetch: bool,
) -> Result<impl AsyncRead> {
// Span record the piece_id.
Span::current().record("piece_id", piece_id);
Span::current().record("piece_length", length);
// Acquire the download rate limiter.
if !disable_rate_limit {
if is_prefetch {
// Acquire the prefetch rate limiter.
self.prefetch_rate_limiter.acquire(length as usize).await;
} else {
// Acquire the download rate limiter.
self.download_rate_limiter.acquire(length as usize).await;
}
}
// Read the persistent cache piece content from the local storage.
self.storage
.upload_persistent_cache_piece(piece_id, task_id, range)
.await
}
/// download_persistent_cache_from_local downloads a persistent cache piece from local cache. Fake the download
/// persistent cache piece from the local cache, just collect the metrics.
#[instrument(skip_all)]
pub fn download_persistent_cache_from_local(&self, task_id: &str, length: u64) {
collect_download_piece_traffic_metrics(
&TrafficType::LocalPeer,
self.id_generator.task_type(task_id) as i32,
length,
);
}
/// download_persistent_cache_from_parent downloads a persistent cache piece from a parent.
#[allow(clippy::too_many_arguments)]
#[instrument(skip_all, fields(piece_id))]
pub async fn download_persistent_cache_from_parent(
&self,
piece_id: &str,
host_id: &str,
task_id: &str,
number: u32,
length: u64,
parent: piece_collector::CollectedParent,
is_prefetch: bool,
) -> Result<metadata::Piece> {
// Span record the piece_id.
Span::current().record("piece_id", piece_id);
Span::current().record("piece_length", length);
if is_prefetch {
// Acquire the prefetch rate limiter.
self.prefetch_rate_limiter.acquire(length as usize).await;
} else {
// Acquire the download rate limiter.
self.download_rate_limiter.acquire(length as usize).await;
}
// Record the start of downloading piece.
let piece = self
.storage
.download_persistent_cache_piece_started(piece_id, number)
.await?;
// If the piece has already been downloaded by another thread,
// return it directly.
if piece.is_finished() {
info!("finished persistent cache piece {} from local", piece_id);
return Ok(piece);
}
// Create a dfdaemon client.
let host = parent.host.clone().ok_or_else(|| {
error!("peer host is empty");
if let Some(err) = self
.storage
.download_persistent_cache_piece_failed(piece_id)
.err()
{
error!("set persistent cache piece metadata failed: {}", err)
};
Error::InvalidPeer(parent.id.clone())
})?;
let (content, offset, digest) = self
.downloader
.download_persistent_cache_piece(
format!("{}:{}", host.ip, host.port).as_str(),
number,
host_id,
task_id,
)
.await
.inspect_err(|err| {
error!("download persistent cache piece failed: {}", err);
if let Some(err) = self
.storage
.download_persistent_cache_piece_failed(piece_id)
.err()
{
error!("set persistent cache piece metadata failed: {}", err)
};
})?;
let mut reader = Cursor::new(content);
// Record the finish of downloading piece.
match self
.storage
.download_persistent_cache_piece_from_parent_finished(
piece_id,
task_id,
offset,
length,
digest.as_str(),
parent.id.as_str(),
&mut reader,
)
.await
{
Ok(piece) => {
collect_download_piece_traffic_metrics(
&TrafficType::RemotePeer,
self.id_generator.task_type(task_id) as i32,
length,
);
Ok(piece)
}
Err(err) => {
error!("download persistent cache piece finished: {}", err);
if let Some(err) = self
.storage
.download_persistent_cache_piece_failed(piece_id)
.err()
{
error!("set persistent cache piece metadata failed: {}", err)
};
Err(err)
}
}
}
}
#[cfg(test)]
@ -879,7 +653,7 @@ mod tests {
use tempfile::tempdir;
#[tokio::test]
async fn test_calculate_interested() {
async fn should_calculate_interested() {
let temp_dir = tempdir().unwrap();
let config = Config::default();
@ -906,8 +680,7 @@ mod tests {
id_generator.clone(),
storage.clone(),
backend_factory.clone(),
)
.unwrap();
);
let test_cases = vec![
(1000, 1, None, 1, vec![0], 0, 1),

View File

@ -17,8 +17,9 @@
use crate::grpc::dfdaemon_upload::DfdaemonUploadClient;
use dashmap::DashMap;
use dragonfly_api::common::v2::Host;
use dragonfly_api::dfdaemon::v2::{SyncPersistentCachePiecesRequest, SyncPiecesRequest};
use dragonfly_api::dfdaemon::v2::SyncPiecesRequest;
use dragonfly_client_config::dfdaemon::Config;
use dragonfly_client_core::error::{ErrorType, OrErr};
use dragonfly_client_core::{Error, Result};
use dragonfly_client_storage::metadata;
use std::sync::Arc;
@ -28,9 +29,7 @@ use tokio::task::JoinSet;
use tokio_stream::StreamExt;
use tracing::{error, info, instrument, Instrument};
const DEFAULT_WAIT_FOR_PIECE_FROM_DIFFERENT_PARENTS: Duration = Duration::from_millis(5);
/// CollectedParent is the parent peer collected from the parent.
/// CollectedParent is the parent peer collected from the remote peer.
#[derive(Clone, Debug)]
pub struct CollectedParent {
/// id is the id of the parent.
@ -69,278 +68,27 @@ pub struct PieceCollector {
/// interested_pieces is the pieces interested by the collector.
interested_pieces: Vec<metadata::Piece>,
/// collected_pieces is a map to store the collected pieces from different parents.
collected_pieces: Arc<DashMap<u32, Vec<CollectedParent>>>,
/// collected_pieces is the pieces collected from peers.
collected_pieces: Arc<DashMap<u32, String>>,
}
/// PieceCollector is used to collect pieces from peers.
impl PieceCollector {
/// new creates a new PieceCollector.
pub async fn new(
#[instrument(skip_all)]
pub fn new(
config: Arc<Config>,
host_id: &str,
task_id: &str,
interested_pieces: Vec<metadata::Piece>,
parents: Vec<CollectedParent>,
) -> Self {
let collected_pieces = Arc::new(DashMap::with_capacity(interested_pieces.len()));
for interested_piece in &interested_pieces {
collected_pieces.insert(interested_piece.number, Vec::new());
}
Self {
config,
task_id: task_id.to_string(),
host_id: host_id.to_string(),
parents,
interested_pieces,
collected_pieces,
}
}
/// run runs the piece collector.
#[instrument(skip_all)]
pub async fn run(&self) -> Receiver<CollectedPiece> {
let config = self.config.clone();
let host_id = self.host_id.clone();
let task_id = self.task_id.clone();
let parents = self.parents.clone();
let interested_pieces = self.interested_pieces.clone();
let collected_pieces = self.collected_pieces.clone();
let collected_piece_timeout = self.config.download.collected_piece_timeout;
let (collected_piece_tx, collected_piece_rx) = mpsc::channel(128 * 1024);
tokio::spawn(
async move {
Self::collect_from_parents(
config,
&host_id,
&task_id,
parents,
interested_pieces,
collected_pieces,
collected_piece_tx,
collected_piece_timeout,
)
.await
.unwrap_or_else(|err| {
error!("collect pieces failed: {}", err);
let collected_pieces = Arc::new(DashMap::new());
interested_pieces
.clone()
.into_iter()
.for_each(|interested_piece| {
collected_pieces.insert(interested_piece.number, "".to_string());
});
}
.in_current_span(),
);
collected_piece_rx
}
/// collect_from_parents collects pieces from multiple parents with load balancing strategy.
///
/// The collection process works in two phases:
/// 1. **Synchronization Phase**: Waits for a configured duration (DEFAULT_WAIT_FOR_PIECE_FROM_DIFFERENT_PARENTS)
/// to collect the same piece information from different parents. This allows the collector
/// to gather multiple sources for each piece.
///
/// 2. **Selection Phase**: After the wait period, randomly selects one parent from the available
/// candidates for each piece and forwards it to the piece downloader.
///
/// **Load Balancing Strategy**:
/// The random parent selection is designed to distribute download load across multiple parents
/// during concurrent piece downloads. This approach ensures:
/// - Optimal utilization of bandwidth from multiple parent nodes
/// - Prevention of overwhelming any single parent with too many requests
/// - Better overall download performance through parallel connections
///
/// This strategy is particularly effective when downloading multiple pieces simultaneously,
/// as it naturally spreads the workload across the available parent pool.
#[allow(clippy::too_many_arguments)]
#[instrument(skip_all)]
async fn collect_from_parents(
config: Arc<Config>,
host_id: &str,
task_id: &str,
parents: Vec<CollectedParent>,
interested_pieces: Vec<metadata::Piece>,
collected_pieces: Arc<DashMap<u32, Vec<CollectedParent>>>,
collected_piece_tx: Sender<CollectedPiece>,
collected_piece_timeout: Duration,
) -> Result<()> {
// Create a task to collect pieces from peers.
let mut join_set = JoinSet::new();
for parent in parents.iter() {
#[allow(clippy::too_many_arguments)]
async fn sync_pieces(
config: Arc<Config>,
host_id: String,
task_id: String,
parent: CollectedParent,
interested_pieces: Vec<metadata::Piece>,
collected_pieces: Arc<DashMap<u32, Vec<CollectedParent>>>,
collected_piece_tx: Sender<CollectedPiece>,
collected_piece_timeout: Duration,
) -> Result<CollectedParent> {
info!("sync pieces from parent {}", parent.id);
// If candidate_parent.host is None, skip it.
let host = parent.host.clone().ok_or_else(|| {
error!("peer {:?} host is empty", parent);
Error::InvalidPeer(parent.id.clone())
})?;
// Create a dfdaemon client.
let dfdaemon_upload_client = DfdaemonUploadClient::new(
config,
format!("http://{}:{}", host.ip, host.port),
false,
)
.await
.inspect_err(|err| {
error!(
"create dfdaemon upload client from parent {} failed: {}",
parent.id, err
);
})?;
let response = dfdaemon_upload_client
.sync_pieces(SyncPiecesRequest {
host_id: host_id.to_string(),
task_id: task_id.to_string(),
interested_piece_numbers: interested_pieces
.iter()
.map(|piece| piece.number)
.collect(),
})
.await
.inspect_err(|err| {
error!("sync pieces from parent {} failed: {}", parent.id, err);
})?;
// If the stream does not yield the next message within the collected piece timeout, it returns an error.
let out_stream = response.into_inner().timeout(collected_piece_timeout);
tokio::pin!(out_stream);
while let Some(message) = out_stream.try_next().await.inspect_err(|err| {
error!("sync pieces from parent {} failed: {}", parent.id, err);
})? {
let message = message?;
if let Some(mut parents) = collected_pieces.get_mut(&message.number) {
parents.push(parent.clone());
} else {
continue;
}
// Wait briefly so the same piece can be collected from different parents
// after the first announcement arrives.
tokio::time::sleep(DEFAULT_WAIT_FOR_PIECE_FROM_DIFFERENT_PARENTS).await;
let parents = match collected_pieces.remove(&message.number) {
Some((_, parents)) => parents,
None => continue,
};
let parent = match parents.get(fastrand::usize(..parents.len())) {
Some(parent) => parent,
None => {
error!(
"collected_pieces does not contain parent for piece {}",
message.number
);
continue;
}
};
info!(
"picked up piece {}-{} metadata from parent {}",
task_id, message.number, parent.id
);
collected_piece_tx
.send(CollectedPiece {
number: message.number,
length: message.length,
parent: parent.clone(),
})
.await
.inspect_err(|err| {
error!("send CollectedPiece failed: {}", err);
})?;
}
Ok(parent)
}
join_set.spawn(
sync_pieces(
config.clone(),
host_id.to_string(),
task_id.to_string(),
parent.clone(),
interested_pieces.clone(),
collected_pieces.clone(),
collected_piece_tx.clone(),
collected_piece_timeout,
)
.in_current_span(),
);
}
// Wait for all tasks to finish.
while let Some(message) = join_set.join_next().await {
match message {
Ok(Ok(peer)) => {
info!("peer {} sync pieces finished", peer.id);
// If all pieces are collected, abort all tasks.
if collected_pieces.is_empty() {
info!("all pieces are collected, abort all tasks");
join_set.abort_all();
}
}
Ok(Err(err)) => {
error!("sync pieces failed: {}", err);
}
Err(err) => {
error!("sync pieces failed: {}", err);
}
}
}
Ok(())
}
}
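As an illustration of the two-phase, load-balanced selection described in the collect_from_parents doc comment above, here is a minimal hedged sketch. Parent is a stand-in for CollectedParent, pick_parent is a hypothetical helper, and the 5 ms window mirrors DEFAULT_WAIT_FOR_PIECE_FROM_DIFFERENT_PARENTS.
use dashmap::DashMap;
use std::sync::Arc;
use std::time::Duration;
#[derive(Clone, Debug)]
#[allow(dead_code)]
struct Parent {
    id: String,
}
/// Phase 1: record that `announcing` can serve the piece, then wait a short
/// window so other parents can announce it too. Phase 2: take all candidates
/// and pick one at random, spreading concurrent piece downloads across the
/// parent pool.
async fn pick_parent(
    collected: Arc<DashMap<u32, Vec<Parent>>>,
    number: u32,
    announcing: Parent,
) -> Option<Parent> {
    collected.get_mut(&number)?.push(announcing);
    tokio::time::sleep(Duration::from_millis(5)).await;
    let (_, parents) = collected.remove(&number)?;
    parents.get(fastrand::usize(..parents.len())).cloned()
}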
/// PersistentCachePieceCollector is used to collect persistent cache pieces from peers.
pub struct PersistentCachePieceCollector {
/// config is the configuration of the dfdaemon.
config: Arc<Config>,
/// host_id is the id of the host.
host_id: String,
/// task_id is the id of the persistent cache task.
task_id: String,
/// parents is the parent peers.
parents: Vec<CollectedParent>,
/// interested_pieces is the pieces interested by the collector.
interested_pieces: Vec<metadata::Piece>,
/// collected_pieces is a map to store the collected pieces from different parents.
collected_pieces: Arc<DashMap<u32, Vec<CollectedParent>>>,
}
/// PersistentCachePieceCollector is used to collect persistent cache pieces from peers.
impl PersistentCachePieceCollector {
/// new creates a new PieceCollector.
pub async fn new(
config: Arc<Config>,
host_id: &str,
task_id: &str,
interested_pieces: Vec<metadata::Piece>,
parents: Vec<CollectedParent>,
) -> Self {
let collected_pieces = Arc::new(DashMap::with_capacity(interested_pieces.len()));
for interested_piece in &interested_pieces {
collected_pieces.insert(interested_piece.number, Vec::new());
}
Self {
config,
@ -362,13 +110,13 @@ impl PersistentCachePieceCollector {
let interested_pieces = self.interested_pieces.clone();
let collected_pieces = self.collected_pieces.clone();
let collected_piece_timeout = self.config.download.piece_timeout;
let (collected_piece_tx, collected_piece_rx) = mpsc::channel(10 * 1024);
let (collected_piece_tx, collected_piece_rx) = mpsc::channel(1024 * 10);
tokio::spawn(
async move {
Self::collect_from_parents(
Self::collect_from_remote_peers(
config,
&host_id,
&task_id,
host_id,
task_id,
parents,
interested_pieces,
collected_pieces,
@ -377,7 +125,7 @@ impl PersistentCachePieceCollector {
)
.await
.unwrap_or_else(|err| {
error!("collect persistent cache pieces failed: {}", err);
error!("collect pieces failed: {}", err);
});
}
.in_current_span(),
@ -386,34 +134,16 @@ impl PersistentCachePieceCollector {
collected_piece_rx
}
/// collect_from_parents collects pieces from multiple parents with load balancing strategy.
///
/// The collection process works in two phases:
/// 1. **Synchronization Phase**: Waits for a configured duration (DEFAULT_WAIT_FOR_PIECE_FROM_DIFFERENT_PARENTS)
/// to collect the same piece information from different parents. This allows the collector
/// to gather multiple sources for each piece.
///
/// 2. **Selection Phase**: After the wait period, randomly selects one parent from the available
/// candidates for each piece and forwards it to the piece downloader.
///
/// **Load Balancing Strategy**:
/// The random parent selection is designed to distribute download load across multiple parents
/// during concurrent piece downloads. This approach ensures:
/// - Optimal utilization of bandwidth from multiple parent nodes
/// - Prevention of overwhelming any single parent with too many requests
/// - Better overall download performance through parallel connections
///
/// This strategy is particularly effective when downloading multiple pieces simultaneously,
/// as it naturally spreads the workload across the available parent pool.
/// collect_from_remote_peers collects pieces from remote peers.
#[allow(clippy::too_many_arguments)]
#[instrument(skip_all)]
async fn collect_from_parents(
async fn collect_from_remote_peers(
config: Arc<Config>,
host_id: &str,
task_id: &str,
host_id: String,
task_id: String,
parents: Vec<CollectedParent>,
interested_pieces: Vec<metadata::Piece>,
collected_pieces: Arc<DashMap<u32, Vec<CollectedParent>>>,
collected_pieces: Arc<DashMap<u32, String>>,
collected_piece_tx: Sender<CollectedPiece>,
collected_piece_timeout: Duration,
) -> Result<()> {
@ -426,35 +156,34 @@ impl PersistentCachePieceCollector {
host_id: String,
task_id: String,
parent: CollectedParent,
parents: Vec<CollectedParent>,
interested_pieces: Vec<metadata::Piece>,
collected_pieces: Arc<DashMap<u32, Vec<CollectedParent>>>,
collected_pieces: Arc<DashMap<u32, String>>,
collected_piece_tx: Sender<CollectedPiece>,
collected_piece_timeout: Duration,
) -> Result<CollectedParent> {
info!("sync persistent cache pieces from parent {}", parent.id);
info!("sync pieces from parent {}", parent.id);
// If candidate_parent.host is None, skip it.
let host = parent.host.clone().ok_or_else(|| {
error!("persistent cache peer {:?} host is empty", parent);
error!("peer {:?} host is empty", parent);
Error::InvalidPeer(parent.id.clone())
})?;
// Create a dfdaemon client.
let dfdaemon_upload_client = DfdaemonUploadClient::new(
config,
format!("http://{}:{}", host.ip, host.port),
false,
)
let dfdaemon_upload_client =
DfdaemonUploadClient::new(config, format!("http://{}:{}", host.ip, host.port))
.await
.inspect_err(|err| {
.map_err(|err| {
error!(
"create dfdaemon upload client from parent {} failed: {}",
parent.id, err
);
err
})?;
let response = dfdaemon_upload_client
.sync_persistent_cache_pieces(SyncPersistentCachePiecesRequest {
.sync_pieces(SyncPiecesRequest {
host_id: host_id.to_string(),
task_id: task_id.to_string(),
interested_piece_numbers: interested_pieces
@ -463,54 +192,39 @@ impl PersistentCachePieceCollector {
.collect(),
})
.await
.inspect_err(|err| {
error!(
"sync persistent cache pieces from parent {} failed: {}",
parent.id, err
);
.map_err(|err| {
error!("sync pieces from parent {} failed: {}", parent.id, err);
err
})?;
// If the stream does not yield the next message within the collected piece timeout, it returns an error.
let out_stream = response.into_inner().timeout(collected_piece_timeout);
tokio::pin!(out_stream);
while let Some(message) = out_stream.try_next().await.inspect_err(|err| {
error!(
"sync persistent cache pieces from parent {} failed: {}",
parent.id, err
);
})? {
while let Some(message) =
out_stream.try_next().await.or_err(ErrorType::StreamError)?
{
let message = message?;
if let Some(mut parents) = collected_pieces.get_mut(&message.number) {
parents.push(parent.clone());
} else {
continue;
}
// Wait briefly so the same piece can be collected from different parents
// after the first announcement arrives.
tokio::time::sleep(DEFAULT_WAIT_FOR_PIECE_FROM_DIFFERENT_PARENTS).await;
let parents = match collected_pieces.remove(&message.number) {
Some((_, parents)) => parents,
let mut parent_id =
match collected_pieces.try_get_mut(&message.number).try_unwrap() {
Some(parent_id) => parent_id,
None => continue,
};
let parent = match parents.get(fastrand::usize(..parents.len())) {
Some(parent) => parent,
None => {
error!(
"collected_pieces does not contain parent for piece {}",
message.number
);
continue;
}
};
parent_id.push_str(&parent.id);
info!(
"picked up piece {}-{} metadata from parent {}",
"received piece {}-{} metadata from parent {}",
task_id, message.number, parent.id
);
let parent = parents
.iter()
.find(|parent| parent.id == parent_id.as_str())
.ok_or_else(|| {
error!("parent {} not found", parent_id.as_str());
Error::InvalidPeer(parent_id.clone())
})?;
collected_piece_tx
.send(CollectedPiece {
number: message.number,
@ -518,9 +232,16 @@ impl PersistentCachePieceCollector {
parent: parent.clone(),
})
.await
.inspect_err(|err| {
.map_err(|err| {
error!("send CollectedPiece failed: {}", err);
err
})?;
// Release the lock of the piece with parent_id.
drop(parent_id);
// Remove the piece from collected_pieces.
collected_pieces.remove(&message.number);
}
Ok(parent)
@ -529,9 +250,10 @@ impl PersistentCachePieceCollector {
join_set.spawn(
sync_pieces(
config.clone(),
host_id.to_string(),
task_id.to_string(),
host_id.clone(),
task_id.clone(),
parent.clone(),
parents.clone(),
interested_pieces.clone(),
collected_pieces.clone(),
collected_piece_tx.clone(),
@ -545,19 +267,19 @@ impl PersistentCachePieceCollector {
while let Some(message) = join_set.join_next().await {
match message {
Ok(Ok(peer)) => {
info!("peer {} sync persistent cache pieces finished", peer.id);
info!("peer {} sync pieces finished", peer.id);
// If all pieces are collected, abort all tasks.
if collected_pieces.is_empty() {
info!("all persistent cache pieces are collected, abort all tasks");
if collected_pieces.len() == 0 {
info!("all pieces are collected, abort all tasks");
join_set.abort_all();
}
}
Ok(Err(err)) => {
error!("sync persistent cache pieces failed: {}", err);
error!("sync pieces failed: {}", err);
}
Err(err) => {
error!("sync persistent cache pieces failed: {}", err);
error!("sync pieces failed: {}", err);
}
}
}

View File

@ -1,405 +0,0 @@
/*
* Copyright 2024 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::grpc::dfdaemon_upload::DfdaemonUploadClient;
use dragonfly_api::dfdaemon::v2::{DownloadPersistentCachePieceRequest, DownloadPieceRequest};
use dragonfly_client_config::dfdaemon::Config;
use dragonfly_client_core::{Error, Result};
use dragonfly_client_storage::metadata;
use std::collections::HashMap;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::sync::Mutex;
use tracing::{debug, error, instrument};
/// DEFAULT_DOWNLOADER_CAPACITY is the default capacity of the downloader to store the clients.
const DEFAULT_DOWNLOADER_CAPACITY: usize = 2000;
/// DEFAULT_DOWNLOADER_IDLE_TIMEOUT is the default idle timeout for the downloader.
const DEFAULT_DOWNLOADER_IDLE_TIMEOUT: Duration = Duration::from_secs(30);
/// Downloader is the interface for downloading pieces, which is implemented by different
/// protocols. The downloader is used to download pieces from the other peers.
#[tonic::async_trait]
pub trait Downloader: Send + Sync {
/// download_piece downloads a piece from the other peer by different protocols.
async fn download_piece(
&self,
addr: &str,
number: u32,
host_id: &str,
task_id: &str,
) -> Result<(Vec<u8>, u64, String)>;
/// download_persistent_cache_piece downloads a persistent cache piece from the other peer by different
/// protocols.
async fn download_persistent_cache_piece(
&self,
addr: &str,
number: u32,
host_id: &str,
task_id: &str,
) -> Result<(Vec<u8>, u64, String)>;
}
/// DownloaderFactory is the factory for creating different downloaders by different protocols.
pub struct DownloaderFactory {
/// downloader is the downloader for downloading pieces, which is implemented by different
/// protocols.
downloader: Arc<dyn Downloader + Send + Sync>,
}
/// DownloaderFactory implements the factory for building downloaders.
impl DownloaderFactory {
/// new returns a new DownloaderFactory.
pub fn new(protocol: &str, config: Arc<Config>) -> Result<Self> {
let downloader = match protocol {
"grpc" => Arc::new(GRPCDownloader::new(
config.clone(),
DEFAULT_DOWNLOADER_CAPACITY,
DEFAULT_DOWNLOADER_IDLE_TIMEOUT,
)),
_ => {
error!("downloader unsupported protocol: {}", protocol);
return Err(Error::InvalidParameter);
}
};
Ok(Self { downloader })
}
/// build returns the downloader.
pub fn build(&self) -> Arc<dyn Downloader> {
self.downloader.clone()
}
}
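A brief usage sketch of the factory, mirroring how Piece::new wires it up earlier in this diff. The peer address, piece number, and the fetch_piece_example helper are placeholders, and the function is assumed to live in this module, where Config, Result, Arc, Downloader and DownloaderFactory are already in scope.
/// Hypothetical helper: build the downloader once from the configured storage
/// protocol, then fetch a piece from a placeholder peer address.
async fn fetch_piece_example(
    config: Arc<Config>,
    host_id: &str,
    task_id: &str,
) -> Result<(Vec<u8>, u64, String)> {
    // Select the downloader implementation by protocol ("grpc" today).
    let downloader: Arc<dyn Downloader> = DownloaderFactory::new(
        config.storage.server.protocol.as_str(),
        config.clone(),
    )?
    .build();
    // Download piece 0 of the task from a placeholder peer address.
    downloader
        .download_piece("10.0.0.2:4000", 0, host_id, task_id)
        .await
}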
/// RequestGuard is the guard for the request.
struct RequestGuard {
/// active_requests is the number of the active requests.
active_requests: Arc<AtomicUsize>,
}
/// RequestGuard implements the guard for the request to add or subtract the active requests.
impl RequestGuard {
/// new returns a new RequestGuard.
fn new(active_requests: Arc<AtomicUsize>) -> Self {
active_requests.fetch_add(1, Ordering::SeqCst);
Self { active_requests }
}
}
/// RequestGuard implements the Drop trait.
impl Drop for RequestGuard {
/// drop subtracts the active requests.
fn drop(&mut self) {
self.active_requests.fetch_sub(1, Ordering::SeqCst);
}
}
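A short usage sketch of the guard pattern above, written as a unit test this module could carry: the counter is incremented while the guard is alive and released automatically on drop, which is what cleanup_idle_client_entries later inspects to decide whether an entry is idle.
#[cfg(test)]
mod request_guard_tests {
    use super::RequestGuard;
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::Arc;
    #[test]
    fn guard_tracks_active_requests() {
        let active_requests = Arc::new(AtomicUsize::new(0));
        {
            let _guard = RequestGuard::new(active_requests.clone());
            // While the guard is alive the entry counts as active.
            assert_eq!(active_requests.load(Ordering::SeqCst), 1);
        }
        // The guard has been dropped, so the entry is idle again.
        assert_eq!(active_requests.load(Ordering::SeqCst), 0);
    }
}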
/// DfdaemonUploadClientEntry is the entry of the dfdaemon upload client.
#[derive(Clone)]
struct DfdaemonUploadClientEntry {
/// client is the dfdaemon upload client.
client: DfdaemonUploadClient,
/// active_requests is the number of the active requests.
active_requests: Arc<AtomicUsize>,
/// actived_at is the last time the client was active.
actived_at: Arc<std::sync::Mutex<Instant>>,
}
/// GRPCDownloader is the downloader for downloading pieces by the gRPC protocol.
/// It will reuse the dfdaemon upload clients to download pieces from the other peers by
/// peer's address.
pub struct GRPCDownloader {
/// config is the configuration of the dfdaemon.
config: Arc<Config>,
/// clients is the map of the dfdaemon upload clients.
clients: Arc<Mutex<HashMap<String, DfdaemonUploadClientEntry>>>,
/// capacity is the capacity of the dfdaemon upload clients. If the number of the
/// clients exceeds the capacity, it will clean up the idle clients.
capacity: usize,
/// idle_timeout is the idle timeout for the client. If the client is idle for a long
/// time, it will be removed when cleaning up the idle clients.
idle_timeout: Duration,
/// cleanup_at is the last time the idle clients were cleaned up.
cleanup_at: Arc<Mutex<Instant>>,
}
/// GRPCDownloader implements the downloader with the gRPC protocol.
impl GRPCDownloader {
/// new returns a new GRPCDownloader.
pub fn new(config: Arc<Config>, capacity: usize, idle_timeout: Duration) -> Self {
Self {
config,
clients: Arc::new(Mutex::new(HashMap::new())),
capacity,
idle_timeout,
cleanup_at: Arc::new(Mutex::new(Instant::now())),
}
}
/// client returns the dfdaemon upload client by the address.
///
/// Operations:
/// 1. If the client entry exists, it will return the client directly to reuse the client by
/// the address.
/// 2. If the client entry does not exist, it will create a new client entry and insert it
/// into the clients map.
async fn client(&self, addr: &str) -> Result<DfdaemonUploadClient> {
let now = Instant::now();
// Clean up idle clients first so that the client map does not exceed its
// capacity and stale clients do not linger.
self.cleanup_idle_client_entries().await;
let clients = self.clients.lock().await;
if let Some(entry) = clients.get(addr) {
debug!("reusing client: {}", addr);
*entry.actived_at.lock().unwrap() = now;
return Ok(entry.client.clone());
}
drop(clients);
// Concurrent requests may create multiple clients for the same address, but
// only the one stored via the entry operation below is kept and reused.
debug!("creating client: {}", addr);
let client =
DfdaemonUploadClient::new(self.config.clone(), format!("http://{}", addr), true)
.await?;
let mut clients = self.clients.lock().await;
let entry = clients
.entry(addr.to_string())
.or_insert(DfdaemonUploadClientEntry {
client: client.clone(),
active_requests: Arc::new(AtomicUsize::new(0)),
actived_at: Arc::new(std::sync::Mutex::new(now)),
});
// If the entry was created by another concurrent request and is being reused,
// update its last active time.
*entry.actived_at.lock().unwrap() = now;
Ok(entry.client.clone())
}
/// get_client_entry returns the client entry by the address.
async fn get_client_entry(&self, addr: &str) -> Option<DfdaemonUploadClientEntry> {
let clients = self.clients.lock().await;
clients.get(addr).cloned()
}
/// remove_client_entry removes the client entry if it is idle.
async fn remove_client_entry(&self, addr: &str) {
let mut clients = self.clients.lock().await;
if let Some(entry) = clients.get(addr) {
if entry.active_requests.load(Ordering::SeqCst) == 0 {
clients.remove(addr);
}
}
}
/// cleanup_idle_clients cleans up the idle clients, which are idle for a long time or have no
/// active requests.
async fn cleanup_idle_client_entries(&self) {
let now = Instant::now();
// Avoid hot cleanup for the clients.
let cleanup_at = self.cleanup_at.lock().await;
let interval = self.idle_timeout / 2;
if now.duration_since(*cleanup_at) < interval {
debug!("avoid hot cleanup");
return;
}
drop(cleanup_at);
let mut clients = self.clients.lock().await;
let exceeds_capacity = clients.len() > self.capacity;
clients.retain(|addr, entry| {
let active_requests = entry.active_requests.load(Ordering::SeqCst);
let is_active = active_requests > 0;
let actived_at = entry.actived_at.lock().unwrap();
let idel_duration = now.duration_since(*actived_at);
let is_recent = idel_duration <= self.idle_timeout;
// Retain the client if it is active, or if the map is not over capacity and the client was recently used.
let should_retain = is_active || (!exceeds_capacity && is_recent);
if !should_retain {
debug!(
"removing idle client: {}, exceeds_capacity: {}, idle_duration: {}s",
addr,
exceeds_capacity,
idel_duration.as_secs(),
);
}
should_retain
});
// Update the cleanup time.
*self.cleanup_at.lock().await = now;
}
}
/// GRPCDownloader implements the Downloader trait.
#[tonic::async_trait]
impl Downloader for GRPCDownloader {
/// download_piece downloads a piece from the other peer by the gRPC protocol.
#[instrument(skip_all)]
async fn download_piece(
&self,
addr: &str,
number: u32,
host_id: &str,
task_id: &str,
) -> Result<(Vec<u8>, u64, String)> {
let client = self.client(addr).await?;
let entry = self
.get_client_entry(addr)
.await
.ok_or(Error::UnexpectedResponse)?;
let request_guard = RequestGuard::new(entry.active_requests.clone());
let response = match client
.download_piece(
DownloadPieceRequest {
host_id: host_id.to_string(),
task_id: task_id.to_string(),
piece_number: number,
},
self.config.download.piece_timeout,
)
.await
{
Ok(response) => response,
Err(err) => {
// If the request fails, it will drop the request guard and remove the client
// entry to avoid using the invalid client.
drop(request_guard);
self.remove_client_entry(addr).await;
return Err(err);
}
};
let Some(piece) = response.piece else {
return Err(Error::InvalidParameter);
};
let Some(content) = piece.content else {
return Err(Error::InvalidParameter);
};
// Calculate the digest of the piece metadata and compare it with the expected digest,
// which verifies the integrity of the piece metadata.
let piece_metadata = metadata::Piece {
number,
length: piece.length,
offset: piece.offset,
digest: piece.digest.clone(),
..Default::default()
};
if let Some(expected_digest) = response.digest {
let digest = piece_metadata.calculate_digest();
if expected_digest != digest {
return Err(Error::DigestMismatch(
expected_digest.to_string(),
digest.to_string(),
));
}
}
Ok((content, piece.offset, piece.digest))
}
/// download_persistent_cache_piece downloads a persistent cache piece from the other peer by
/// the gRPC protocol.
#[instrument(skip_all)]
async fn download_persistent_cache_piece(
&self,
addr: &str,
number: u32,
host_id: &str,
task_id: &str,
) -> Result<(Vec<u8>, u64, String)> {
let client = self.client(addr).await?;
let entry = self
.get_client_entry(addr)
.await
.ok_or(Error::UnexpectedResponse)?;
let request_guard = RequestGuard::new(entry.active_requests.clone());
let response = match client
.download_persistent_cache_piece(
DownloadPersistentCachePieceRequest {
host_id: host_id.to_string(),
task_id: task_id.to_string(),
piece_number: number,
},
self.config.download.piece_timeout,
)
.await
{
Ok(response) => response,
Err(err) => {
// If the request fails, it will drop the request guard and remove the client
// entry to avoid using the invalid client.
drop(request_guard);
self.remove_client_entry(addr).await;
return Err(err);
}
};
let Some(piece) = response.piece else {
return Err(Error::InvalidParameter);
};
let Some(content) = piece.content else {
return Err(Error::InvalidParameter);
};
// Calculate the digest of the piece metadata and compare it with the expected digest,
// which verifies the integrity of the piece metadata.
let piece_metadata = metadata::Piece {
number,
length: piece.length,
offset: piece.offset,
digest: piece.digest.clone(),
..Default::default()
};
if let Some(expected_digest) = response.digest {
let digest = piece_metadata.calculate_digest();
if expected_digest != digest {
return Err(Error::DigestMismatch(
expected_digest.to_string(),
digest.to_string(),
));
}
}
Ok((content, piece.offset, piece.digest))
}
}

File diff suppressed because it is too large

View File

@ -109,100 +109,3 @@ pub async fn shutdown_signal() {
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use tokio::time::{sleep, Duration};
#[tokio::test]
async fn test_shutdown_trigger_and_recv() {
// Create a new shutdown instance.
let mut shutdown = Shutdown::new();
// Trigger the shutdown signal in a separate task.
let shutdown_clone = shutdown.clone();
tokio::spawn(async move {
// Small delay to ensure the receiver is waiting.
sleep(Duration::from_millis(10)).await;
shutdown_clone.trigger();
});
// Wait for the shutdown signal.
shutdown.recv().await;
// Verify that is_shutdown is set to true.
assert!(shutdown.is_shutdown());
}
#[tokio::test]
async fn test_shutdown_multiple_receivers() {
// Create a new shutdown instance.
let mut shutdown1 = Shutdown::new();
let mut shutdown2 = shutdown1.clone();
let mut shutdown3 = shutdown1.clone();
// Trigger the shutdown signal.
shutdown1.trigger();
// All receivers should receive the signal.
shutdown1.recv().await;
shutdown2.recv().await;
shutdown3.recv().await;
// Verify that all instances have is_shutdown set to true.
assert!(shutdown1.is_shutdown());
assert!(shutdown2.is_shutdown());
assert!(shutdown3.is_shutdown());
}
#[tokio::test]
async fn test_shutdown_clone_behavior() {
// Create a new shutdown instance.
let mut shutdown1 = Shutdown::new();
// Set is_shutdown to true.
shutdown1.trigger();
shutdown1.recv().await;
assert!(shutdown1.is_shutdown());
// Clone the instance.
let shutdown2 = shutdown1.clone();
// Verify that the clone has the same is_shutdown value.
assert_eq!(shutdown1.is_shutdown(), shutdown2.is_shutdown());
// Create a new instance before triggering.
let mut shutdown3 = Shutdown::new();
let mut shutdown4 = shutdown3.clone();
// Trigger after cloning.
shutdown3.trigger();
// Both should receive the signal.
shutdown3.recv().await;
shutdown4.recv().await;
assert!(shutdown3.is_shutdown());
assert!(shutdown4.is_shutdown());
}
#[tokio::test]
async fn test_shutdown_already_triggered() {
// Create a new shutdown instance.
let mut shutdown = Shutdown::new();
// Trigger and receive.
shutdown.trigger();
shutdown.recv().await;
assert!(shutdown.is_shutdown());
// Call recv again, should return immediately.
let start = std::time::Instant::now();
shutdown.recv().await;
let elapsed = start.elapsed();
// Verify that recv returned immediately (less than 5ms).
assert!(elapsed < Duration::from_millis(5));
}
}
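The removed tests above exercise a Shutdown handle whose clones all observe a single trigger and whose recv() returns immediately once the signal has been seen. The crate's actual implementation is not shown in this hunk; the following is one plausible sketch of such a type built on tokio::sync::broadcast, offered only as an illustration of those semantics.
use tokio::sync::broadcast;
/// A hypothetical Shutdown with the semantics the tests above rely on.
pub struct Shutdown {
    /// is_shutdown records whether this handle has already seen the signal.
    is_shutdown: bool,
    /// sender broadcasts the shutdown signal to every clone.
    sender: broadcast::Sender<()>,
    /// receiver waits for the shutdown signal.
    receiver: broadcast::Receiver<()>,
}
impl Shutdown {
    /// new creates a shutdown handle that has not been triggered yet.
    pub fn new() -> Self {
        let (sender, receiver) = broadcast::channel(1);
        Self {
            is_shutdown: false,
            sender,
            receiver,
        }
    }
    /// trigger notifies every clone that shutdown has started.
    pub fn trigger(&self) {
        let _ = self.sender.send(());
    }
    /// is_shutdown reports whether this handle has observed the signal.
    pub fn is_shutdown(&self) -> bool {
        self.is_shutdown
    }
    /// recv waits for the signal and returns immediately once it has been seen.
    pub async fn recv(&mut self) {
        if self.is_shutdown {
            return;
        }
        let _ = self.receiver.recv().await;
        self.is_shutdown = true;
    }
}
impl Clone for Shutdown {
    /// clone keeps the observed state and subscribes a fresh receiver.
    fn clone(&self) -> Self {
        Self {
            is_shutdown: self.is_shutdown,
            sender: self.sender.clone(),
            receiver: self.sender.subscribe(),
        }
    }
}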

View File

@ -67,6 +67,7 @@ pub struct Stats {
/// Stats implements the stats server.
impl Stats {
/// new creates a new Stats.
#[instrument(skip_all)]
pub fn new(
addr: SocketAddr,
shutdown: shutdown::Shutdown,
@ -80,6 +81,7 @@ impl Stats {
}
/// run starts the stats server.
#[instrument(skip_all)]
pub async fn run(&self) {
// Clone the shutdown channel.
let mut shutdown = self.shutdown.clone();
@ -108,6 +110,7 @@ impl Stats {
_ = shutdown.recv() => {
// Stats server shutting down with signals.
info!("stats server shutting down");
return
}
}
}

View File

@ -14,19 +14,16 @@
* limitations under the License.
*/
use dragonfly_client_config::dfdaemon::Host;
use opentelemetry::{global, trace::TracerProvider, KeyValue};
use opentelemetry_otlp::{WithExportConfig, WithTonicConfig};
use opentelemetry_sdk::{propagation::TraceContextPropagator, Resource};
use opentelemetry::sdk::propagation::TraceContextPropagator;
use rolling_file::*;
use std::fs;
use std::fs::OpenOptions;
use std::os::unix::io::AsRawFd;
use std::path::PathBuf;
use std::str::FromStr;
use std::time::Duration;
use tonic::metadata::{MetadataKey, MetadataMap, MetadataValue};
use tracing::{info, Level};
use tracing_appender::non_blocking::WorkerGuard;
use tracing_opentelemetry::OpenTelemetryLayer;
use tracing_flame::FlameLayer;
use tracing_log::LogTracer;
use tracing_subscriber::{
filter::LevelFilter,
fmt::{time::ChronoLocal, Layer},
@ -34,9 +31,6 @@ use tracing_subscriber::{
EnvFilter, Registry,
};
/// SPAN_EXPORTER_TIMEOUT is the timeout for the span exporter.
const SPAN_EXPORTER_TIMEOUT: Duration = Duration::from_secs(10);
/// init_tracing initializes the tracing system.
#[allow(clippy::too_many_arguments)]
pub fn init_tracing(
@ -44,22 +38,18 @@ pub fn init_tracing(
log_dir: PathBuf,
log_level: Level,
log_max_files: usize,
otel_protocol: Option<String>,
otel_endpoint: Option<String>,
otel_path: Option<PathBuf>,
otel_headers: Option<reqwest::header::HeaderMap>,
host: Option<Host>,
is_seed_peer: bool,
console: bool,
jaeger_addr: Option<String>,
flamegraph: bool,
redirect_stderr: bool,
verbose: bool,
) -> Vec<WorkerGuard> {
let mut guards = vec![];
// Setup stdout layer.
let (stdout_writer, stdout_guard) = tracing_appender::non_blocking(std::io::stdout());
guards.push(stdout_guard);
// Initialize stdout layer.
let stdout_filter = if console {
let stdout_filter = if verbose {
LevelFilter::DEBUG
} else {
LevelFilter::OFF
@ -74,6 +64,7 @@ pub fn init_tracing(
.with_timer(ChronoLocal::rfc_3339())
.pretty()
.with_filter(stdout_filter);
guards.push(stdout_guard);
// Setup file layer.
fs::create_dir_all(log_dir.clone()).expect("failed to create log directory");
@ -85,8 +76,6 @@ pub fn init_tracing(
.expect("failed to create rolling file appender");
let (rolling_writer, rolling_writer_guard) = tracing_appender::non_blocking(rolling_appender);
guards.push(rolling_writer_guard);
let file_logging_layer = Layer::new()
.with_writer(rolling_writer)
.with_ansi(false)
@ -97,126 +86,71 @@ pub fn init_tracing(
.with_thread_ids(false)
.with_timer(ChronoLocal::rfc_3339())
.compact();
guards.push(rolling_writer_guard);
// Setup env filter for log level.
let env_filter = EnvFilter::try_from_default_env()
.unwrap_or_else(|_| EnvFilter::default().add_directive(log_level.into()));
// Enable console subscriber layer for tracing spawn tasks on `127.0.0.1:6669` when log level is TRACE.
let console_subscriber_layer = if log_level == Level::TRACE {
Some(console_subscriber::spawn())
// Setup flame layer.
let flame_layer = if flamegraph {
let (flame_layer, _guard) = FlameLayer::with_file(log_dir.join("tracing.folded"))
.expect("failed to create flame layer");
Some(flame_layer)
} else {
None
};
let subscriber = Registry::default()
.with(env_filter)
.with(console_subscriber_layer)
.with(file_logging_layer)
.with(stdout_logging_layer);
.with(stdout_logging_layer)
.with(flame_layer);
// If OTLP protocol and endpoint are provided, set up OpenTelemetry tracing.
if let (Some(protocol), Some(endpoint)) = (otel_protocol, otel_endpoint) {
let otlp_exporter = match protocol.as_str() {
"grpc" => {
let mut metadata = MetadataMap::new();
if let Some(headers) = otel_headers {
for (key, value) in headers.iter() {
metadata.insert(
MetadataKey::from_str(key.as_str())
.expect("failed to create metadata key"),
MetadataValue::from_str(value.to_str().unwrap())
.expect("failed to create metadata value"),
);
}
}
// Setup jaeger layer.
if let Some(jaeger_addr) = jaeger_addr {
opentelemetry::global::set_text_map_propagator(TraceContextPropagator::new());
let tracer = opentelemetry_jaeger::new_agent_pipeline()
.with_service_name(name)
.with_endpoint(jaeger_addr)
.install_batch(opentelemetry::runtime::Tokio)
.expect("install");
let jaeger_layer = tracing_opentelemetry::layer().with_tracer(tracer);
let subscriber = subscriber.with(jaeger_layer);
let endpoint_url = url::Url::parse(&format!("http://{}", endpoint))
.expect("failed to parse OTLP endpoint URL");
opentelemetry_otlp::SpanExporter::builder()
.with_tonic()
.with_endpoint(endpoint_url)
.with_timeout(SPAN_EXPORTER_TIMEOUT)
.with_metadata(metadata)
.build()
.expect("failed to create OTLP exporter")
}
"http" | "https" => {
let mut endpoint_url = url::Url::parse(&format!("{}://{}", protocol, endpoint))
.expect("failed to parse OTLP endpoint URL");
if let Some(path) = otel_path {
endpoint_url = endpoint_url
.join(path.to_str().unwrap())
.expect("failed to join OTLP endpoint path");
}
opentelemetry_otlp::SpanExporter::builder()
.with_http()
.with_endpoint(endpoint_url.as_str())
.with_protocol(opentelemetry_otlp::Protocol::HttpJson)
.with_timeout(SPAN_EXPORTER_TIMEOUT)
.build()
.expect("failed to create OTLP exporter")
}
_ => {
panic!("unsupported OTLP protocol: {}", protocol);
}
};
let host = host.unwrap();
let provider = opentelemetry_sdk::trace::SdkTracerProvider::builder()
.with_batch_exporter(otlp_exporter)
.with_resource(
Resource::builder()
.with_service_name(format!("{}-{}", name, host.ip.unwrap()))
.with_schema_url(
[
KeyValue::new(
opentelemetry_semantic_conventions::attribute::SERVICE_NAMESPACE,
"dragonfly",
),
KeyValue::new(
opentelemetry_semantic_conventions::attribute::HOST_NAME,
host.hostname,
),
KeyValue::new(
opentelemetry_semantic_conventions::attribute::HOST_IP,
host.ip.unwrap().to_string(),
),
],
opentelemetry_semantic_conventions::SCHEMA_URL,
)
.with_attribute(opentelemetry::KeyValue::new(
"host.idc",
host.idc.unwrap_or_default(),
))
.with_attribute(opentelemetry::KeyValue::new(
"host.location",
host.location.unwrap_or_default(),
))
.with_attribute(opentelemetry::KeyValue::new("host.seed_peer", is_seed_peer))
.build(),
)
.build();
let tracer = provider.tracer(name.to_string());
global::set_tracer_provider(provider.clone());
global::set_text_map_propagator(TraceContextPropagator::new());
let jaeger_layer = OpenTelemetryLayer::new(tracer);
subscriber.with(jaeger_layer).init();
tracing::subscriber::set_global_default(subscriber)
.expect("failed to set global subscriber");
} else {
subscriber.init();
tracing::subscriber::set_global_default(subscriber)
.expect("failed to set global subscriber");
}
std::panic::set_hook(Box::new(tracing_panic::panic_hook));
LogTracer::init().expect("failed to init LogTracer");
info!(
"tracing initialized directory: {}, level: {}",
log_dir.as_path().display(),
log_level
);
// Redirect stderr to file.
if redirect_stderr {
redirect_stderr_to_file(log_dir);
}
guards
}
/// redirect_stderr_to_file redirects stderr to a file.
fn redirect_stderr_to_file(log_dir: PathBuf) {
let log_path = log_dir.join("stderr.log");
let file = OpenOptions::new()
.create(true)
.append(true)
.open(log_path)
.unwrap();
unsafe {
libc::dup2(file.as_raw_fd(), libc::STDERR_FILENO);
}
}

View File

@ -1,2 +1,2 @@
[toolchain]
channel = "1.85.0"
channel = "1.80.0"